repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
9.83k
683M
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
CarlosEOV/Filter
https://github.com/CarlosEOV/Filter
e1ca04967b855f6313c3db42cccd3bc96a29e8f9
52d8601fa46f33e800b760950ff2984a1bdb44bd
6139bd1045348e528be5eccd398fea959fb0667f
refs/heads/master
2023-07-04T13:04:36.964775
2021-08-12T01:39:10
2021-08-12T01:39:10
346,913,767
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4156103730201721, "alphanum_fraction": 0.4654863476753235, "avg_line_length": 34.06401062011719, "blob_id": "078c98a2199bbb0d7dca1fdd7265393282d6501f", "content_id": "8853ee493b62627af633a3b911b792e57754ce4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29032, "license_type": "no_license", "max_line_length": 201, "num_lines": 828, "path": "/src/Filters.py", "repo_name": "CarlosEOV/Filter", "src_encoding": "UTF-8", "text": "from PIL.ImageGrab import grab\nfrom PySimpleGUI.PySimpleGUI import Sizer\nfrom Decoder_IMG import update_img\nfrom PIL import Image, ImageFont, ImageDraw\nimport math\nimport random\n\ndef average_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = int((pixel[0] + pixel[1] + pixel[2] ) / 3)\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = int((pixel[0] * 0.3) + (pixel[1] * 0.59) + (pixel[2] * 0.11))\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef luma_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = int((pixel[0] * 0.2126) + (pixel[1] * 0.7152) + (pixel[2] * 0.0722))\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef max_min_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = int((max(max(pixel[0], pixel[1]), pixel[2]) \n + min(min(pixel[0], pixel[1]), pixel[2])) / 2)\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image) \n\ndef max_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = 
max(max(pixel[0], pixel[1]), pixel[2]) \n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef min_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = min(min(pixel[0], pixel[1]), pixel[2]) \n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef red_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = pixel[0]\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef green_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = pixel[1]\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef blue_grayscale(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = pixel[2]\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef shades_of_grayscale(image, number_of_shades = 5):\n pixels = image.load()\n ConversionFactor = 255 / (number_of_shades - 1)\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n AverageValue = (pixel[0] + pixel[1] + pixel[2] ) / 3\n gray = int((AverageValue / ConversionFactor) + 0.5) * int(ConversionFactor)\n pixels[i, j] = (gray, gray, gray, 255)\n return update_img(image)\n\ndef brightness(image, brightness=0):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n pixels[i, j] = (clamp(pixel[0] + brightness, 0 , 255), clamp(pixel[1] + brightness, 0 , 255), clamp(pixel[2] + brightness, 0 , 255), 255)\n return update_img(image)\n\ndef blend(image, image2, alpha):\n pixels = image.load()\n pixels_2 = image2.load()\n\n w1 = image.size[0]\n w2 = image2.size[0]\n h1 = image.size[1]\n h2 = image2.size[1]\n\n w = w1 if w1 < w2 else 
w2\n h = h1 if h1 < h2 else h2\n \n for i in range(w):\n for j in range(h):\n pixel = pixels[i, j]\n pixel2 = pixels_2[i, j]\n if pixel2[3] != 0:\n pixels[i, j] = (int((pixel[0] * alpha) + (pixel2[0] * (1.0 - alpha))), int((pixel[1] * alpha) + (pixel2[1] * (1.0 - alpha))), int((pixel[2] * alpha) + (pixel2[2] * (1.0 - alpha))), 255)\n return update_img(image)\n\ndef watermark(image, watermark, position, alpha):\n og_size = (image.size[0], image.size[1])\n watermark_img = Image.new('RGBA', og_size, (255,255,255,0))\n d = ImageDraw.Draw(watermark_img)\n fnt_size = int(og_size[1] * 0.1)\n fnt = ImageFont.truetype(\"/System/Library/Fonts/arial.ttf\", fnt_size)\n if len(watermark) > 60:\n watermark = watermark[:60]\n\n h = int(og_size[0] * 0.33)\n if position[0] == 0:\n h = int(og_size[0] * 0.05)\n elif position[0] == 2:\n h = int(og_size[0] * 0.66)\n \n v = int(og_size[1] * 0.33)\n if position[1] == 0:\n v = int(og_size[1] * 0.05)\n elif position[1] == 2:\n v = int(og_size[1] * 0.66) \n \n d.text((h,v), watermark, font=fnt, fill=(0, 0, 0, 255))\n\n return blend(image, watermark_img, alpha)\n\ndef mosaic_img_bw(image, img_grid, w, h):\n num_shades = 32\n shades = create_shades(img_grid, (w, h), num_shades)\n pixels = image.load()\n\n for j in range(0, image.size[1], h):\n for i in range(0, image.size[0], w):\n average_grid(pixels, i, j, w, h, (image.size[0], image.size[1]), False, True, False, shades)\n \n return update_img(image)\n\ndef semitone(image, set_idx, w, h):\n shades = load_shades(set_idx, (w, h))\n pixels = image.load()\n\n for j in range(0, image.size[1], h):\n for i in range(0, image.size[0], w):\n average_semitone(pixels, i, j, w, h, (image.size[0], image.size[1]), shades)\n\n return update_img(image)\n \ndef load_shades(set_idx, size):\n shades = []\n set_img = \"img/\"\n length = 10\n img_name = \"\"\n if(set_idx == 0):\n set_img += \"a/\"\n img_name += \"a\"\n elif(set_idx == 1):\n set_img += \"b/\"\n img_name += \"b\"\n else:\n set_img += \"c/\" \n 
img_name += \"c\"\n length = 5\n \n for image in range(0, length):\n shade = Image.open(set_img + img_name + str(image) + \".png\")\n shades.append(shade.resize(size))\n\n return shades\n\ndef average_semitone(pixels, origin_x, origin_y, x, y, img_size, shades):\n w = origin_x + x\n h = origin_y + y\n if w > img_size[0]: w = img_size[0]\n if h > img_size[1]: h = img_size[1]\n average = [0, 0, 0]\n total = 0\n \n for i in range(origin_x, w):\n for j in range(origin_y, h):\n total += 1\n pixel = pixels[i, j]\n average[0] += pixel[0] \n average[1] += pixel[1] \n average[2] += pixel[2]\n \n if total != 0:\n for idx in range(3):\n average[idx] = int(average[idx] / total)\n \n gray = int((average[0] + average[1] + average[2]) / 3)\n step = int(256 / len(shades))\n shade_idx = 0 \n for idx in range(0, 256, step):\n if idx <= gray <= (idx + step - 1):\n if shade_idx >= len(shades):\n shade_idx = len(shades) - 1\n shade = shades[shade_idx].copy()\n break\n shade_idx += 1\n\n pixels_sh = shade.load()\n ri_x = -1 \n for i in range(origin_x, w):\n ri_x += 1\n ri_y = -1\n for j in range(origin_y, h):\n ri_y += 1\n if ri_y < shade.size[1] and ri_x < shade.size[0]:\n pixel_sh: tuple = pixels_sh[ri_x, ri_y]\n pixels[i, j] = (pixel_sh[0], pixel_sh[1], pixel_sh[2], 255)\n\ndef mosaic_true_colors(image, img_grid, w, h):\n pixels = image.load()\n img_grid = img_grid.resize(size=(w, h))\n for j in range(0, image.size[1], h):\n for i in range(0, image.size[0], w):\n average_grid(pixels, i, j, w, h, (image.size[0], image.size[1]), is_for_txt=False, rep_img_bnw=False, rep_img=True, shades_or_img=img_grid)\n \n return update_img(image)\n\ndef create_shades(image, size, num_shades):\n shades = []\n image = image.resize(size=size)\n grayscale(image)\n \n jump = int(256 / num_shades)\n for bright in range(-127, 127, jump):\n new_img = image.copy()\n brightness(new_img, bright)\n shades.append(new_img)\n\n return shades\n\ndef mosaic(image, w, h, method_id=0, sign='CARLOS'):\n pixels = 
image.load()\n size = (image.size[0], image.size[1])\n sign_len = len(sign)\n if sign_len > 20:\n sign = sign[:20]\n sign_len = 20\n sign_idx = 0\n d = ImageDraw.Draw(image)\n fnt = ImageFont.truetype(\"/System/Library/Fonts/arial.ttf\", h)\n lasvb = ImageFont.truetype(\"fonts/Lasvbld.ttf\", h) \n lasvw = ImageFont.truetype(\"fonts/Lasvwd.ttf\", h)\n plcrds = ImageFont.truetype(\"fonts/PLAYCRDS.TTF\", h+5)\n\n for j in range(0, size[1], h):\n for i in range(0, size[0], w): \n if method_id == 0: \n average_grid(pixels, i, j, w, h, size)\n\n if method_id == 1 or method_id == 9:\n average = average_grid(pixels, i, j, w, h, size, is_for_txt=True)\n if method_id == 9:\n d.text((i,j), sign[sign_idx], font=fnt, fill=(average[0], average[1], average[2], 255))\n sign_idx = ( sign_idx + 1 ) % sign_len\n else:\n d.text((i,j), \"M\", font=fnt, fill=(average[0], average[1], average[2], 255))\n\n\n if method_id == 2:\n average = average_grid(pixels, i, j, w, h, size, is_for_txt=True)\n gray = int((average[0] + average[1] + average[2]) / 3)\n d.text((i,j), \"M\", font=fnt, fill=(gray, gray, gray, 255))\n \n if method_id == 3 or method_id == 4 or method_id == 5:\n fill_color = (0,0,0,255)\n average = average_grid(pixels, i, j, w, h, size, is_for_txt=True)\n gray = int((average[0] + average[1] + average[2]) / 3)\n if method_id == 4:\n fill_color = (average[0], average[1], average[2], 255)\n if method_id == 5:\n fill_color = (gray, gray, gray, 255)\n if 0 <= gray <= 15:\n d.text((i,j), \"M\", font=fnt, fill=fill_color)\n if 16 <= gray <= 31:\n d.text((i,j), \"N\", font=fnt, fill=fill_color)\n if 32 <= gray <= 47:\n d.text((i,j), \"H\", font=fnt, fill=fill_color)\n if 48 <= gray <= 63:\n d.text((i,j), \"#\", font=fnt, fill=fill_color)\n if 64 <= gray <= 79:\n d.text((i,j), \"Q\", font=fnt, fill=fill_color)\n if 80 <= gray <= 95:\n d.text((i,j), \"U\", font=fnt, fill=fill_color)\n if 96 <= gray <= 111:\n d.text((i,j), \"A\", font=fnt, fill=fill_color)\n if 112 <= gray <= 127:\n 
d.text((i,j), \"D\", font=fnt, fill=fill_color)\n if 128 <= gray <= 143:\n d.text((i,j), \"O\", font=fnt, fill=fill_color)\n if 144 <= gray <= 159:\n d.text((i,j), \"Y\", font=fnt, fill=fill_color)\n if 160 <= gray <= 175:\n d.text((i,j), \"2\", font=fnt, fill=fill_color)\n if 176 <= gray <= 191:\n d.text((i,j), \"$\", font=fnt, fill=fill_color)\n if 192 <= gray <= 209:\n d.text((i,j), \"%\", font=fnt, fill=fill_color)\n if 210 <= gray <= 225:\n d.text((i,j), \"+\", font=fnt, fill=fill_color)\n if 226 <= gray <= 239:\n d.text((i,j), \".\", font=fnt, fill=fill_color)\n if 240 <= gray <= 255:\n d.text((i,j), \" \", font=fnt, fill=fill_color)\n \n if method_id == 6:\n average = average_grid(pixels, i, j, w, h, size, is_for_txt=True)\n gray = int((average[0] + average[1] + average[2]) / 3)\n \n if 0 <= gray <= 25:\n d.text((i,j), \"0\", font=lasvb, fill=(0,0,0,255))\n if 26 <= gray <= 50:\n d.text((i,j), \"1\", font=lasvb, fill=(0,0,0,255))\n if 51 <= gray <= 75:\n d.text((i,j), \"2\", font=lasvb, fill=(0,0,0,255))\n if 76 <= gray <= 100:\n d.text((i,j), \"3\", font=lasvb, fill=(0,0,0,255))\n if 101 <= gray <= 125:\n d.text((i,j), \"4\", font=lasvb, fill=(0,0,0,255))\n if 126 <= gray <= 150:\n d.text((i,j), \"5\", font=lasvb, fill=(0,0,0,255))\n if 151 <= gray <= 175:\n d.text((i,j), \"6\", font=lasvb, fill=(0,0,0,255))\n if 176 <= gray <= 200:\n d.text((i,j), \"7\", font=lasvb, fill=(0,0,0,255))\n if 201 <= gray <= 225:\n d.text((i,j), \"8\", font=lasvb, fill=(0,0,0,255))\n if 226 <= gray <= 255:\n d.text((i,j), \"9\", font=lasvb, fill=(0,0,0,255))\n \n if method_id == 7:\n average = average_grid(pixels, i, j, w, h, size, is_for_txt=True)\n gray = int((average[0] + average[1] + average[2]) / 3)\n \n if 0 <= gray <= 25:\n d.text((i,j), \"9\", font=lasvw, fill=(0,0,0,255))\n if 26 <= gray <= 50:\n d.text((i,j), \"8\", font=lasvw, fill=(0,0,0,255))\n if 51 <= gray <= 75:\n d.text((i,j), \"7\", font=lasvw, fill=(0,0,0,255))\n if 76 <= gray <= 100:\n d.text((i,j), \"6\", 
font=lasvw, fill=(0,0,0,255))\n if 101 <= gray <= 125:\n d.text((i,j), \"5\", font=lasvw, fill=(0,0,0,255))\n if 126 <= gray <= 150:\n d.text((i,j), \"4\", font=lasvw, fill=(0,0,0,255))\n if 151 <= gray <= 175:\n d.text((i,j), \"3\", font=lasvw, fill=(0,0,0,255))\n if 176 <= gray <= 200:\n d.text((i,j), \"2\", font=lasvw, fill=(0,0,0,255))\n if 201 <= gray <= 225:\n d.text((i,j), \"1\", font=lasvw, fill=(0,0,0,255))\n if 226 <= gray <= 255:\n d.text((i,j), \"0\", font=lasvw, fill=(0,0,0,255))\n \n if method_id == 8:\n average = average_grid(pixels, i, j, w, h, size, is_for_txt=True)\n gray = int((average[0] + average[1] + average[2]) / 3)\n \n if 0 <= gray <= 23:\n d.text((i,j), \"k\", font=plcrds, fill=(0,0,0,255))\n if 24 <= gray <= 46:\n d.text((i,j), \"j\", font=plcrds, fill=(0,0,0,255))\n if 47 <= gray <= 69:\n d.text((i,j), \"i\", font=plcrds, fill=(0,0,0,255))\n if 70 <= gray <= 92:\n d.text((i,j), \"h\", font=plcrds, fill=(0,0,0,255))\n if 93 <= gray <= 115:\n d.text((i,j), \"g\", font=plcrds, fill=(0,0,0,255))\n if 116 <= gray <= 138:\n d.text((i,j), \"f\", font=plcrds, fill=(0,0,0,255))\n if 139 <= gray <= 161:\n d.text((i,j), \"e\", font=plcrds, fill=(0,0,0,255))\n if 162 <= gray <= 184:\n d.text((i,j), \"d\", font=plcrds, fill=(0,0,0,255))\n if 185 <= gray <= 207:\n d.text((i,j), \"c\", font=plcrds, fill=(0,0,0,255))\n if 208 <= gray <= 230:\n d.text((i,j), \"b\", font=plcrds, fill=(0,0,0,255))\n if 231 <= gray <= 255:\n d.text((i,j), \"a\", font=plcrds, fill=(0,0,0,255))\n\n return update_img(image)\n\ndef average_grid(pixels, origin_x, origin_y, x, y, img_size, is_for_txt=False, rep_img_bnw=False, rep_img=False, shades_or_img=None):\n w = origin_x + x\n h = origin_y + y\n if w > img_size[0]: w = img_size[0]\n if h > img_size[1]: h = img_size[1]\n average = [0, 0, 0]\n total = 0\n \n for i in range(origin_x, w):\n for j in range(origin_y, h):\n total += 1\n pixel = pixels[i, j]\n average[0] += pixel[0] \n average[1] += pixel[1] \n average[2] += 
pixel[2]\n \n if total != 0:\n for idx in range(3):\n average[idx] = int(average[idx] / total)\n \n if rep_img_bnw:\n \n gray = int((average[0] + average[1] + average[2]) / 3)\n shade_idx = 0\n shade = None\n\n for idx in range(0, 256, 8):\n if idx <= gray <= (idx + 7):\n shade = shades_or_img[shade_idx].copy()\n break\n shade_idx += 1\n \n pixels_sh = shade.load()\n \n ri_x = -1 \n for i in range(origin_x, w):\n ri_x += 1\n ri_y = -1\n for j in range(origin_y, h):\n ri_y += 1\n if ri_y <= shade.size[1] - 1 and ri_x <= shade.size[0] - 1:\n pixel_sh = pixels_sh[ri_x, ri_y]\n pixels[i, j] = (pixel_sh[0], pixel_sh[1], pixel_sh[2], 255)\n \n elif rep_img:\n img_grid = shades_or_img.copy()\n \n RGB_components(img_grid, average[0], average[1], average[2])\n pixels_grid = img_grid.load()\n \n ri_x = -1\n for i in range(origin_x, w):\n ri_x += 1\n ri_y = -1\n for j in range(origin_y, h):\n ri_y += 1\n if ri_y <= img_grid.size[1] - 1 and ri_x <= img_grid.size[0] - 1:\n pixel_grid = pixels_grid[ri_x, ri_y]\n pixels[i, j] = (pixel_grid[0], pixel_grid[1], pixel_grid[2], 255)\n\n else:\n for i in range(origin_x, w):\n for j in range(origin_y, h):\n pixel = pixels[i, j]\n pixels[i, j] = (average[0], average[1], average[2], 255)\n if is_for_txt:\n pixels[i, j] = (255, 255, 255, 255)\n return average\n\ndef clamp(x, minimum, maximum, factor=1, bias=0):\n return max(minimum, min(factor * x + bias, maximum))\n\ndef high_contrast(image, inverted=False):\n pixels = image.load()\n white = 255\n black = 0\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = int((pixel[0] * 0.3) + (pixel[1] * 0.59) + (pixel[2] * 0.11))\n if inverted:\n color = white if gray < 127 else black\n else:\n color = white if gray > 127 else black\n pixels[i, j] = (color, color, color, 255)\n return update_img(image)\n\ndef RGB_components(image, r, g, b):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n 
pixels[i, j] = (clamp(r & pixel[0], 0, 255), clamp(g & pixel[1], 0, 255), clamp(b & pixel[0], 0, 255), 255)\n return update_img(image)\n\ndef max_min(image, size, max=False):\n grayscale(image)\n img_copy = image.copy()\n pixels = img_copy.load()\n og_pixels = image.load()\n w = image.size[0]\n h = image.size[1]\n for i in range(w):\n for j in range(h):\n imageX = 0\n imageY = 0\n max_min_list = []\n for m in range(size[1]):\n for n in range(size[0]):\n imageX = (i - size[0] / 2 + n + w) % w\n imageY = (j - size[1] / 2 + m + h) % h\n pixel = pixels[imageX , imageY]\n if pixel[0] not in max_min_list:\n max_min_list.append(pixel[0])\n \n max_min_list.sort()\n gray = max_min_list[0]\n \n if max :\n gray = max_min_list[len(max_min_list) - 1]\n \n og_pixels[i, j] = (gray, gray, gray, 255)\n \n return update_img(image)\n\ndef convolution(image, filter_matrix, filter_width, filter_height, factor, bias):\n img_copy = image.copy()\n pixels = img_copy.load()\n og_pixels = image.load()\n w = image.size[0]\n h = image.size[1]\n for i in range(w):\n for j in range(h):\n red = 0\n green = 0\n blue = 0\n imageX = 0\n imageY = 0\n for m in range(filter_height):\n for n in range(filter_width):\n imageX = (i - filter_width / 2 + n + w) % w\n imageY = (j - filter_height / 2 + m + h) % h\n pixel = pixels[imageX , imageY]\n red += pixel[0] * filter_matrix[m][n]\n green += pixel[1] * filter_matrix[m][n]\n blue += pixel[2] * filter_matrix[m][n]\n \n og_pixels[i, j] = (int(clamp(red, 0, 255, factor, bias)), \n int(clamp(green, 0, 255, factor, bias)), \n int(clamp(blue, 0, 255, factor, bias)), \n 255)\n\n return update_img(image)\n\ndef blur(image, intensity=0):\n \n blur_matrix_1 = [\n [0.0, 0.2, 0.0],\n [0.2, 0.2, 0.2],\n [0.0, 0.2, 0.0]\n ]\n\n blur_width_1 = 3\n blur_height_1 = 3\n factor_1 = 1.0\n bias_1 = 0.0\n\n blur_matrix_2 = [\n [0, 0, 1, 0, 0],\n [0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1],\n [0, 1, 1, 1, 0],\n [0, 0, 1, 0, 0],\n ]\n\n blur_width_2 = 5\n blur_height_2 = 5\n factor_2 = 
1.0 / 13.0\n bias_2 = 0.0\n\n if intensity == 0:\n return convolution(image, blur_matrix_1, blur_width_1, blur_height_1, factor_1, bias_1)\n else:\n return convolution(image, blur_matrix_2, blur_width_2, blur_height_2, factor_2, bias_2)\n\ndef motion_blur(image):\n motion_blur_matrix = [\n [1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1]\n ]\n\n motion_blur_w = 9\n motion_blur_h = 9\n factor = 1.0 / 9.0\n bias = 0.0\n\n return convolution(image, motion_blur_matrix, motion_blur_w, motion_blur_h, factor, bias)\n\ndef find_edges(image):\n find_edges_matrix = [\n [-1, 0, 0, 0, 0],\n [0, -2, 0, 0, 0],\n [0, 0, 6, 0, 0],\n [0, 0, 0, -2, 0],\n [0, 0, 0, 0, -1]\n ]\n\n find_edges_w = 5\n find_edges_h = 5\n factor = 1.0\n bias = 0.0\n\n return convolution(image, find_edges_matrix, find_edges_w, find_edges_h, factor, bias)\n\ndef sharpen(image):\n sharpen_matrix = [\n [-1, -1, -1],\n [-1, 9, -1],\n [-1, -1, -1]\n ]\n\n sharpen_w = 3 \n sharpen_h = 3\n factor = 1.0\n bias = 0.0\n\n return convolution(image, sharpen_matrix, sharpen_w, sharpen_h, factor, bias)\n\ndef emboss(image):\n emboss_matrix = [\n [-1, -1, -1, -1, 0],\n [-1, -1, -1, 0, 1],\n [-1, -1, 0, 1, 1],\n [-1, 0, 1, 1, 1],\n [0, 1, 1, 1, 1]\n ]\n\n emboss_w = 5\n emboss_h = 5\n factor = 1.0\n bias = 128.0\n\n return convolution(image, emboss_matrix, emboss_w, emboss_h, factor, bias)\n\ndef random_dithering(image):\n pixels = image.load()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n gray = int((pixel[0] * 0.3) + (pixel[1] * 0.59) + (pixel[2] * 0.11))\n rdm = random.randint(0, 255)\n value = 0 if rdm > gray else 255\n pixels[i, j] = (value, value, value, 255)\n\n return update_img(image)\n\ndef dithering(image, matrix, w, h):\n pixels = 
image.load()\n for j in range(0, image.size[1], h):\n for i in range(0, image.size[0], w):\n dithering_matrix(pixels, i, j, w, h, (image.size[0], image.size[1]), matrix)\n \n return update_img(image)\n\ndef dithering_matrix(pixels, origin_x, origin_y, x, y, img_size, matrix):\n w = origin_x + x\n h = origin_y + y\n if w > img_size[0]: w = img_size[0]\n if h > img_size[1]: h = img_size[1]\n total = 0\n \n for j in range(origin_y, h):\n for i in range(origin_x, w):\n \n pixel = pixels[i, j]\n gray = int((pixel[0] * 0.3) + (pixel[1] * 0.59) + (pixel[2] * 0.11))\n value = int(gray / 28)\n color = 0 if value < matrix[total] else 255\n pixels[i, j] = (color, color, color, 255)\n total += 1\n\ndef clustered_dithering(image):\n matrix = [8, 3, 4, \n 6, 1, 2, \n 7, 5, 9]\n return dithering(image, matrix, 3, 3)\n\ndef scattered_dithering(image):\n matrix = [1, 7, 4, \n 5, 8, 3, \n 6, 2, 9]\n return dithering(image, matrix, 3, 3)\n\ndef average_color(image):\n pixels = image.load()\n total = image.size[0] * image.size[1]\n color = (0,0,0)\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n pixel = pixels[i, j]\n color = (color[0] + pixel[0], color[1] + pixel[1], color[2] + pixel[2])\n average = (int(color[0] / total) , int(color[1] / total) , int(color[2] / total) )\n return average\n\ndef load_library_idx(path):\n library_idx = open(path, 'r')\n lines = library_idx.readlines()\n library = []\n for line in lines:\n data = line.split()\n if len(data) < 4:\n continue\n library.append(data)\n library_idx.close()\n return library\n\ndef fotomorsaic(image, library_idx, size, bestFit=True):\n library = load_library_idx(library_idx[1])\n w = size[0]\n h = size[1]\n pixels = image.load()\n for j in range(0, image.size[1], h):\n for i in range(0, image.size[0], w):\n average_morsaic(pixels, i, j, w, h, (image.size[0], image.size[1]), library, library_idx[0], bestFit)\n \n return update_img(image)\n\ndef average_morsaic(pixels, origin_x, origin_y, x, y, img_size, 
library, foldername, bestFit):\n w = origin_x + x\n h = origin_y + y\n if w > img_size[0]: w = img_size[0]\n if h > img_size[1]: h = img_size[1]\n average = [0, 0, 0]\n total = 0\n \n for i in range(origin_x, w):\n for j in range(origin_y, h):\n total += 1\n pixel = pixels[i, j]\n average[0] += pixel[0] \n average[1] += pixel[1] \n average[2] += pixel[2]\n \n if total != 0:\n for idx in range(3):\n average[idx] = int(average[idx] / total)\n\n\n \n\n if not bestFit:\n distance = []\n \n for img in library:\n average2 = img.copy()\n average2 = average2[:3]\n for chanel in range(3):\n average2[chanel] = int(average2[chanel])\n \n distance.append((riemersma_distance(average, average2), img[3]))\n \n distance.sort(key=sort_by_distance)\n\n distance = distance[:10]\n \n selected = Image.open(foldername +'/'+ random.choice(distance)[1])\n else:\n best = None\n for img in library:\n average2 = img.copy()\n average2 = average2[:3]\n for chanel in range(3):\n average2[chanel] = int(average2[chanel])\n \n distance = riemersma_distance(average, average2)\n \n if best == None or best[0] > distance:\n best = (distance, img[3])\n\n selected = Image.open(foldername +'/'+best[1])\n \n img_grid = selected.resize(size=(x,y))\n \n pixels_grid = img_grid.load()\n ri_x = -1\n for i in range(origin_x, w):\n ri_x += 1\n ri_y = -1\n for j in range(origin_y, h):\n ri_y += 1\n if ri_y <= img_grid.size[1] - 1 and ri_x <= img_grid.size[0] - 1:\n pixel_grid = pixels_grid[ri_x, ri_y]\n pixels[i, j] = (pixel_grid[0], pixel_grid[1], pixel_grid[2], 255)\n\ndef sort_by_distance(e):\n return e[0]\n\ndef riemersma_distance(average1, average2):\n r = (average1[0] + average2[0]) / 2\n deltaR = average1[0] - average2[0] \n deltaG = average1[1] - average2[1] \n deltaB = average1[2] - average2[2]\n\n chanelR = (2 + (r / 256)) * (deltaR * deltaR)\n chanelG = 4 * (deltaG * deltaG)\n chanelB = (2 + ((255 - r) / 256)) * (deltaB * deltaB)\n\n return math.sqrt(chanelR + chanelG + chanelB)" }, { "alpha_fraction": 
0.6089527010917664, "alphanum_fraction": 0.6140202879905701, "avg_line_length": 29.384614944458008, "blob_id": "498b6ddc1c0735a50647bea805600333dbf3d173", "content_id": "46d220a705d5094dd30b421cd9ec1ac8b3d5ca55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1184, "license_type": "no_license", "max_line_length": 92, "num_lines": 39, "path": "/src/Decoder_IMG.py", "repo_name": "CarlosEOV/Filter", "src_encoding": "UTF-8", "text": "import PIL.Image\nimport io\nimport base64\n\nfrom PySimpleGUI.PySimpleGUI import Debug\n\ndef convert_to_bytes(file_or_bytes, resize=None):\n \n if isinstance(file_or_bytes, str) and file_or_bytes != \"\":\n \n img = PIL.Image.open(file_or_bytes)\n else:\n try:\n img = PIL.Image.open(io.BytesIO(base64.b64decode(file_or_bytes)))\n except PIL.UnidentifiedImageError:\n Debug('Error al intentar abrir la imagen')\n return (None, None)\n except Exception:\n dataBytesIO = io.BytesIO(file_or_bytes)\n img = PIL.Image.open(dataBytesIO)\n\n cur_width, cur_height = img.size\n if resize:\n new_width, new_height = resize\n scale = min(new_height/cur_height, new_width/cur_width)\n img = img.resize((int(cur_width*scale), int(cur_height*scale)), PIL.Image.ANTIALIAS)\n with io.BytesIO() as bio:\n img.save(bio, format=\"PNG\")\n img_data = bio.getvalue()\n return (img, img_data)\n\ndef update_img(img):\n with io.BytesIO() as bio:\n img.save(bio, format=\"PNG\")\n img_data = bio.getvalue()\n return img_data\n\ndef get_bytes(img):\n return update_img(img)" }, { "alpha_fraction": 0.7850467562675476, "alphanum_fraction": 0.7850467562675476, "avg_line_length": 20.399999618530273, "blob_id": "2294f93983cdc890e439a3d072d1517343e3e3c4", "content_id": "557ccb3f6ec7d2cd54c714cf58992254c16930b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "no_license", "max_line_length": 88, "num_lines": 5, "path": "/README.md", "repo_name": 
"CarlosEOV/Filter", "src_encoding": "UTF-8", "text": "# Filter\n\nCarlos Eduardo Orozco Viveros\n\nAplicación de filtros y edición de imágenes para la clase de proceso digital de imágenes\n" }, { "alpha_fraction": 0.4303070306777954, "alphanum_fraction": 0.44846487045288086, "avg_line_length": 49.458778381347656, "blob_id": "4b70c7864dbf70ba6186e93cb4b4cea691e6e7b2", "content_id": "92ba2f2cee625da6f691570be3abc17626a7eadf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37945, "license_type": "no_license", "max_line_length": 221, "num_lines": 752, "path": "/src/Filter.py", "repo_name": "CarlosEOV/Filter", "src_encoding": "UTF-8", "text": "# pylint: disable=no-member\nimport PySimpleGUI as sg\nimport io\nimport os\nimport glob\nfrom PIL import Image, ImageGrab\nfrom Decoder_IMG import *\nfrom Filters import *\n\nRESOLUTION = (ImageGrab.grab().size)\nCOL_SIZE = (int(RESOLUTION[0] / 2) - 25, RESOLUTION[1])\nFRM_SIZE = (COL_SIZE[0] , int((COL_SIZE[1] / 3) * 2) )\nIMG_SIZE = (COL_SIZE[0] , int((COL_SIZE[1] / 3) * 2) )\nBG_COLOR = '#2f363d'\nMAIN_COLOR = '#1a1d21'\nSEC_COLOR = '#353942'\nTXT_COLOR = '#d5d8e0'\n\nsg.theme('DarkBlue2')\n\ndef start_filter_GUI():\n OG_IMG = None\n F_IMG = None\n\n menu_toolbar = [['&File', ['&Open', '&Save', '&Exit']],\n \n ['&Filters', ['&Grayscales' , ['Average Grayscale', 'Grayscale', 'Luma Grayscale', '---', \n 'Max and Min Grayscale', 'Max Grayscale', 'Min Grayscale', '---', \n 'Red Grayscale', 'Green Grayscale', 'Blue Grayscale', '---', \n 'Shades of Gray'],\n '&Brightness', \n '&Mosaics', ['&Mosaic', '&Image BnW', '&Image true colors'], \n '&High contrast', '&Inverted', '&RGB components', \n '&Convolution', ['&Blur', '&Motion blur', '&Find edges' , '&Sharpen', '&Emboss'],\n '&Text', ['&Color Ms', '&Grayscale Ms', '---', '&Color characters', '&Black and White characters', \n '&Grayscale characters', '---', '&Sign', '---', '&Black dominoes', '&White dominoes','---', 
'&Cards'],\n '&Blending' , '&Watermark', \n '&Semitones', ['&Semitone a', '&Semitone b', '&Semitone c'],\n '&Max Min', ['Max', 'Min'],\n 'Dithering', ['Random', 'Clustered', 'Scattered'],\n 'Fotomorsaics', ['Fotomorsaic']\n ]],\n ['&Tools', ['Create Library',]],\n ['&Help', '&About...'], ]\n \n column1_layout = [\n [sg.Frame(title='Original image', layout=[[sg.Image(size=IMG_SIZE, pad=(0, 5), key='-OG_IMAGE-', background_color=BG_COLOR)]], background_color=BG_COLOR, size=FRM_SIZE, key='-OGF_IMAGE-', relief=sg.RELIEF_RAISED)]\n \n ]\n\n column2_layout = [\n [sg.Frame(title='Modified image', layout=[[sg.Image(size=IMG_SIZE, pad=(0, 5), key='-F_IMAGE-', background_color=BG_COLOR)]], background_color=BG_COLOR, size=FRM_SIZE, key='-FF_IMAGE-', relief=sg.RELIEF_RAISED)]\n ]\n\n main_layout = [\n [sg.Menu(menu_definition=menu_toolbar, tearoff=False, background_color=MAIN_COLOR, text_color=TXT_COLOR)],\n [sg.Column(column1_layout, size=COL_SIZE, background_color= MAIN_COLOR, ),\n sg.VerticalSeparator(), \n sg.Column(column2_layout, size=COL_SIZE, background_color=MAIN_COLOR)\n ],\n ]\n \n main_window = sg.Window('Filter app',\n layout=main_layout,\n size=RESOLUTION,\n resizable=True, \n background_color=MAIN_COLOR,\n )\n\n while True:\n event, values = main_window.read()\n if event in (sg.WIN_CLOSED, 'Exit'):\n break\n \n # ------ Process menu choices ------ #\n if event == 'Open':\n filename = sg.popup_get_file('File to open', no_window=True, keep_on_top=True, modal=True, file_types=((\"PNG, JPG\", \"*.png *.jpg\"),))\n img, img_bytes = convert_to_bytes(filename)\n if img != None and img_bytes != None:\n pb_layout = [[sg.Text('Loading...')],\n [sg.ProgressBar(max_value=10, orientation='h', size=(40, 15), key='-PGRB-')]\n ]\n\n pb_window = sg.Window('Loading image', pb_layout, finalize=True, disable_close=True, modal=True)\n progress_bar = pb_window['-PGRB-']\n progress_bar.update_bar(2)\n OG_IMG = img.copy()\n F_IMG = OG_IMG.copy()\n progress_bar.update_bar(5)\n T_IMG = 
F_IMG.copy()\n T_IMG.thumbnail(size=IMG_SIZE)\n img.thumbnail(size=IMG_SIZE)\n progress_bar.update_bar(8)\n main_window['-OG_IMAGE-'].update(data=get_bytes(img), size=IMG_SIZE)\n main_window['-F_IMAGE-'].update(data=get_bytes(T_IMG), size=IMG_SIZE)\n progress_bar.update_bar(9)\n pb_window.close()\n\n if F_IMG != None:\n if event == 'Save':\n filename = ask_for_filename(default_filename='My_filter_img.png')\n if filename != None:\n F_IMG.save(filename, format='PNG')\n\n if OG_IMG != None:\n pb_layout = [[sg.Text('Loading...')],\n [sg.ProgressBar(max_value=10, orientation='h', size=(45, 15), key='-PGRB-')]\n ]\n if event == 'Average Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Luma Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Max and Min Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Max Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Min Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Red Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Green Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Blue Grayscale':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Shades of Gray':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Brightness':\n b_event, b_values = sg.Window('Brightness', [\n [sg.T('Adjust brightness')],\n [sg.Slider(range=(-180, 180), default_value=0, resolution=1, tick_interval=100, \n orientation='h', border_width=3, size=(40, 10), key='-BRGHT-', tooltip='Brightness')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n 
brightness_value = b_values['-BRGHT-']\n if brightness_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, int(brightness_value)) \n\n elif event == 'Mosaic':\n b_event, b_values = sg.Window('Mosaic', [\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n\n if w_value != None and h_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, int(w_value), int(h_value))\n \n elif event == 'High contrast':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Inverted':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'RGB components':\n b_event, b_values = sg.Window('RGB components', [\n [sg.T('Adjust RGB components')],\n [sg.T('Red')],\n [sg.Slider(range=(0, 255), default_value=0, resolution=1, tick_interval=40, \n orientation='h', border_width=3, size=(40, 10), key='-R_VALUE-', tooltip='Red')],\n [sg.T('Green')],\n [sg.Slider(range=(0, 255), default_value=0, resolution=1, tick_interval=40, \n orientation='h', border_width=3, size=(40, 10), key='-G_VALUE-', tooltip='Green')],\n [sg.T('Blue')],\n [sg.Slider(range=(0, 255), default_value=0, resolution=1, tick_interval=40, \n orientation='h', border_width=3, size=(40, 10), key='-B_VALUE-', tooltip='Blue')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n \n red = b_values['-R_VALUE-']\n green = b_values['-G_VALUE-']\n blue = b_values['-B_VALUE-']\n \n if red != None and green != None and blue 
!= None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, int(red), int(green), int(blue))\n \n elif event == 'Blur':\n b_event, b_values = sg.Window('Blur', [\n [sg.T('Select intensity matrix for blur filter')],\n [sg.Radio(text='3x3 Matrix', group_id=1, default=True, key='-3_M-'), \n sg.Radio(text='5x5 Matrix', group_id=1, default=False, key='-5_M-')\n ],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n selection = 0 if b_values['-3_M-'] else 1\n \n if selection != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, selection)\n \n elif event == 'Motion blur':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Find edges':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Sharpen':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Emboss':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n\n elif event == 'Color Ms':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Grayscale Ms':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Black and White characters':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Color characters':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Grayscale characters':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Sign':\n s_event, s_values = sg.Window('Sign', layout=[\n [sg.T('Type a message for sign filter (20 characters maximum)')],\n [sg.Input(default_text='This is my message', size=(80, 10), tooltip='Type something!', do_not_clear=False, key='-SIGN_TXT-')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n message = s_values['-SIGN_TXT-']\n\n if message != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, message)\n 
\n elif event == 'Black dominoes':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'White dominoes':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Cards':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Blending':\n filename = sg.popup_get_file('Image to blend', no_window=True, keep_on_top=True, modal=True, file_types=((\"PNG, JPG\", \"*.png *.jpg\"),))\n img_to_blend, img_bytes = convert_to_bytes(filename)\n if img_to_blend != None and img_bytes != None:\n b_event, b_values = sg.Window('Blending', [\n [sg.T('Adjust blending')],\n [sg.Column([[sg.Frame(title='Image to blend', \n layout=[[sg.Image(size=(230, 230), pad=(0, 5), \n key='-OG_IM-',\n data=get_bytes(img_to_blend.copy().resize((230,230))), \n background_color=BG_COLOR)]], \n background_color=BG_COLOR, \n size=(240, 240), \n key='-OGF_IM-', \n relief=sg.RELIEF_RAISED)]], \n size=(250, 250), \n background_color= MAIN_COLOR, ),\n sg.VerticalSeparator(), \n sg.Column([[sg.Frame(title='Original image', \n layout=[[sg.Image(size=(230, 230), pad=(0, 5), \n key='-BD_IM-',\n data=get_bytes(OG_IMG.copy().resize((230,230))), \n background_color=BG_COLOR)]], \n background_color=BG_COLOR, \n size=(240, 240), \n key='-BDF_IM-', \n relief=sg.RELIEF_RAISED)]], \n size=(250, 250), \n background_color= MAIN_COLOR, ),\n ],\n [sg.Slider(range=(0, 100), default_value=50, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-BLDN-', tooltip='Blending')],\n [sg.Button('Ok')]\n ], element_justification='center', modal=True, keep_on_top=True).read(close=True)\n blending_value = b_values['-BLDN-']\n if blending_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, img_to_blend, blending_value * 0.01)\n \n elif event == 'Watermark':\n w_layout = [[sg.T('Set your watermark')],\n [sg.HorizontalSeparator()],\n \n [sg.T('Position')],\n [sg.T('Horizontal'),\n 
sg.Radio(text='Left', group_id=1, default=True, key='-L_H-'),\n sg.Radio(text='Center', group_id=1, default=False, key='-C_H-'), \n sg.Radio(text='Right', group_id=1, default=False, key='-R_H-'),\n sg.VerticalSeparator(),\n sg.T('Vertical'),\n sg.Radio(text='Top', group_id=2, default=True, key='-T_V-'), \n sg.Radio(text='Center', group_id=2, default=False, key='-C_V-'),\n sg.Radio(text='Bottom', group_id=2, default=False, key='-B_V-')],\n \n [sg.T('Text (60 characters maximum)')],\n [sg.Input(default_text='This is my watermark', size=(80, 10), tooltip='Type something!', \n do_not_clear=False, key='-WM_TXT-')],\n [sg.T('Alpha')],\n [sg.Slider(range=(0, 100), default_value=50, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-ALPHA-', tooltip='Alpha')],\n\n [sg.Button('Ok')]\n ]\n w_event, w_values = sg.Window(title='Watermark', layout=w_layout, element_justification='center', \n modal=True, keep_on_top=True).read(close=True)\n text = w_values['-WM_TXT-']\n w_h = 2\n if w_values['-L_H-']:\n w_h = 0\n elif w_values['-C_H-']:\n w_h = 1\n w_v = 2\n if w_values['-T_V-']:\n w_v = 0\n elif w_values['-C_V-']:\n w_v = 1\n position = (w_h, w_v)\n alpha = w_values['-ALPHA-']\n \n if text != None and alpha != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, text, position, alpha * 0.01)\n \n elif event =='Image BnW':\n filename = sg.popup_get_file('Image for grid', no_window=True, keep_on_top=True, modal=True, file_types=((\"PNG, JPG\", \"*.png *.jpg\"),))\n img_for_grid, img_bytes = convert_to_bytes(filename)\n if img_for_grid != None and img_bytes != None:\n b_event, b_values = sg.Window('Mosaic', [\n [sg.Column([[sg.Frame(title='Image for grid', \n layout=[[sg.Image(size=(230, 230), pad=(0, 5), \n key='-OG_IM-',\n data=get_bytes(img_for_grid.copy().resize((230,230))), \n background_color=BG_COLOR)]], \n background_color=BG_COLOR, \n size=(240, 240), \n key='-OGF_IM-', \n relief=sg.RELIEF_RAISED)]], \n size=(250, 
250), \n background_color= MAIN_COLOR, ),\n sg.VerticalSeparator(), \n sg.Column([[sg.Frame(title='Base image', \n layout=[[sg.Image(size=(230, 230), pad=(0, 5), \n key='-BD_IM-',\n data=get_bytes(OG_IMG.copy().resize((230,230))), \n background_color=BG_COLOR)]], \n background_color=BG_COLOR, \n size=(240, 240), \n key='-BDF_IM-', \n relief=sg.RELIEF_RAISED)]], \n size=(250, 250), \n background_color= MAIN_COLOR, ),\n ],\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n\n if w_value != None and h_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, img_for_grid, int(w_value), int(h_value))\n \n elif event == 'Image true colors':\n filename = sg.popup_get_file('Image for grid', no_window=True, keep_on_top=True, modal=True, file_types=((\"PNG, JPG\", \"*.png *.jpg\"),))\n img_for_grid, img_bytes = convert_to_bytes(filename)\n if img_for_grid != None and img_bytes != None:\n b_event, b_values = sg.Window('Mosaic true colors', [\n [sg.Column([[sg.Frame(title='Image for grid', \n layout=[[sg.Image(size=(230, 230), pad=(0, 5), \n key='-OG_IM-',\n data=get_bytes(img_for_grid.copy().resize((230,230))), \n background_color=BG_COLOR)]], \n background_color=BG_COLOR, \n size=(240, 240), \n key='-OGF_IM-', \n relief=sg.RELIEF_RAISED)]], \n size=(250, 250), \n background_color= MAIN_COLOR, ),\n sg.VerticalSeparator(), \n sg.Column([[sg.Frame(title='Base image', \n layout=[[sg.Image(size=(230, 230), pad=(0, 5), \n key='-BD_IM-',\n 
data=get_bytes(OG_IMG.copy().resize((230,230))), \n background_color=BG_COLOR)]], \n background_color=BG_COLOR, \n size=(240, 240), \n key='-BDF_IM-', \n relief=sg.RELIEF_RAISED)]], \n size=(250, 250), \n background_color= MAIN_COLOR, ),\n ],\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n\n if w_value != None and h_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, img_for_grid, int(w_value), int(h_value))\n\n elif event == 'Semitone a':\n b_event, b_values = sg.Window('Semitone a', [\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n\n if w_value != None and h_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, 0, int(w_value), int(h_value))\n \n elif event == 'Semitone b':\n b_event, b_values = sg.Window('Semitone b', [\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', 
tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n\n if w_value != None and h_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, 1, int(w_value), int(h_value))\n \n elif event == 'Semitone c':\n b_event, b_values = sg.Window('Semitone c', [\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n\n if w_value != None and h_value != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, 2, int(w_value), int(h_value))\n \n elif event == 'Max':\n b_event, b_values = sg.Window('Max', [\n [sg.T('Select intensity matrix for Max filter')],\n [sg.Radio(text='3x3 Matrix', group_id=1, default=True, key='-3_M-'), \n sg.Radio(text='5x5 Matrix', group_id=1, default=False, key='-5_M-')\n ],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n selection = 3 if b_values['-3_M-'] else 5\n \n if selection != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, (selection,selection), True)\n \n elif event == 'Min':\n b_event, b_values = sg.Window('Min', [\n [sg.T('Select intensity matrix for Min filter')],\n [sg.Radio(text='3x3 Matrix', group_id=1, default=True, key='-3_M-'), \n sg.Radio(text='5x5 Matrix', group_id=1, 
default=False, key='-5_M-')\n ],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n\n selection = 3 if b_values['-3_M-'] else 5\n \n if selection != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, (selection,selection), False)\n \n elif event == 'Random':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n \n elif event == 'Clustered':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n elif event == 'Scattered':\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window)\n elif event == 'Fotomorsaic':\n filename = sg.popup_get_file('Library idx', no_window=True, keep_on_top=True, modal=True, file_types=((\"IDX\", \"*.idx\"),))\n foldername = sg.popup_get_folder('Images library', no_window=True, keep_on_top=True, modal=True)\n b_event, b_values = sg.Window('Mosaic', [\n [sg.T('Adjust mosaic size')],\n [sg.T('Height')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-H_VALUE-', tooltip='Height')],\n [sg.T('Width')],\n [sg.Slider(range=(10, 100), default_value=10, resolution=1, tick_interval=20, \n orientation='h', border_width=3, size=(40, 10), key='-W_VALUE-', tooltip='Width')],\n [sg.T('Image selection')],\n [sg.Radio(text='Best fit', group_id=1, default=True, key='-BF-'), \n sg.Radio(text='Randomize', group_id=1, default=False, key='-RM-')\n ],\n [sg.Button('Ok')]\n ], modal=True, keep_on_top=True).read(close=True)\n w_value = b_values['-W_VALUE-']\n h_value = b_values['-H_VALUE-']\n selection = b_values['-BF-']\n if w_value != None and h_value != None and filename != None and foldername != None and selection != None:\n F_IMG = OG_IMG.copy()\n apply_filter(event, F_IMG, main_window, (foldername, filename), (int(w_value), int(h_value)), selection)\n\n if event == 'Create Library':\n foldername = sg.popup_get_folder('Images folder', no_window=True, keep_on_top=True, modal=True)\n if foldername 
!= None and foldername != '': \n create_library(foldername)\n\n if event == 'About...':\n sg.popup('Filter App', 'Version 2.00', 'Carlos Eduardo Orozco Viveros', 'Release date: 08/11/21',\n grab_anywhere=True, modal=True, \n background_color=MAIN_COLOR, text_color=TXT_COLOR, no_titlebar=True)\n\n main_window.close()\n\ndef ask_for_filename(default_filename='', initial_folder=None, size=None):\n \n if initial_folder is None:\n initial_folder = os.getcwd()\n\n save_layout = [[\n sg.InputText(key='-FILETOSAVE-', default_text=default_filename, enable_events=True, justification='l'),\n sg.InputText(key='-SAVEAS-', do_not_clear=False, enable_events=True, visible=False, ),\n sg.FileSaveAs('Select', initial_folder=initial_folder, file_types=((\"PNG\", \"*.png\"),))],\n [sg.Button('OK', bind_return_key=True), sg.Button('Cancel')]\n ]\n\n save_window = sg.Window('Save image', save_layout, keep_on_top=True, modal=True)\n\n while True:\n event, values = save_window.Read()\n if event is None or event == 'Cancel':\n save_window.close()\n return None\n elif event == '-SAVEAS-':\n filename = values['-SAVEAS-']\n if filename:\n save_window['-FILETOSAVE-'].update(value=filename)\n elif event == \"OK\":\n save_window.close()\n return values['-FILETOSAVE-']\n\ndef apply_filter(filter_name, F_IMG, main_window, param_1=0, param_2=0, param_3=0):\n pb_layout = [[sg.Text('Loading...')],\n [sg.ProgressBar(max_value=10, orientation='h', size=(45, 15), key='-PGRB-')]\n ]\n pb_window = sg.Window(title='Loading filter', layout=pb_layout, finalize=True, disable_close=True, modal=True)\n progress_bar = pb_window['-PGRB-']\n progress_bar.update_bar(1)\n choose_filter(filter_name, F_IMG, param_1, param_2, param_3)\n progress_bar.update_bar(6)\n T_IMG = F_IMG.copy()\n T_IMG.thumbnail(size=IMG_SIZE)\n progress_bar.update_bar(8)\n main_window['-F_IMAGE-'].update(data=get_bytes(T_IMG), size=IMG_SIZE)\n progress_bar.update_bar(9)\n pb_window.close()\n\ndef create_library(path):\n files = 
os.listdir(path)\n number_files = len(files) + 10\n progress_value = 0\n \n pb_layout = [[sg.Text('Loading...', key='-ACTUAL-')],\n [sg.ProgressBar(max_value=number_files, orientation='h', size=(100, 15), key='-PGRB-')]\n ]\n pb_window = sg.Window(title='Loading images', layout=pb_layout, finalize=True, disable_close=True, modal=True)\n progress_bar = pb_window['-PGRB-']\n progress_value += 1\n progress_bar.update_bar(progress_value)\n \n file = open('library/lib.idx', 'w')\n images = glob.glob(path + \"/*.jpg\")\n progress_value += 1\n progress_bar.update_bar(progress_value)\n for image in images:\n img = Image.open(image)\n color = average_color(img)\n line = str(color[0]) +' '+ str(color[1]) +' '+ str(color[2]) +' '+ os.path.basename(image) + os.linesep\n progress_value += 1\n progress_bar.update_bar(progress_value)\n file.write(line)\n \n file.close()\n pb_window.close()\n \ndef choose_filter(filter_name, F_IMG, param_1, param_2, param_3):\n if filter_name == 'Average Grayscale':\n average_grayscale(F_IMG)\n if filter_name == 'Grayscale':\n grayscale(F_IMG)\n if filter_name == 'Luma Grayscale':\n luma_grayscale(F_IMG)\n if filter_name == 'Max and Min Grayscale':\n max_min_grayscale(F_IMG)\n if filter_name == 'Max Grayscale':\n max_grayscale(F_IMG)\n if filter_name == 'Min Grayscale':\n min_grayscale(F_IMG)\n if filter_name == 'Red Grayscale':\n red_grayscale(F_IMG)\n if filter_name == 'Green Grayscale':\n green_grayscale(F_IMG)\n if filter_name == 'Blue Grayscale':\n blue_grayscale(F_IMG)\n if filter_name == 'Shades of Gray':\n shades_of_grayscale(F_IMG, 8)\n if filter_name == 'Brightness':\n brightness(F_IMG, param_1)\n if filter_name == 'Mosaic':\n mosaic(F_IMG, param_1, param_2)\n if filter_name == 'High contrast':\n high_contrast(F_IMG)\n if filter_name == 'Inverted':\n high_contrast(F_IMG, True)\n if filter_name == 'RGB components':\n RGB_components(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Blur':\n blur(F_IMG, param_1)\n if filter_name == 
'Motion blur':\n motion_blur(F_IMG)\n if filter_name == 'Find edges':\n find_edges(F_IMG)\n if filter_name == 'Sharpen':\n sharpen(F_IMG)\n if filter_name == 'Emboss': \n emboss(F_IMG)\n if filter_name == 'Color Ms':\n mosaic(F_IMG, 5, 5, 1)\n if filter_name == 'Grayscale Ms':\n mosaic(F_IMG, 5, 5, 2)\n if filter_name == 'Black and White characters':\n mosaic(F_IMG, 5, 5, 3)\n if filter_name == 'Color characters':\n mosaic(F_IMG, 5, 5, 4)\n if filter_name == 'Grayscale characters':\n mosaic(F_IMG, 5, 5, 5)\n if filter_name == 'Sign':\n mosaic(F_IMG, 10, 10, 9, param_1.upper())\n if filter_name == 'Black dominoes':\n mosaic(F_IMG, 10, 10, 6)\n if filter_name == 'White dominoes':\n mosaic(F_IMG, 10, 10, 7)\n if filter_name == 'Cards':\n mosaic(F_IMG, 10, 10, 8)\n if filter_name == 'Blending':\n blend(F_IMG, param_1, param_2)\n if filter_name == 'Watermark':\n watermark(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Image BnW':\n mosaic_img_bw(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Image true colors':\n mosaic_true_colors(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Semitone a':\n semitone(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Semitone b':\n semitone(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Semitone c':\n semitone(F_IMG, param_1, param_2, param_3)\n if filter_name == 'Max':\n max_min(F_IMG, param_1, param_2)\n if filter_name == 'Min':\n max_min(F_IMG, param_1, param_2)\n if filter_name == 'Random':\n random_dithering(F_IMG)\n if filter_name == 'Clustered':\n clustered_dithering(F_IMG)\n if filter_name == 'Scattered':\n scattered_dithering(F_IMG)\n if filter_name == 'Create Library':\n create_library(param_1)\n if filter_name == 'Fotomorsaic':\n fotomorsaic(F_IMG, param_1, param_2, param_3)\n\nif __name__ == '__main__':\n start_filter_GUI()\n" } ]
4
ishambhandari/Python
https://github.com/ishambhandari/Python
dbc3a2bd2962657a16ebb72482ea1c651ebcfb96
409fd8c2bf015da903b5d7eca0b65f33af4f3eab
20298acc3a88c80edc2fab493e1798b80e775422
refs/heads/master
2021-05-11T14:16:33.790373
2018-01-16T17:22:19
2018-01-16T17:22:19
117,699,264
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.584269642829895, "alphanum_fraction": 0.5955055952072144, "avg_line_length": 30.81818199157715, "blob_id": "4385afdcea29c232dfe3160b8845ce4afb2846a3", "content_id": "90f3cbf1feac3b97be6c2edaaf51aae83dbc3200", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 356, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/readfiles.py", "repo_name": "ishambhandari/Python", "src_encoding": "UTF-8", "text": "#This function reads the text files..\ndef readInventory():\n inventory = []\n file = open(\"stock.txt\", \"r\") #Reading the text file\n lines = file.readlines()\n for line in lines:\n product = line.split(',')\n product[1] = int(product[1])\n product[2] = int(product[2])\n inventory.append(product)\n return inventory\n \n\n" }, { "alpha_fraction": 0.5237458348274231, "alphanum_fraction": 0.5284280776977539, "avg_line_length": 39.054054260253906, "blob_id": "8eabffb2b55defbd51c2a013ae67aa9cc96f2aa2", "content_id": "a81c9d99db8f2e9310f36478e5a7c3954feee312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1495, "license_type": "no_license", "max_line_length": 143, "num_lines": 37, "path": "/main.py", "repo_name": "ishambhandari/Python", "src_encoding": "UTF-8", "text": "from purchase import purchase,discountAmount,createInvoice\nfrom readfiles import readInventory\nfrom updateinventory import updateStock\nimport datetime\nprint(\"Hello!!! 
This is an electronic store.We sell different kinds of mobile phones,laptops and Harddisks.Please Proceed if you wish to buy.\")\n\ndef main():\n person_name = input(\"Enter your full name\")\n inventory = readInventory()\n purchases = []\n ans = True\n while ans == True:\n handling_1 = True\n while handling_1 == True:\n try: \n ans = input(\"would you like to make a purchase?(y/n)\")\n if ans==\"y\":\n purchased_item = purchase(inventory)\n if (purchased_item):\n purchases.append(purchased_item)\n ans = True\n elif ans==\"n\":\n ans=False\n handling_1 = False\n else:\n handling_1 = True\n print(\"Please enter y or n\")\n except:\n print(\"Please enter correct values.\")\n handling_1 = True\n \n \n print(\"We give 10% discount in our product.Discount amount is subtracted in your bills.Enjoy shopping...\")\n discount_check = True\n createInvoice(person_name, purchases, discount_check)\n print(\"Thank you for visiting our store..\")\nmain()\n\t\n\t\t\n\t\t\n \n" }, { "alpha_fraction": 0.5373831987380981, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 34.83333206176758, "blob_id": "5138a647ec964306568e72bbf3b140cbc239f5b1", "content_id": "aa02507058b3149102c1b368d2745ddbd6710e0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 80, "num_lines": 6, "path": "/updateinventory.py", "repo_name": "ishambhandari/Python", "src_encoding": "UTF-8", "text": "def updateStock(inventory):\n file = open(\"stock.txt\", \"w\")\n for product in inventory:\n line = product[0] + \",\" + str(product[1]) + \",\" + str(product[2]) + \"\\n\"\n file.write(line)\n file.close()" }, { "alpha_fraction": 0.5847275853157043, "alphanum_fraction": 0.5924496054649353, "avg_line_length": 39.77193069458008, "blob_id": "8b150a351b3da32afdd3cb9ea1899c67deba4e7e", "content_id": "dd8f5d96dbdea88048d298a4d4972580c96fab38", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 2331, "license_type": "no_license", "max_line_length": 102, "num_lines": 57, "path": "/purchase.py", "repo_name": "ishambhandari/Python", "src_encoding": "UTF-8", "text": "import readfiles\nfrom updateinventory import updateStock\nimport datetime\ninventory = readfiles.readInventory() #Assigning readInventory function from readfiles.py to inventory\n#This is the main function. It take input and calls other functions.\n#This is purchase(inventory) function. \ndef purchase(inventory):\n for index, product in enumerate(inventory, 1):\n print(str(index) + \". \" + product[0])\n choice = int(input(\"What would you like to purchase? \"))\n name = inventory[choice - 1][0]\n price = inventory[choice - 1][1]\n stock = int(inventory[choice - 1][2])\n print(\"Price: \" + str(price))\n print(\"Available: \" + str(stock))\n quantity = int(input(\"How many \" + name + \" would you like to buy?\"))\n if stock - quantity < 0:\n print(\"Out of stock!!\")\n return False\n stock = stock - quantity\n inventory[choice - 1][2] = stock\n updateStock(inventory)\n return [name, price, quantity]\n\ndef discountAmount(price):\n return price * 0.1\n \ndef createInvoice(person_name, purchases, discount_check):\n Total_price = []\n invoice_name = person_name + '-' + str(datetime.datetime.now())\n file = open(invoice_name+\".txt\",\"w\")\n file.write('Person Name: ' + person_name + '\\n')\n file.write('Purchase Date ' + str(datetime.datetime.now()) + '\\n')\n file.write('Purchase details\\n'+\"\\n\")\n for purchase in purchases:\n price = purchase[1]\n quantity = purchase[2]\n total = price * quantity\n if (discount_check):\n discount = discountAmount(total)\n else:\n discount = 0\n net = total - discount\n file.write(\"Product Name=\" + '\\t'+ purchase[0]+ '\\n')\n file.write(\"Price=\" + '\\t'+ str(price)+\"$\" + '\\n')\n file.write(\"Quantity=\" + '\\t'+ str(quantity)+\" piece\" + '\\n')\n file.write(\"Total=\" + '\\t'+ str(total) +\"$\"+ '\\n')\n 
file.write(\"Discount amount=\" + '\\t'+ str(discount) +\"$\"+ '\\n')\n file.write(\"Final amount=\" + '\\t'+ str(net) + \"$\"+'\\n'+\"\\n\"+\"\\n\"+\"\\n\")\n Total_price.append(int(net))\n sum_ = 0\n for prices in Total_price:\n sum_ = float(sum_) + prices\n file.write(\"Total amount =\" + str(sum_)+\"$\")\n print(\"Total amount =\",float(sum_),\"$\"+'\\n')\n print(\"Please check your invoice for further details..\")\n file.close()\n \n\n\n" } ]
4
Abdulmateenchitrali/python_project
https://github.com/Abdulmateenchitrali/python_project
1ef52268baa463c491e3ca66c5b64e3b2152d31b
bf74acf5751efa9038317667bde2d669d3206510
3c7bd99babc1f7be83ddd3978b2902a58ce7159c
refs/heads/master
2020-07-30T10:52:29.443558
2019-09-22T19:30:29
2019-09-22T19:30:29
210,202,023
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 11, "blob_id": "4b8991a67cfab6701a82b5c49881996f8646a4ea", "content_id": "628b6036fe3c7e006fddda26f447701e91645f03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/demo.py", "repo_name": "Abdulmateenchitrali/python_project", "src_encoding": "UTF-8", "text": "import tkinter\ntop=tkinter.Tk()\n\ntop.mainloop()\n" } ]
1
StephenLesage/send_SMS_via_gmail
https://github.com/StephenLesage/send_SMS_via_gmail
4a5a82a4eec02561c5f8904bd2464da920144695
0025b1868439df5472b7ec68e8063224054dc82d
166807cade8e50283fec790dde55b2ebae1cc381
refs/heads/master
2023-06-10T16:23:30.831447
2021-07-03T06:10:19
2021-07-03T06:10:19
382,539,411
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5642756819725037, "alphanum_fraction": 0.5665259957313538, "avg_line_length": 25.514925003051758, "blob_id": "3221437ab9ee11cf47639fee8aed7268f663ef91", "content_id": "e4dc41f42467dd5cd4cbd32ee3cfd8ca6697ada3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3555, "license_type": "no_license", "max_line_length": 92, "num_lines": 134, "path": "/send_SMS.py", "repo_name": "StephenLesage/send_SMS_via_gmail", "src_encoding": "UTF-8", "text": "'''\nIf an error is raised, visit:\nhttps://www.google.com/settings/security/lesssecureapps\nand change:\n'Allow less secure apps'\nto \n'ON'\n'''\n\nimport smtplib \nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n############################################################################################\n################################# Enter Specifics of User ##################################\n############################################################################################\n\n# server login info\nemail_info = ['[email protected]','John_Doe_email_password']\n\n# sender info\nsender_info = ['John Doe']\n\n# contacts list\ncontacts_list = {\n\t\t\t\t 1: {'name': 'John Doe', 'number': 'XXXXXXXXXX', 'carrier': 'att' },\n\t\t\t\t 2: {'name': 'Jane Doe', 'number': 'XXXXXXXXXX', 'carrier': 'verizon'}\n\t\t\t\t}\n############################################################################################\n############################################################################################\n############################################################################################\n\ndef message_info(contacts_list):\n\n\t# parse contacts list\n\tkeys = contacts_list.keys()\n\n\t# print contacts list\n\tprint('\\n-- CONTACTS LIST --\\n')\n\tfor key in keys:\n\t\tprint(str(key)+': '+contacts_list[key]['name'])\n\n\t# prompt user to select contact from contacts list\n\tuser_input = 
int(input('\\nSelect contact:\\n'))\n\n\t# check user input\n\tif user_input <= key and user_input >= 1:\n\t\tgood_input = True \n\telse:\n\t\tgood_input = False\n\n\t# if bad user input print again until good user input\n\twhile good_input == False:\n\n\t\t# print contacts list\n\t\tprint('\\n-- CONTACTS LIST --\\n')\n\t\tfor key in keys:\n\t\t\tprint(str(key)+': '+contacts_list[key]['name'])\n\n\t\t# prompt user to select contact from contacts list\n\t\tprint('\\nBad input. Try again.')\n\t\tuser_input = int(input('Select contact:\\n'))\n\n\t\t# check user input\n\t\tif user_input <= key and user_input >= 1:\n\t\t\tgood_input = True \n\t\telse:\n\t\t\tgood_input = False\n\n\t# SMS contact info\n\tcontact_info = contacts_list[int(user_input)]\n\n\t# SMS subject\n\tsubject = str(input('\\nEnter SMS subject:\\n'))\n\n\t# SMS message\n\tmessage = str(input('\\nEnter SMS message:\\n'))\n\n\treturn( contact_info, subject, message )\n\n# mobile carriers list\nmobile_carriers = {'att': '@txt.att.net',\n\t\t\t\t 'sprint': '@messaging.sprintpcs.com',\n\t\t\t\t 'verizon': '@vtext.com',\n\t\t\t\t 'cricket': '@mms.cricketwireless.net',\n\t\t\t\t 'tmobile': '@tmomail.net',\n\t\t\t\t 'metropcs': '@mymetropcs.com',\n\t\t\t\t 'spectrum': '@vtext.com',\n\t\t\t\t 'boostmobile': '@sms.myboostmobile.com'}\n\n# retrieve server login info\nemail, password = email_info\n\n# retrieve SMS info\ncontact_info, sms_subject, sms_body = message_info(contacts_list)\n\n# parse contact info to be sent to server\nmobile_carrier_keys = mobile_carriers.keys()\nfor key in mobile_carrier_keys:\n\tif contact_info['carrier'] == key:\n\t\tsend_sms_to = contact_info['number'] + mobile_carriers[key]\n\n# gmail server info\nsmtp = \"smtp.gmail.com\" \nport = 587\n\n# initialize server\nserver = smtplib.SMTP(smtp, port)\n\n# open server sonnection\nserver.starttls()\n\n# login to server\nserver.login(email, password)\n\n# structure message\nmsg = MIMEMultipart()\n\n# fill in SMS info\nmsg['From'] = 
sender_info[0]\nmsg['To'] = send_sms_to\nmsg['Subject'] = sms_subject + '\\n'\nmsg.attach( MIMEText( sms_body, 'plain' ) )\n\n# convert SMS from <byte> to <string>\nsms = msg.as_string()\n\n# send SMS\nprint('\\nSending SMS ... ')\nserver.sendmail( email, send_sms_to, sms )\nprint('SMS sent.')\n\n# close server connection\nserver.quit()\n\n\n" } ]
1
99ashr/WebScraping
https://github.com/99ashr/WebScraping
fdbe4ef91d4ecf09d6692eb1033759d3e5e5dcdb
b09e774be9adadccc02e69c055bfd8dae8f05822
8689e659c48ad46a151d5b8a43f0da485b6472c2
refs/heads/master
2020-04-17T07:29:04.535950
2019-01-18T22:45:11
2019-01-18T22:45:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5515367388725281, "alphanum_fraction": 0.5916857123374939, "avg_line_length": 45.17195510864258, "blob_id": "c3b8a30d55345b13e6d92940f4a2b352e03ec504", "content_id": "56d4f5dea418bb8c3729b9dcc4209c345438f2a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 62576, "license_type": "no_license", "max_line_length": 1190, "num_lines": 1355, "path": "/Untitled.md", "repo_name": "99ashr/WebScraping", "src_encoding": "UTF-8", "text": "\n\n```python\n#!!!import urllib!!!\nimport urllib.request as req\n```\n\n\n```python\n#!!!import beautifulsoup!!!\nfrom bs4 import BeautifulSoup\n```\n\n\n```python\n#!!!url of the link!!!\nlink = \"https://secure.php.net/\"\n#dir(link)\n```\n\n\n```python\nwebpage = req.urlopen(link)\n#dir(webpage)\n```\n\n\n```python\n tags= BeautifulSoup(webpage)\n#soup=BeautifulSoup(webpage)\n```\n\n\n```python\nprint(tags.prettify)\n```\n\n <bound method Tag.prettify of <!DOCTYPE html>\n <html lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"><head>\n \n <meta charset=\"utf-8\"/>\n <meta content=\"width=device-width, initial-scale=1.0\" name=\"viewport\"/>\n \n <title>PHP: Hypertext Preprocessor</title>\n \n <link href=\"https://php.net/favicon.ico\" rel=\"shortcut icon\"/>\n <link href=\"http://php.net/phpnetimprovedsearch.src\" rel=\"search\" title=\"Add PHP.net search\" type=\"application/opensearchdescription+xml\"/>\n <link href=\"https://php.net/releases/feed.php\" rel=\"alternate\" title=\"PHP Release feed\" type=\"application/atom+xml\"/>\n <link href=\"https://php.net/feed.atom\" rel=\"alternate\" title=\"PHP: Hypertext Preprocessor\" type=\"application/atom+xml\"/>\n \n <link href=\"http://php.net/index.php\" rel=\"canonical\"/>\n <link href=\"http://php.net/index\" rel=\"shorturl\"/>\n <link href=\"http://php.net/index\" hreflang=\"x-default\" rel=\"alternate\"/>\n \n \n \n <link href=\"/cached.php?t=1539771603&amp;f=/fonts/Fira/fira.css\" media=\"screen\" 
rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"/cached.php?t=1539765004&amp;f=/fonts/Font-Awesome/css/fontello.css\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"/cached.php?t=1540425603&amp;f=/styles/theme-base.css\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"/cached.php?t=1540425603&amp;f=/styles/theme-medium.css\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"/cached.php?t=1429259403&amp;f=/styles/home.css\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\"/>\n \n <!--[if lte IE 7]>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://php.net/styles/workarounds.ie7.css\" media=\"screen\">\n <![endif]-->\n \n <!--[if lte IE 8]>\n <script>\n window.brokenIE = true;\n </script>\n <![endif]-->\n \n <!--[if lte IE 9]>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://php.net/styles/workarounds.ie9.css\" media=\"screen\">\n <![endif]-->\n \n <!--[if IE]>\n <script src=\"https://php.net/js/ext/html5.js\"></script>\n <![endif]-->\n \n <base href=\"https://php.net/index.php\"/>\n \n </head>\n <body class=\"home\">\n \n <nav class=\"navbar navbar-fixed-top\" id=\"head-nav\">\n <div class=\"navbar-inner clearfix\">\n <a class=\"brand\" href=\"/\"><img alt=\"php\" height=\"24\" src=\"/images/logos/php-logo.svg\" width=\"48\"/></a>\n <div id=\"mainmenu-toggle-overlay\"></div>\n <input id=\"mainmenu-toggle\" type=\"checkbox\"/>\n <ul class=\"nav\">\n <li class=\"\"><a href=\"/downloads\">Downloads</a></li>\n <li class=\"\"><a href=\"/docs.php\">Documentation</a></li>\n <li class=\"\"><a href=\"/get-involved\">Get Involved</a></li>\n <li class=\"\"><a href=\"/support\">Help</a></li>\n </ul>\n <form action=\"/search.php\" class=\"navbar-search\" id=\"topsearch\">\n <input name=\"show\" type=\"hidden\" value=\"quickref\"/>\n <input accesskey=\"s\" class=\"search-query\" name=\"pattern\" placeholder=\"Search\" type=\"search\"/>\n </form>\n </div>\n <div 
id=\"flash-message\"></div>\n </nav>\n <nav id=\"trick\"><div><dl>\n <dt><a href=\"/manual/en/getting-started.php\">Getting Started</a></dt>\n \t<dd><a href=\"/manual/en/introduction.php\">Introduction</a></dd>\n \t<dd><a href=\"/manual/en/tutorial.php\">A simple tutorial</a></dd>\n <dt><a href=\"/manual/en/langref.php\">Language Reference</a></dt>\n \t<dd><a href=\"/manual/en/language.basic-syntax.php\">Basic syntax</a></dd>\n \t<dd><a href=\"/manual/en/language.types.php\">Types</a></dd>\n \t<dd><a href=\"/manual/en/language.variables.php\">Variables</a></dd>\n \t<dd><a href=\"/manual/en/language.constants.php\">Constants</a></dd>\n \t<dd><a href=\"/manual/en/language.expressions.php\">Expressions</a></dd>\n \t<dd><a href=\"/manual/en/language.operators.php\">Operators</a></dd>\n \t<dd><a href=\"/manual/en/language.control-structures.php\">Control Structures</a></dd>\n \t<dd><a href=\"/manual/en/language.functions.php\">Functions</a></dd>\n \t<dd><a href=\"/manual/en/language.oop5.php\">Classes and Objects</a></dd>\n \t<dd><a href=\"/manual/en/language.namespaces.php\">Namespaces</a></dd>\n \t<dd><a href=\"/manual/en/language.errors.php\">Errors</a></dd>\n \t<dd><a href=\"/manual/en/language.exceptions.php\">Exceptions</a></dd>\n \t<dd><a href=\"/manual/en/language.generators.php\">Generators</a></dd>\n \t<dd><a href=\"/manual/en/language.references.php\">References Explained</a></dd>\n \t<dd><a href=\"/manual/en/reserved.variables.php\">Predefined Variables</a></dd>\n \t<dd><a href=\"/manual/en/reserved.exceptions.php\">Predefined Exceptions</a></dd>\n \t<dd><a href=\"/manual/en/reserved.interfaces.php\">Predefined Interfaces and Classes</a></dd>\n \t<dd><a href=\"/manual/en/context.php\">Context options and parameters</a></dd>\n \t<dd><a href=\"/manual/en/wrappers.php\">Supported Protocols and Wrappers</a></dd>\n </dl>\n <dl>\n <dt><a href=\"/manual/en/security.php\">Security</a></dt>\n \t<dd><a href=\"/manual/en/security.intro.php\">Introduction</a></dd>\n 
\t<dd><a href=\"/manual/en/security.general.php\">General considerations</a></dd>\n \t<dd><a href=\"/manual/en/security.cgi-bin.php\">Installed as CGI binary</a></dd>\n \t<dd><a href=\"/manual/en/security.apache.php\">Installed as an Apache module</a></dd>\n \t<dd><a href=\"/manual/en/security.sessions.php\">Session Security</a></dd>\n \t<dd><a href=\"/manual/en/security.filesystem.php\">Filesystem Security</a></dd>\n \t<dd><a href=\"/manual/en/security.database.php\">Database Security</a></dd>\n \t<dd><a href=\"/manual/en/security.errors.php\">Error Reporting</a></dd>\n \t<dd><a href=\"/manual/en/security.globals.php\">Using Register Globals</a></dd>\n \t<dd><a href=\"/manual/en/security.variables.php\">User Submitted Data</a></dd>\n \t<dd><a href=\"/manual/en/security.magicquotes.php\">Magic Quotes</a></dd>\n \t<dd><a href=\"/manual/en/security.hiding.php\">Hiding PHP</a></dd>\n \t<dd><a href=\"/manual/en/security.current.php\">Keeping Current</a></dd>\n <dt><a href=\"/manual/en/features.php\">Features</a></dt>\n \t<dd><a href=\"/manual/en/features.http-auth.php\">HTTP authentication with PHP</a></dd>\n \t<dd><a href=\"/manual/en/features.cookies.php\">Cookies</a></dd>\n \t<dd><a href=\"/manual/en/features.sessions.php\">Sessions</a></dd>\n \t<dd><a href=\"/manual/en/features.xforms.php\">Dealing with XForms</a></dd>\n \t<dd><a href=\"/manual/en/features.file-upload.php\">Handling file uploads</a></dd>\n \t<dd><a href=\"/manual/en/features.remote-files.php\">Using remote files</a></dd>\n \t<dd><a href=\"/manual/en/features.connection-handling.php\">Connection handling</a></dd>\n \t<dd><a href=\"/manual/en/features.persistent-connections.php\">Persistent Database Connections</a></dd>\n \t<dd><a href=\"/manual/en/features.safe-mode.php\">Safe Mode</a></dd>\n \t<dd><a href=\"/manual/en/features.commandline.php\">Command line usage</a></dd>\n \t<dd><a href=\"/manual/en/features.gc.php\">Garbage Collection</a></dd>\n \t<dd><a 
href=\"/manual/en/features.dtrace.php\">DTrace Dynamic Tracing</a></dd>\n </dl>\n <dl>\n <dt><a href=\"/manual/en/funcref.php\">Function Reference</a></dt>\n \t<dd><a href=\"/manual/en/refs.basic.php.php\">Affecting PHP's Behaviour</a></dd>\n \t<dd><a href=\"/manual/en/refs.utilspec.audio.php\">Audio Formats Manipulation</a></dd>\n \t<dd><a href=\"/manual/en/refs.remote.auth.php\">Authentication Services</a></dd>\n \t<dd><a href=\"/manual/en/refs.utilspec.cmdline.php\">Command Line Specific Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.compression.php\">Compression and Archive Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.creditcard.php\">Credit Card Processing</a></dd>\n \t<dd><a href=\"/manual/en/refs.crypto.php\">Cryptography Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.database.php\">Database Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.calendar.php\">Date and Time Related Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.fileprocess.file.php\">File System Related Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.international.php\">Human Language and Character Encoding Support</a></dd>\n \t<dd><a href=\"/manual/en/refs.utilspec.image.php\">Image Processing and Generation</a></dd>\n \t<dd><a href=\"/manual/en/refs.remote.mail.php\">Mail Related Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.math.php\">Mathematical Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.utilspec.nontext.php\">Non-Text MIME Output</a></dd>\n \t<dd><a href=\"/manual/en/refs.fileprocess.process.php\">Process Control Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.basic.other.php\">Other Basic Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.remote.other.php\">Other Services</a></dd>\n \t<dd><a href=\"/manual/en/refs.search.php\">Search Engine Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.utilspec.server.php\">Server Specific Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.basic.session.php\">Session 
Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.basic.text.php\">Text Processing</a></dd>\n \t<dd><a href=\"/manual/en/refs.basic.vartype.php\">Variable and Type Related Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.webservice.php\">Web Services</a></dd>\n \t<dd><a href=\"/manual/en/refs.utilspec.windows.php\">Windows Only Extensions</a></dd>\n \t<dd><a href=\"/manual/en/refs.xml.php\">XML Manipulation</a></dd>\n \t<dd><a href=\"/manual/en/refs.ui.php\">GUI Extensions</a></dd>\n </dl>\n <dl>\n <dt>Keyboard Shortcuts</dt><dt>?</dt>\n <dd>This help</dd>\n <dt>j</dt>\n <dd>Next menu item</dd>\n <dt>k</dt>\n <dd>Previous menu item</dd>\n <dt>g p</dt>\n <dd>Previous man page</dd>\n <dt>g n</dt>\n <dd>Next man page</dd>\n <dt>G</dt>\n <dd>Scroll to bottom</dd>\n <dt>g g</dt>\n <dd>Scroll to top</dd>\n <dt>g h</dt>\n <dd>Goto homepage</dd>\n <dt>g s</dt>\n <dd>Goto search<br/>(current page)</dd>\n <dt>/</dt>\n <dd>Focus search box</dd>\n </dl></div></nav>\n <div id=\"goto\">\n <div class=\"search\">\n <div class=\"text\"></div>\n <div class=\"results\"><ul></ul></div>\n </div>\n </div>\n \n \n \n <div class=\"clearfix\" id=\"intro\">\n <div class=\"container\">\n <div class=\"row clearfix\">\n <div class=\"blurb\">\n <p>PHP is a popular general-purpose scripting language that is especially suited to web development.</p>\n <p>Fast, flexible and pragmatic, PHP powers everything from your blog to the most popular websites in the world.</p>\n </div>\n <div class=\"download\">\n <h3>Download</h3><ul>\n \n <li><a class=\"download-link\" href=\"/downloads.php#v5.6.40\">5.6.40</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/ChangeLog-5.php#5.6.40\">Release Notes</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/migration56\">Upgrading</a></li>\n \n <li><a class=\"download-link\" href=\"/downloads.php#v7.1.26\">7.1.26</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/ChangeLog-7.php#7.1.26\">Release Notes</a><span class=\"dot\">·</span><a 
class=\"notes\" href=\"/migration71\">Upgrading</a></li>\n \n <li><a class=\"download-link\" href=\"/downloads.php#v7.2.14\">7.2.14</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/ChangeLog-7.php#7.2.14\">Release Notes</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/migration72\">Upgrading</a></li>\n \n <li><a class=\"download-link\" href=\"/downloads.php#v7.3.1\">7.3.1</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/ChangeLog-7.php#7.3.1\">Release Notes</a><span class=\"dot\">·</span><a class=\"notes\" href=\"/migration73\">Upgrading</a></li>\n </ul>\n </div>\n </div> </div>\n </div>\n \n \n <div class=\"clearfix\" id=\"layout\">\n <section id=\"layout-content\">\n <div class=\"home-content\"><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2019-01-10T15:01:12-08:00\">10 Jan 2019</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2019.php#id2019-01-10-4\" id=\"id2019-01-10-4\">PHP 5.6.40 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP\n 5.6.40. This is a security release. Several security bugs have been fixed\n in this release.\n \n All PHP 5.6 users are encouraged to upgrade to this version.\n </p>\n \n <p>For source downloads of PHP 5.6.40 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-5.php#5.6.40\">ChangeLog</a>.\n </p>\n \n <p>Please note that according to the <a href=\"http://php.net/supported-versions.php\">PHP version\n support timelines</a>,\n PHP 5.6.40 is the last scheduled release of PHP 5.6 branch. 
There may be additional release if we\n discover\n important security issues that warrant it, otherwise this release will be the final one in the PHP\n 5.6 branch.\n If your PHP installation is based on PHP 5.6, it may be a good time to start making the plans for\n the upgrade\n to PHP 7.1, PHP 7.2 or PHP 7.3.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2019-01-10T18:52:00+00:00\">10 Jan 2019</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2019.php#id2019-01-10-3\" id=\"id2019-01-10-3\">PHP 7.1.26 Release Announcement</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP\n 7.1.26. This is a security release which also contains several bug fixes.</p>\n \n <p>All PHP 7.1 users are encouraged to upgrade to this version.</p>\n \n <p>For source downloads of PHP 7.1.26 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-7.php#7.1.26\">ChangeLog</a>.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2019-01-10T11:51:06+01:00\">10 Jan 2019</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2019.php#id2019-01-10-2\" id=\"id2019-01-10-2\">PHP 7.3.1 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP\n 7.3.1. 
This is a security release which also contains several bug fixes.</p>\n \n <p>All PHP 7.3 users are encouraged to upgrade to this version.</p>\n \n <p>For source downloads of PHP 7.3.1 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-7.php#7.3.1\">ChangeLog</a>.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2019-01-10T08:30:27+00:00\">10 Jan 2019</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2019.php#id2019-01-10-1\" id=\"id2019-01-10-1\">PHP 7.2.14 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP\n 7.2.14. This is a security release which also contains several minor bug fixes.</p>\n \n <p>All PHP 7.2 users are encouraged to upgrade to this version.</p>\n \n <p>For source downloads of PHP 7.2.14 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-7.php#7.2.14\">ChangeLog</a>.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-12-06T13:00:00+01:00\">06 Dec 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-12-06-5\" id=\"id2018-12-06-5\">PHP 7.0.33 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP\n 7.0.33. 
Five security-related issues were fixed in this release.\n All PHP 7.0 users are encouraged to upgrade to this version.\n </p>\n \n <p>For source downloads of PHP 7.0.33 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-7.php#7.0.33\">ChangeLog</a>.\n </p>\n \n <p>Please note that according to the <a href=\"http://php.net/supported-versions.php\">PHP version support timelines</a>,\n PHP 7.0.33 is the last scheduled release of PHP 7.0 branch. There may be additional release if we discover\n important security issues that warrant it, otherwise this release will be the final one in the PHP 7.0 branch.\n If your PHP installation is based on PHP 7.0, it may be a good time to start making the plans for the upgrade\n to PHP 7.1, PHP 7.2 or PHP 7.3.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-12-06T16:10:25+00:00\">06 Dec 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-12-06-4\" id=\"id2018-12-06-4\">PHP 7.1.25 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP 7.1.25.\n This is a security release.</p>\n \n <p>All PHP 7.1 users are encouraged to upgrade to this version.</p>\n \n <p>For source downloads of PHP 7.1.25 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-7.php#7.1.25\">ChangeLog</a>.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time 
datetime=\"2018-11-22T12:22:47+01:00\">22 Nov 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-11-22-1\" id=\"id2018-11-22-1\">PHP 7.3.0RC6 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the presumably last PHP 7.3.0 pre-release, PHP 7.3.0RC6.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0RC6 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC6/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC6/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC6/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be 7.3.0 (GA), planned for December 6th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/6d9574612d0fb78b8549e42ec096a5a6\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-11-08T11:11:26+01:00\">08 Nov 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-11-08-1\" id=\"id2018-11-08-1\">PHP 7.3.0RC5 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the next PHP 7.3.0 pre-release, PHP 7.3.0RC5.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0RC5 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC5/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC5/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC5/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be RC6, planned for November 22nd.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/a14634afdd52b7f69d65d2bd5a79ac99\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-10-25T11:07:32+02:00\">25 Oct 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-10-25-1\" id=\"id2018-10-25-1\">PHP 7.3.0RC4 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the next PHP 7.3.0 pre-release, PHP 7.3.0RC4.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0RC4 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC4/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC4/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC4/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be RC5, planned for November 8th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/594d9a18290f1b019b2ba68a098413c6\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-10-11T13:47:50+02:00\">11 Oct 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-10-11-1\" id=\"id2018-10-11-1\">PHP 7.3.0RC3 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the next PHP 7.3.0 pre-release, PHP 7.3.0RC3.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0RC3 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC3/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC3/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC3/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be RC4, planned for October 25th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/3b521933b5524c92e880fc96559a5f5c\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-09-28T10:31:16+02:00\">28 Sep 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-09-28-1\" id=\"id2018-09-28-1\">PHP 7.3.0RC2 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the next PHP 7.3.0 pre-release, PHP 7.3.0RC2.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0RC2 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC2/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC2/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC2/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be RC3, planned for October 11th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/ffe9373d127254a19e73e73251e4ff7d\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-09-13T10:57:40+02:00\">13 Sep 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-09-13-2\" id=\"id2018-09-13-2\">PHP 7.3.0RC1 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the next PHP 7.3.0 pre-release, PHP 7.3.0RC1.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0RC1 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC1/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC1/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0RC1/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be RC2, planned for September 27th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/224ae1ef28b1e3f2e0a62a4ab50966e4\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-08-30T17:37:34+02:00\">30 Aug 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-08-30-1\" id=\"id2018-08-30-1\">PHP 7.3.0.beta3 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the seventh PHP 7.3.0 version, PHP 7.3.0beta3.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0beta3 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta3/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta3/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta3/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be RC1, planned for September 13th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/aeef8c8877a451ba6fce6f990dd3860b\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-08-16T14:11:38+02:00\">16 Aug 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-08-16-1\" id=\"id2018-08-16-1\">PHP 7.3.0.beta2 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the sixth PHP 7.3.0 version, PHP 7.3.0beta2.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0beta2 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta2/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta2/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
Internal changes are listed in the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta2/UPGRADING.INTERNALS\">UPGRADING.INTERNALS</a> file.\n These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be Beta 3, planned for August 30th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/4bfd2f4d54ebc01cd37ba3dc86f1f814\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-08-02T11:44:58+02:00\">02 Aug 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-08-02-1\" id=\"id2018-08-02-1\">PHP 7.3.0.beta1 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the fifth PHP 7.3.0 version, PHP 7.3.0beta1.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0beta1 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta1/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0beta1/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be Beta 2, planned for August 16th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/e666c3f1622321f868de9282bee67e43\">the manifest</a>\n or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n \n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-07-19T11:02:21+02:00\">19 Jul 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-07-19-1\" id=\"id2018-07-19-1\">PHP 7.3.0alpha4 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the fourth PHP 7.3.0 version, PHP 7.3.0alpha4.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0alpha4 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" https://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha4/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha4/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be Beta 1, planned for August 2nd.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/b30366855341382046687ce7adb20f69\">the manifest</a> or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-07-05T11:41:41+02:00\">05 Jul 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-07-05-1\" id=\"id2018-07-05-1\">PHP 7.3.0 alpha 3 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the third PHP 7.3.0 version, PHP 7.3.0 Alpha 3.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0 Alpha 3 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" http://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha3/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha3/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be Beta 1, planned for July 19th.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/e2e76ac0072474b019b0c9f1aef249f1\">the manifest</a> or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-06-21T11:46:20+02:00\">21 Jun 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-06-21-1\" id=\"id2018-06-21-1\">PHP 7.3.0 alpha 2 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP team is glad to announce the release of the second PHP 7.3.0 version, PHP 7.3.0 Alpha 2.\n The rough outline of the PHP 7.3 release cycle is specified in the\n <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>\n For source downloads of PHP 7.3.0 Alpha 2 please visit the <a href=\"https://downloads.php.net/~cmb/\">download page</a>.\n Windows sources and binaries can be found on <a href=\"https://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" http://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</b>\n </p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha2/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha2/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n </p>\n \n <p>\n The next release would be Alpha 3, planned for July 5.\n </p>\n \n <p>\n The signatures for the release can be found in <a href=\"https://gist.github.com/cmb69/2c54d0972b296a905062f52c0852e7cb\">the manifest</a> or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>\n Thank you for helping us make PHP better.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-06-07T18:36:37+00:00\">07 Jun 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-06-07-1\" id=\"id2018-06-07-1\">PHP 7.3.0 alpha 1 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n PHP team is glad to announce the release of the first PHP 7.3.0 version, PHP 7.3.0 Alpha 1.\n \t This starts the PHP 7.3 release cycle, the rough outline of which is specified in the\n \t <a href=\"https://wiki.php.net/todo/php73\">PHP Wiki</a>.\n </p>\n \n <p>For source downloads of PHP 7.3.0 Alpha 1 please visit the <a href=\"https://downloads.php.net/~stas/\">download page</a>.</p>\n \n <p>\n Please carefully test this version and report any issues found in the <a href=\" http://bugs.php.net\">bug reporting system</a>.\n </p>\n \n <p>\n <b>Please DO NOT use this version in production, it is an early test version.</b>\n </p>\n \n <p>\n \t For more information on the new features and other changes, you can read the\n \t <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha1/NEWS\">NEWS</a> file,\n \t or the <a href=\"https://github.com/php/php-src/blob/php-7.3.0alpha1/UPGRADING\">UPGRADING</a>\n \t file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n \t </p>\n \n <p>\n The next release would be Alpha 2, planned for June 21.\n </p>\n \n <p>The signatures for the release can be found in <a href=\"https://gist.github.com/smalyshev/b0994d4dd138007237911429702ee040\">the manifest</a> or on <a href=\"https://qa.php.net/\">the QA site</a>.\n </p>\n \n <p>Thank you for helping us make PHP better.</p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2018-02-01T09:12:34+00:00\">01 Feb 2018</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2018.php#id2018-02-01-1\" id=\"id2018-02-01-1\">PHP 7.2.2 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP\n 7.2.2. This is a bugfix release, with several bug fixes included.</p>\n \n <p>All PHP 7.2 users are encouraged to upgrade to this version.</p>\n \n <p>For source downloads of PHP 7.2.2 please visit our <a href=\"http://www.php.net/downloads.php\">downloads page</a>,\n Windows source and binaries can be found on <a href=\"http://windows.php.net/download/\">windows.php.net/download/</a>.\n The list of changes is recorded in the <a href=\"http://www.php.net/ChangeLog-7.php#7.2.2\">ChangeLog</a>.\n </p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2017-10-12T11:46:49+02:00\">12 Oct 2017</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2017.php#id2017-10-12-1\" id=\"id2017-10-12-1\">PHP 7.2.0 Release Candidate 4 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP development team announces the immediate availability of PHP 7.2.0 RC4.\n This release is the fourth Release Candidate for 7.2.0.\n All users of PHP are encouraged to test this version carefully, and report any bugs\n and incompatibilities in the <a 
href=\"https://bugs.php.net/\">bug tracking system</a>.\n </p>\n \n <p><strong>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</strong></p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.2.0RC4/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.2.0RC4/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. These files can also be found in the release archive.\n </p>\n \n <p>\n For source downloads of PHP 7.2.0 Release Candidate 4 please visit the\n <a href=\"https://downloads.php.net/~remi/\">download</a> page,\n Windows sources and binaries can be found at\n <a href=\"http://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n The next Release Candidate will be announced on the 26th of October.\n You can also read the full list of planned releases on\n <a href=\"https://wiki.php.net/todo/php72\">our wiki</a>.\n </p>\n \n <p>Thank you for helping us make PHP better.</p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2017-09-28T12:58:56+02:00\">28 Sep 2017</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2017.php#id2017-09-28-2\" id=\"id2017-09-28-2\">PHP 7.2.0 Release Candidate 3 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP development team announces the immediate availability of PHP 7.2.0 RC3.\n This release is the third Release Candidate for 7.2.0.\n All users of PHP are encouraged to test this version carefully, and report any bugs\n and incompatibilities in the <a href=\"https://bugs.php.net/\">bug tracking system</a>.\n </p>\n \n <p><strong>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</strong></p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a 
href=\"https://github.com/php/php-src/blob/php-7.2.0RC3/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.2.0RC3/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. These files can also be found in the release archive.\n </p>\n \n <p>\n For source downloads of PHP 7.2.0 Release Candidate 3 please visit the\n <a href=\"https://downloads.php.net/~remi/\">download</a> page,\n Windows sources and binaries can be found at\n <a href=\"http://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n The next Release Candidate will be announced on the 12th of October.\n You can also read the full list of planned releases on\n <a href=\"https://wiki.php.net/todo/php72\">our wiki</a>.\n </p>\n \n <p>Thank you for helping us make PHP better.</p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2017-08-31T10:53:58+02:00\">31 Aug 2017</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2017.php#id2017-08-31-1\" id=\"id2017-08-31-1\">PHP 7.2.0 Release Candidate 1 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP development team announces the immediate availability of PHP 7.2.0 Release\n Candidate 1. This release is the first Release Candidate for 7.2.0.\n All users of PHP are encouraged to test this version carefully, and report any bugs\n and incompatibilities in the <a href=\"https://bugs.php.net/\">bug tracking system</a>.\n </p>\n \n <p><strong>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</strong></p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.2.0RC1/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.2.0RC1/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n </p>\n \n <p>\n For source downloads of PHP 7.2.0 Release Candidate 1 please visit the\n <a href=\"https://downloads.php.net/~remi/\">download</a> page,\n Windows sources and binaries can be found at\n <a href=\"http://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n The second Release Candidate will be released on the 14th of September.\n You can also read the full list of planned releases on\n <a href=\"https://wiki.php.net/todo/php72\">our wiki</a>.\n </p>\n \n <p>Thank you for helping us make PHP better.</p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2017-08-17T10:17:44+02:00\">17 Aug 2017</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2017.php#id2017-08-17-1\" id=\"id2017-08-17-1\">PHP 7.2.0 Beta 3 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>\n The PHP development team announces the immediate availability of PHP 7.2.0 Beta 3.\n This release is the third and final beta for 7.2.0. All users of PHP are encouraged\n to test this version carefully, and report any bugs and incompatibilities in the\n <a href=\"https://bugs.php.net/\">bug tracking system</a>.\n </p>\n \n <p><strong>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</strong></p>\n \n <p>\n For more information on the new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.2.0beta3/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.2.0beta3/UPGRADING\">UPGRADING</a>\n file for a complete list of upgrading notes. 
These files can also be found in the release archive.\n </p>\n \n <p>\n For source downloads of PHP 7.2.0 Beta 3 please visit the\n <a href=\"https://downloads.php.net/~remi/\">download</a> page,\n Windows sources and binaries can be found at\n <a href=\"http://windows.php.net/qa/\">windows.php.net/qa/</a>.\n </p>\n \n <p>\n The first Release Candidate will be released on the 31th of August.\n You can also read the full list of planned releases on\n <a href=\"https://wiki.php.net/todo/php72\">our wiki</a>.\n </p>\n \n <p>Thank you for helping us make PHP better.</p>\n </div>\n \n </div>\n </article><article class=\"newsentry\">\n <header class=\"title\">\n <time datetime=\"2017-07-06T12:25:08+02:00\">06 Jul 2017</time>\n <h2 class=\"newstitle\">\n <a href=\"https://php.net/archive/2017.php#id2017-07-06-2\" id=\"id2017-07-06-2\">PHP 7.2.0 Alpha 3 Released</a>\n </h2>\n </header>\n <div class=\"newscontent\">\n <div>\n <p>The PHP development team announces the immediate availability of PHP 7.2.0 Alpha 3.\n This release contains fixes and improvements relative to Alpha 2.\n All users of PHP are encouraged to test this version carefully,\n and report any bugs and incompatibilities in the\n <a href=\"https://bugs.php.net/\">bug tracking system</a>.</p>\n \n <p><strong>THIS IS A DEVELOPMENT PREVIEW - DO NOT USE IT IN PRODUCTION!</strong></p>\n \n <p>For information on new features and other changes, you can read the\n <a href=\"https://github.com/php/php-src/blob/php-7.2.0alpha3/NEWS\">NEWS</a> file,\n or the <a href=\"https://github.com/php/php-src/blob/php-7.2.0alpha3/UPGRADING\">UPGRADING</a> file\n for a complete list of upgrading notes. 
These files can also be found in the release archive.</p>\n \n <p>For source downloads of PHP 7.2.0 Alpha 3 please visit the <a href=\"https://downloads.php.net/~remi/\">download</a> page,\n Windows sources and binaries can be found on <a href=\"http://windows.php.net/qa/\">windows.php.net/qa/</a>.</p>\n \n <p>The first beta will be released on the 20th of July. You can also read the full list of planned releases on our\n <a href=\"https://wiki.php.net/todo/php72#timetable\">wiki</a>.</p>\n \n <p>Thank you for helping us make PHP better.</p>\n </div>\n \n </div>\n </article><p class=\"archive\"><a href=\"/archive/\">Older News Entries</a></p></div> </section><!-- layout-content -->\n \n <aside class=\"tips\">\n <div class=\"inner\">\n <div class=\"panel\"> <a class=\"headline\" href=\"/conferences\" title=\"Conferences calling for papers\">Conferences calling for papers</a><div class=\"body\"><ul><li><a href=\"http://php.net/conferences/index.php#id2019-01-15-1\" title=\"ScotlandPHP 2019\">ScotlandPHP 2019</a></li><li><a href=\"http://php.net/conferences/index.php#id2018-12-10-1\" title=\"PHPKonf Istanbul PHP Conference 2019\">PHPKonf Istanbul PHP Conference 2019</a></li><li><a href=\"http://php.net/conferences/index.php#id2018-11-20-1\" title=\"Dutch PHP Conference - CfP is open!\">Dutch PHP Conference - CfP is open!</a></li></ul></div></div><div class=\"panel\"> <a class=\"headline\" href=\"/conferences\" title=\"Upcoming conferences\">Upcoming conferences</a><div class=\"body\"><ul><li><a href=\"http://php.net/conferences/index.php#id2018-12-15-1\" title=\"SunshinePHP 2019\">SunshinePHP 2019</a></li><li><a href=\"http://php.net/conferences/index.php#id2018-11-20-2\" title=\"Dutch PHP Conference 2019\">Dutch PHP Conference 2019</a></li><li><a href=\"http://php.net/conferences/index.php#id2018-10-12-2\" title=\"International PHP Conference 2019 - Spring Edition\">International PHP Conference 2019 - Spring Edition</a></li></ul></div></div>\n <p class=\"panel\"><a 
href=\"/cal.php\">User Group Events</a></p>\n <p class=\"panel\"><a href=\"/thanks.php\">Special Thanks</a></p>\n <p class=\"panel social-media\">\n <span class=\"headline\">Social media</span>\n </p><div class=\"body\">\n <ul>\n <li>\n <a href=\"https://twitter.com/official_php\">\n <i class=\"icon-twitter\"></i>\n @official_php\n </a>\n </li>\n </ul>\n </div>\n <p></p>\n </div>\n </aside>\n \n </div><!-- layout -->\n \n <footer>\n <div class=\"container footer-content\">\n <div class=\"row-fluid\">\n <ul class=\"footmenu\">\n <li><a href=\"/copyright.php\">Copyright © 2001-2019 The PHP Group</a></li>\n <li><a href=\"/my.php\">My PHP.net</a></li>\n <li><a href=\"/contact.php\">Contact</a></li>\n <li><a href=\"/sites.php\">Other PHP.net sites</a></li>\n <li><a href=\"/mirrors.php\">Mirror sites</a></li>\n <li><a href=\"/privacy.php\">Privacy policy</a></li>\n </ul>\n </div>\n </div>\n </footer>\n \n <div class=\"elephpants\"><div class=\"images\"></div></div>\n <!-- External and third party libraries. 
-->\n <script src=\"//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js\"></script>\n <script src=\"/cached.php?t=1421837618&amp;f=/js/ext/modernizr.js\"></script>\n <script src=\"/cached.php?t=1421837618&amp;f=/js/ext/hogan-2.0.0.min.js\"></script>\n <script src=\"/cached.php?t=1421837618&amp;f=/js/ext/typeahead.min.js\"></script>\n <script src=\"/cached.php?t=1421837618&amp;f=/js/ext/mousetrap.min.js\"></script>\n <script src=\"/cached.php?t=1421837618&amp;f=/js/search.js\"></script>\n <script src=\"/cached.php?t=1539765004&amp;f=/js/common.js\"></script>\n \n <a href=\"javascript:;\" id=\"toTop\"><span id=\"toTopHover\"></span><img alt=\"To Top\" height=\"40\" src=\"/images/[email protected]\" width=\"40\"/></a>\n \n \n \n </body></html>>\n\n\n\n```python\na=soup.title.text\n#b=soup.title.strings\n#c=soup.title.string\n```\n\n\n```python\nprint(a)\n```\n\n PHP: Hypertext Preprocessor\n\n\n\n```python\n#dir(tags)\n```\n\n\n```python\ntags.find_all('div', class_='blurb')\n#tags.getText(\" \")\n```\n\n\n\n\n [<div class=\"blurb\">\n <p>PHP is a popular general-purpose scripting language that is especially suited to web development.</p>\n <p>Fast, flexible and pragmatic, PHP powers everything from your blog to the most popular websites in the world.</p>\n </div>]\n\n\n" }, { "alpha_fraction": 0.8024691343307495, "alphanum_fraction": 0.8024691343307495, "avg_line_length": 39.5, "blob_id": "e36bf3ffd9ea33aa5bf884caa2af6e885489cec2", "content_id": "e12a8b58f686af2f5c71c680aa5d9d1b5fe989a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 66, "num_lines": 2, "path": "/README.md", "repo_name": "99ashr/WebScraping", "src_encoding": "UTF-8", "text": "# WebScraping\nfetching data from the sites that doesn't authenticate it by API's\n" }, { "alpha_fraction": 0.6866883039474487, "alphanum_fraction": 0.6899350881576538, "avg_line_length": 13.690476417541504, 
"blob_id": "5eea2541796ef973e3f7eb9789275a078ee315bb", "content_id": "11348c346893fe0be5b2975d5fac50685aa9357d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 42, "num_lines": 42, "path": "/webScrapping.py", "repo_name": "99ashr/WebScraping", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n#!!!import urllib!!!\nimport urllib.request as req\n\n#!!!import beautifulsoup!!!\nfrom bs4 import BeautifulSoup\n\n#!!!url of the link!!!\n\nlink = \"https://secure.php.net/\"\n#dir(link)\n\nwebpage = req.urlopen(link)\n#dir(webpage)\n\ntags= BeautifulSoup(webpage,'html.parser')\n\n#print(tags.prettify)\n\na=tags.title.text\n#b=tags.title.strings\n#c=tags.title.string\n\n#print(a)\n\n#dir(tags)\n\ndiv_blurb=tags.find('div', class_='blurb')\n\n\n#tags.getText(\" \")\n#print(div_blurb)\n\ndiv_text=div_blurb.get_text()\n\n#all_text=tags.get_text()\n\nfile = open(\"scrapped_data\", \"w\")\nfile.write(a)\nfile.write(div_text)\nfile.close()" } ]
3
shellfly/TagClassifier
https://github.com/shellfly/TagClassifier
89a6187165125f99a1a6c5145ddda8312e8d4bed
78dfdbb1495b100666685e4d7c981b884e70d150
efb0cf56f632e732a6906f5d2abddc0739a0a834
refs/heads/master
2021-07-21T01:56:56.453022
2017-10-29T14:12:40
2017-10-29T14:12:40
108,387,401
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 29, "blob_id": "371f7a5bd9fc359cdc47b9ca02add3c491cb1de6", "content_id": "05efc0a725584c1a00615cd74665d744d05140bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 210, "license_type": "permissive", "max_line_length": 80, "num_lines": 7, "path": "/README.md", "repo_name": "shellfly/TagClassifier", "src_encoding": "UTF-8", "text": "# TagClassifier\na simple tag classifier use nltk.NaiveBayesClassifier and content from zhihu.com\n\n### How to run\n1. copy cookies from browser and paste it to crawl.py\n2. python3 crawl.py\n3. python3 analysis.py\n" }, { "alpha_fraction": 0.6017526984214783, "alphanum_fraction": 0.6475170254707336, "avg_line_length": 28.768115997314453, "blob_id": "222b9042093f9914a2eac607deeed0a1b0568bef", "content_id": "b7040f6316f62092f0b34639d06cb00354d06b1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2110, "license_type": "permissive", "max_line_length": 79, "num_lines": 69, "path": "/analysis.py", "repo_name": "shellfly/TagClassifier", "src_encoding": "UTF-8", "text": "from collections import Counter\n\nimport random\nimport nltk\nimport jieba\n\njieba.add_word(\"村上春树\")\njieba.add_word(\"夏目漱石\")\nstopwords = set([l.strip() for l in open('stopwords.txt')])\n\ntopics = {\n 19551556: \"旅行\",\n 19789286: \"日语学习\",\n 19562563: \"日剧\",\n 19860581: \"JLPT\",\n 19622153: \"留学日本\",\n 19550994: \"游戏\",\n 19597091: \"日本文学\",\n 19591985: \"动漫\"\n}\n\n\ndef tokenize(content):\n content = content.replace(' ', '').replace('\\n', '')\n return [w for w in jieba.cut(content) if len(w) > 1 and w not in stopwords]\n\n\ndef build_dictionary(num):\n dictionary = set()\n for name in topics.values():\n words = [w for l in open(name + \".txt\") for w in tokenize(l)]\n words = Counter(words)\n dictionary |= set(w[0] for w in 
words.most_common(num))\n return dictionary\n\ndictionary = build_dictionary(100)\nprint('dictionary length: ', len(dictionary))\n\n\ndef gen_features(content):\n words = list(tokenize(content))\n return {w: w in words for w in dictionary}\n\noriginal_featuresets = []\nfor name in topics.values():\n content = open(name + \".txt\").read()\n items = [i for i in content.split('\\n\\n') if i]\n original_featuresets.extend(\n [(gen_features(item), name, item) for item in items])\n\nrandom.shuffle(original_featuresets)\nfeaturesets = [of[:2] for of in original_featuresets]\nprint('all feature sets: ', len(featuresets))\ntrain_set, devtest_set, test_set = featuresets[\n 200:], featuresets[100:200], featuresets[:100]\nclassifier = nltk.NaiveBayesClassifier.train(train_set)\nprint('accuracy: ', nltk.classify.accuracy(classifier, devtest_set))\n\n\nfor (features, name, item) in original_featuresets[10:20]:\n print('item : ', item.replace('\\n', '')[:30])\n print('category: %s, guess: %s' % (name, classifier.classify(features)))\n\n# errors = []\n# for (features, name, item) in original_featuresets[100:200]:\n# guess = classifier.classify(features)\n# if guess != name:\n# print('item: ', item, 'name: ', name, 'guess: ', guess)\n# print([(f, a) for f, a in features.items() if a])\n" }, { "alpha_fraction": 0.5387299656867981, "alphanum_fraction": 0.608513593673706, "avg_line_length": 28.244897842407227, "blob_id": "ea6fb455a4ea49cdded78fe9274705c72747357a", "content_id": "15b8e441b0064329767d53ffef96d875803b88d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1481, "license_type": "permissive", "max_line_length": 141, "num_lines": 49, "path": "/crawl.py", "repo_name": "shellfly/TagClassifier", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport time\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ncookies = dict([a.split('=', 1) for a in \"\"\"cookies from browser\"\"\".split(';')])\nheaders 
= {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\ntopics = {\n 19551556: \"旅行\",\n 19789286: \"日语学习\",\n 19562563: \"日剧\",\n 19860581: \"JLPT\",\n 19622153: \"留学日本\",\n 19550994: \"游戏\",\n 19597091: \"日本文学\",\n 19591985: \"动漫\"\n}\n\ntop_url = 'https://www.zhihu.com/topic/%s/top-answers?page=%s'\n\n\ndef crawl_topic_top_answers(topic_id, name):\n filepath = name + '.txt'\n if os.path.exists(filepath):\n return\n\n contents = []\n for page in range(1, 11):\n url = top_url % (topic_id, page)\n print('crawl url: ', url)\n res = requests.get(url, headers=headers, cookies=cookies)\n soup = BeautifulSoup(res.content, \"lxml\")\n for item in soup.find_all(class_=\"feed-item\"):\n title = item.find(\"a\", class_=\"question_link\").text\n summary = item.find(class_=\"summary\").text.replace(\"显示全部\", \"\")\n contents.append(\"%s\\n%s\" % (title.strip(), summary.strip()))\n\n with open(filepath, 'w') as f:\n f.write('\\n\\n'.join(contents))\n\nfor topic_id, name in topics.items():\n crawl_topic_top_answers(topic_id, name)\n print('sleeping for next task...')\n time.sleep(random.randint(30, 100))\n" } ]
3
hyejuKil/Django-practice
https://github.com/hyejuKil/Django-practice
512933c64813c1bb8fa4a688b412cbe619357c2f
d22d5cb9a1438cd07e065d87a7ab88797b535882
d6cd07533c7cd3c31e765308b67e2630806c8e24
refs/heads/main
2023-06-25T06:55:14.133133
2021-07-22T11:36:03
2021-07-22T11:36:03
387,841,535
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.655639111995697, "alphanum_fraction": 0.6586466431617737, "avg_line_length": 25.639999389648438, "blob_id": "437cef4db518663255a18cd5146ea95a6e62185a", "content_id": "a50a72425df7a4616e03a064d53521f315382c2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 123, "num_lines": 25, "path": "/myprojcet/myapp/views.py", "repo_name": "hyejuKil/Django-practice", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\ndef welcome(request):\n return render(request, \"welcome.html\")\n\ndef hello(requset):\n userName = requset.GET['name']\n return render(requset,'hello.html',{'userName' : userName})\n\ndef wordCount(requset):\n return render(requset,'wordcount.html')\n\ndef wordCountAns(request):\n sentence = request.GET['sentence']\n\n wordList = sentence.split()\n\n wordDict = {}\n for word in wordList:\n if word in wordDict:\n wordDict[word] +=1\n else:\n wordDict[word] = 1\n\n return render(request,'wordCountAns.html',{'sentence' : sentence,'count' : len(wordList), 'wordDict' : wordDict.items})" } ]
1
ArturMaiaP/StochasticSampling
https://github.com/ArturMaiaP/StochasticSampling
dc496eda2385e8deadbdc10fe8422653240a0bf4
9fbc7995e6e5fff1326783386a2a5c8078e5d047
2beabccb90ff6385f7403382cfd566602d9160ba
refs/heads/master
2023-02-26T06:50:26.148678
2021-01-31T02:19:16
2021-01-31T02:19:16
334,554,015
0
0
null
2021-01-31T02:11:55
2021-01-31T02:15:03
2021-01-31T02:19:17
Python
[ { "alpha_fraction": 0.6130936145782471, "alphanum_fraction": 0.622484564781189, "avg_line_length": 34.5047607421875, "blob_id": "ba96fb96cd9eb99c42fdbbcdbd6f3505e12b1fc0", "content_id": "71f6cf67d75297460dfab6b96587097f26c4b7f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3727, "license_type": "permissive", "max_line_length": 152, "num_lines": 105, "path": "/StochasticSampling.py", "repo_name": "ArturMaiaP/StochasticSampling", "src_encoding": "UTF-8", "text": "import random\nimport pandas as pd\nfrom sklearn import svm\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Algorithm to select points using inverse transform sampling and SVM(binary classification)\nclass StochasticSelection():\n\n\t#Aplly SVM\n def runSVM(self,dfTest,dfTrain):\n\n #Break the training set into two parts:\n #X = attributes\n #y = classes\n X = dfTrain.drop([\"image_name\",\"Class\"],axis=1)\n y = dfTrain[\"Class\"]\n\n #fit the classifier \n #The parameters gamma and C may be different for other cases, you can try values from ([0.01;100]), in my case these reached the best accuracy. 
\n clf = svm.SVC(kernel=\"rbf\", gamma=0.05,C=1)\n clf.fit(X,y)\n\n # decision_function will return for each dataframe element its distance to the SVM hyperplane \n predictList = clf.decision_function(test)\n \n #If Class > 0, belongs to class 1\n #If Class < 0, belongs to class -1\n dfTest[\"Class\"] = predictList\n dfTest = dfTest.sort_values(by=['Class'],ascending=False )\n \n # Return the test set associated with the predicted classes\n return dfTest\n\n\n\n def selectPtsSVMInverseTransf(self,dfClassified,samples):\n\n \t#Calculate the probability of sampling a point that belongs to class 1\n positiveRowIndex = dfClassified.loc[dfClassified['Class'] > 0.0].index\n dfPositive = dfClassified.loc[positiveRowIndex, :]\n dfPositive = self.calcProb(dfPositive)\n \n #Calculate the probability of sampling a point that belongs to class -1\n negativeRowIndex = dfClassified.loc[dfClassified['Class'] < 0.0].index\n dfNegative = dfClassified.loc[negativeRowIndex, :]\n dfNegative = self.calcProb(dfNegative)\n \n \t#Sample X(qtdPos) points from class 1\n self.df1 = self.selectPtsProb(dfPositive,qtdPos,\"Positive\",randomMin=0)\n \n\t\t#Sample Y(qtdNeg) points from class -1\n self.df2 = self.selectPtsProb(dfNegative,qtdNeg,\"Negative\",randomMin=0)\n \n dfPoints = pd.concat([self.df1, self.df2])\n \n return dfPoints\n\n def calcProb(self,df):\n\n \tdf['Class'] = df['Class'].abs()\n sumSVMDistance = df[\"Class\"].values.sum()\n df = df.sort_values(by=['Class'], ascending=True)\n\t\t\n\t\tlistPj = []\n listCumulativeSumPj= []\n listDistance = df[\"Class\"].tolist()\n last = 0\n \n for distance in listDistance:\n\n \t#Probability to select a point Pj\n pj = distance/sumSVMDistance\n listPj.append(pj)\n\n sumPj = pj + last\n listCumulativeSumPj.append(sumPj)\n last = sumPj\n\n df['Pj'] = listPj\n #Saves the cumulative sum of the probability until each point P\n df['CumulativeSumPj'] = listCumulativeSumPj\n\n return df\n\n\n def 
selectPtsProb(self,df,samples,controle,flag,randomMin):\n newRows = pd.DataFrame(columns=df.columns)\n\n listCumulativeSum = df[\"CumulativeSumPj\"].tolist()\n \n for x in range(0,samples):\n \t#Generates a random number between [0,1]\n u = random.uniform(randomMin, 1)\n for cumulativeSum in listCumulativeSum:\n\n \t#Selects the point P if the cumulative sum of probability until P is greater than the generated random number \n if u < cumulativeSum:\n dfTemp = df.loc[df['CumulativeSumPj']== cumulativeSum]\n newRows = pd.concat([newRows,dfTemp])\n listCumulativeSum.remove(cumulativeSum)\n df = df.drop(df.loc[df['CumulativeSumPj']==cumulativeSum].index,axis =0)\n break\n \n return newRows" } ]
1
yuqing-feng/vfdbQuery
https://github.com/yuqing-feng/vfdbQuery
ab7ba2376aa579c56123f879880aebded154e402
757d944156ae3f5569573fdd3c78937d7464b1e4
d6c19a85304f525c0db66fa54cfa3b19b744a34d
refs/heads/master
2020-04-24T18:51:17.246468
2019-02-01T16:21:37
2019-02-01T16:21:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40909090638160706, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 21, "blob_id": "25aae970b1c1b5b03b0fec08d1ae9b36ed9a0cf7", "content_id": "275413b72bab21d533386bc6b20ea93412bac3c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "permissive", "max_line_length": 21, "num_lines": 3, "path": "/vfdbQuery/__init__.py", "repo_name": "yuqing-feng/vfdbQuery", "src_encoding": "UTF-8", "text": "__version__ = \"0.0.5\"\n__author__ = \"Forest Dussault\"\n__email__ = \"[email protected]\"\n" }, { "alpha_fraction": 0.4508928656578064, "alphanum_fraction": 0.6830357313156128, "avg_line_length": 15, "blob_id": "322ba067ac74b20fb13a968f842e668304127f3f", "content_id": "988ef00896a44dd4d28d814a227500f4deca365d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 224, "license_type": "permissive", "max_line_length": 22, "num_lines": 14, "path": "/requirements.txt", "repo_name": "yuqing-feng/vfdbQuery", "src_encoding": "UTF-8", "text": "certifi==2018.10.15\nClick==7.0\ncycler==0.10.0\nkiwisolver==1.0.1\nmatplotlib==3.0.0\nmkl-fft==1.0.6\nmkl-random==1.0.1\nnumpy==1.15.2\npandas==0.23.4\npyparsing==2.2.2\npython-dateutil==2.7.3\npytz==2018.5\nsix==1.11.0\ntornado==5.1.1\n" }, { "alpha_fraction": 0.6014707684516907, "alphanum_fraction": 0.6261127591133118, "avg_line_length": 36.086124420166016, "blob_id": "c218cbe872e524ed2f1e882462206674f6492d42", "content_id": "a9f96602ff7f4c77beb76e401451ab8980ef3be1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7751, "license_type": "permissive", "max_line_length": 118, "num_lines": 209, "path": "/vfdbQuery/vfdbQuery.py", "repo_name": "yuqing-feng/vfdbQuery", "src_encoding": "UTF-8", "text": "import os\nimport click\nimport shutil\nimport logging\nimport subprocess\nimport pandas as pd\nfrom pathlib 
import Path\n\nDEPENDENCIES = [\n 'blastn',\n 'makeblastdb'\n]\n\n\ndef convert_to_path(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n value = Path(value)\n return value\n\n\[email protected](help=\"vfdbQuery is a simple script for querying an input genome assembly against the Virulence Factor \"\n \"Database (VFDB).\")\[email protected](\"-i\", \"--infile\", type=click.Path(exists=True), required=True,\n help='FASTA file that you want to search against VFDB', callback=convert_to_path)\[email protected](\"-db\", \"--database\", type=click.Path(exists=True), required=True,\n help='Path to Virulence Factor Database (VFDB)', callback=convert_to_path)\ndef cli(infile, database):\n logging.basicConfig(\n format='\\033[92m \\033[1m %(asctime)s \\033[0m %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\n\n logging.info(f\"Started {Path(__file__).name}\")\n check_all_dependencies()\n\n blast_file = blast(infile, database)\n filtered_file = filter_blast(blast_file)\n generate_plot(filtered_file)\n logging.info(\"Process Complete\")\n\n\ndef check_db_exists(database: Path):\n # Format DB\n folder_files = list(database.parent.glob(\"*\"))\n nhr, nin, nsq = False, False, False\n for f in folder_files:\n if f.suffix == '.nhr':\n nhr = True\n elif f.suffix == '.nin':\n nin = True\n elif f.suffix == '.nsq':\n nsq = True\n if nhr and nin and nsq:\n database_exists = True\n else:\n database_exists = False\n return database_exists\n\n\ndef blast(infile: Path, database: Path):\n # Create db if it doesn't already exist\n db_exists = check_db_exists(database)\n if not db_exists:\n logging.info(f\"Creating BLAST database with {database.name}\")\n formatdb_cmd = f\"makeblastdb -in {database} -dbtype nucl\"\n p = subprocess.Popen(formatdb_cmd, shell=True)\n p.wait()\n else:\n logging.info(\"BLAST database detected\")\n\n # Need to perform first BLAST phase, if something is found go onto the next\n logging.info(f\"Running blastn on {infile}\")\n 
out_blast = infile.with_suffix(\".VFDB_BLASTn\")\n blast_cmd = f\"blastn -db {database} -query {infile} -outfmt '6 qseqid stitle slen \" \\\n f\"length qstart qend sstrand pident score' > {out_blast}\"\n\n p = subprocess.Popen(blast_cmd, shell=True)\n p.wait()\n # Check the tab-delineated VFDB.blastn to see if either of the two essential markers are present\n activator_check = False\n with open(str(out_blast), 'r') as f:\n blastn_file = f.readlines()\n for line in blastn_file:\n target = line.split('\\t')[1]\n if target == \"plcR Transcriptional activator\" or target == \"papR Signal peptide\":\n activator_check = True\n if activator_check:\n active_out_name = out_blast.with_suffix(\".VFDB_Active\")\n os.rename(str(out_blast), str(active_out_name))\n return active_out_name\n else:\n inactive_out_name = out_blast.with_suffix(\".VFDB_NOT-Active\")\n os.rename(str(out_blast), str(inactive_out_name))\n logging.info('No virulence activators found. Quitting.')\n logging.info(f'See potential virulence factors in {inactive_out_name}')\n quit()\n\n\ndef filter_blast(infile: Path):\n with open(str(infile), 'r') as f:\n blastn_file = f.readlines()\n\n filtered_out_name = infile.with_suffix(\".VFDB_Active_Filtered\")\n with open(str(filtered_out_name), 'w') as out:\n # Will now remove any hits that are less than 70% over 70% of the VFDB gene length\n out.write(\"qseqid\\tstitle\\tslen\\tlength\\tqstart\\tqend\\tsstrand\\tpident\\tscore\\n\")\n for line in blastn_file:\n slen = line.split('\\t')[2]\n length = line.split('\\t')[3]\n pident = line.split('\\t')[7]\n len_percent = float(length) / float(slen)\n if len_percent >= 0.7 and float(pident) >= 70.00:\n out.write(line)\n return filtered_out_name\n\n\ndef generate_plot(filtered_filepath: Path):\n sample = filtered_filepath.name\n sample = sample.split('.')[0]\n df = pd.read_csv(filtered_filepath, sep='\\t')\n\n target_dict = {\n 'plcR Transcriptional activator': 0,\n 'papR Signal peptide': 0,\n 'HblL2 BC3104 Hemolysin BL 
lytic component L2': 0,\n 'HblL1 BC3103 Hemolysin BL lytic component L1': 0,\n 'HblB BC3102 Hemolysin BL binding component precursor': 0,\n 'NheA BC1809 Non-hemolytic enterotoxin lytic component L2': 0,\n 'NheB BC1810 Non-hemolytic enterotoxin lytic component L1': 0,\n 'NheC BC1811 Enterotoxin C': 0,\n 'CytK BC1110 Cytotoxin K': 0,\n 'HlyI BC5101 Perfringolysin O precursor': 0,\n \"HblB' BC3101 Hemolysin BL binding component precursor\": 0,\n 'HlyII BC3523 Hemolysin II': 0,\n 'EntFM BC1953 Enterotoxin': 0,\n 'EntA BC5239 Enterotoxin - cell wall binding protein': 0,\n 'EntB BC2952 Enterotoxin - cell wall binding protein': 0,\n 'EntC BC0813 Enterotoxin - cell wall binding protein': 0\n }\n\n for target in df['stitle']:\n target = target.strip()\n logging.debug(f\"Checking for {target}\")\n if target in target_dict:\n logging.debug(f\"Found {target}\")\n target_dict[target] += 1\n else:\n logging.debug(f\"Could not find {target}\")\n\n df_processed = pd.DataFrame(list(target_dict.items()), columns=['', 'Count'])\n\n # Picked 16 colours from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\n my_colors = [\n '#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe',\n '#008080', '#800000', '#aaffc3', '#808000', '#000075', '#808080'\n ]\n\n plt = df_processed.plot(x='', y='Count', kind='barh',\n title=sample, xticks=range(max(df_processed['Count'] + 1)),\n figsize=(4, 6), legend=False, color=my_colors, edgecolor='black')\n\n fig = plt.get_figure()\n\n pdf_path = filtered_filepath.parent / f'{sample}_TargetCheck.pdf'\n fig.savefig(pdf_path, bbox_inches='tight')\n png_path = filtered_filepath.parent / f'{sample}_TargetCheck.png'\n fig.savefig(png_path, bbox_inches='tight', dpi=300)\n csv_path = filtered_filepath.parent / f'{sample}_TargetCheck.tsv'\n df_processed.to_csv(csv_path, sep='\\t', index=None)\n\n logging.info(f\"Created plot at {pdf_path}\")\n logging.info(f\"Created plot at {png_path}\")\n 
logging.info(f\"Created CSV virulence activator count data at {csv_path}\")\n\n\ndef dependency_check(dependency: str) -> bool:\n \"\"\"\n Checks if a given program is present in the user's $PATH\n :param dependency: String of program name\n :return: True if program is in $PATH, False if not\n \"\"\"\n check = shutil.which(dependency)\n if check is not None:\n return True\n else:\n return False\n\n\ndef check_all_dependencies():\n # Dependency check\n logging.info(\"Conducting dependency check...\")\n dependency_dict = dict()\n for dependency in DEPENDENCIES:\n dependency_dict[dependency] = dependency_check(dependency)\n if False in dependency_dict.values():\n logging.error(\"ERROR: Cannot locate some dependencies in $PATH...\")\n for key, value in dependency_dict.items():\n if not value:\n logging.error(f\"Dependency missing: {key}\")\n quit()\n else:\n for key, value in dependency_dict.items():\n logging.debug(f\"Dependency {key}: {value}\")\n logging.info(\"Dependencies OK\")\n\n\nif __name__ == \"__main__\":\n cli()\n" }, { "alpha_fraction": 0.6692546606063843, "alphanum_fraction": 0.6723602414131165, "avg_line_length": 24.799999237060547, "blob_id": "fa798a82e2949e886a9cac6c9d21235222a59ef7", "content_id": "7193ba37fc8164223f2182ddf379cfcb6b112274", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 644, "license_type": "permissive", "max_line_length": 142, "num_lines": 25, "path": "/README.md", "repo_name": "yuqing-feng/vfdbQuery", "src_encoding": "UTF-8", "text": "# vfdbQuery\n\n### Requirements\n- Python 3.6\n- [ncbi-blast+](https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download) (makeblastdb and blastn must be in your $PATH)\n\n### Installation\n```\npip install vfdbQuery\n```\n\n### Usage\n```\nUsage: vfdbQuery [OPTIONS]\n\n vfdbQuery is a simple script for querying an input genome assembly against\n the Virulence Factor Database (VFDB).\n\nOptions:\n -i, --infile PATH 
FASTA file that you want to search against VFDB\n [required]\n -db, --database PATH Path to Virulence Factor Database (VFDB) [required]\n --help Show this message and exit.\n\n```" } ]
4
sarahgrace/mirovia
https://github.com/sarahgrace/mirovia
29a514c2e917a5c7e9f2a631d4d94a1c5203287e
45c72d64a93463d0e87645ee0ffd94eaa7063c2c
be2661d68cc655dea25dbbfdd25ae40ddc07bfd0
refs/heads/master
2021-03-22T02:59:55.977500
2018-05-09T17:52:52
2018-05-09T17:52:52
118,677,677
5
3
null
null
null
null
null
[ { "alpha_fraction": 0.5520296692848206, "alphanum_fraction": 0.5722233653068542, "avg_line_length": 23.823530197143555, "blob_id": "7ede41a4245c1343c84a581554cc8fe1da51bbbe", "content_id": "bed8e9d16f09f9abdd4131c725b2a59b60c115b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9706, "license_type": "no_license", "max_line_length": 105, "num_lines": 391, "path": "/src/cuda/level2/neuralnet/include/mat_cpu.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"mat_cpu.h\"\n#include <algorithm>\n#include <cmath>\n\n/*\nMatCPU MatCPU::operator () (size_t ind) {\n MatCPU val_mat;\n val_mat.attach(&data(ind), 1, 1);\n return val_mat;\n}*/\n\n// memory functions\n\nMatCPU::MatCPU() {\n init();\n}\n\nMatCPU::MatCPU(size_t size1, size_t size2) {\n init();\n resize(size1, size2);\n}\n\nMatCPU::MatCPU(const MatCPU &b) {\n init();\n if (b.empty()) return;\n resize(b.size1_, b.size2_);\n (*this) = b;\n}\n\nMatCPU::MatCPU(MatCPU &&b) {\n init();\n Swap(*this, b);\n}\n\nMatCPU::~MatCPU() {\n clear();\n}\n\nMatCPU& MatCPU::init() {\n data_ = NULL;\n size1_ = 0;\n size2_ = 0;\n order_ = kInternalOrder;\n owner_ = false;\n return *this;\n}\n\n// only this function supports order_ == true\nMatCPU& MatCPU::operator = (const MatCPU &b) {\n //mexPrintMsg(\"Array const assignment\");\n mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,\n \"In MatCPU::operator = the arrays have different size\");\n if (order_ == b.order_) {\n for (size_t i = 0; i < size(); ++i) {\n data(i) = b.data(i);\n }\n } else if (b.order_ != kInternalOrder) { // order_ == kInternalOrder\n MatCPU br;\n br.attach(b.data_, b.size2_, b.size1_, kInternalOrder);\n Trans(br, *this);\n } else { // b.order_ == kInternalOrder, order_ == !kInternalOrder\n MatCPU br;\n br.attach(data_, size2_, size1_, kInternalOrder);\n Trans(b, br);\n }\n //mexPrintMsg(\"Array const assignment end\");\n return *this;\n}\n\nMatCPU& MatCPU::resize(size_t size1, size_t size2) {\n //mexPrintMsg(\"Array resize\");\n if (size1 * size2 != size()) {\n clear();\n if (size1 * size2 > 0) {\n data_ = new ftype[size1 * size2];\n owner_ = true;\n }\n }\n size1_ = size1;\n size2_ = size2;\n //mexPrintMsg(\"Array resize end\");\n return *this;\n}\n\nMatCPU& MatCPU::reshape(size_t size1, size_t size2) {\n mexAssertMsg(size() == size1 * size2,\n \"In MatCPU::reshape the sizes do not correspond\");\n size1_ = size1;\n size2_ = size2;\n return 
*this;\n}\n\nMatCPU& MatCPU::set_order(bool order) {\n order_ = order;\n return *this;\n}\n\nMatCPU& MatCPU::attach(const MatCPU &b) {\n return attach(b.data_, b.size1_, b.size2_, b.order_);\n}\n\nMatCPU& MatCPU::attach(const MatCPU &b, size_t offset, size_t size1, size_t size2, bool order) {\n mexAssertMsg(b.size1_ == 1 || b.size2_ == 1, \"In MatCPU::attach with offset one of sizes should be 1\");\n mexAssertMsg(offset + size1 * size2 <= b.size(),\n \"In MatCPU::attach the sizes don't correspond each other\");\n return attach(b.data_ + offset, size1, size2, order);\n}\n\nMatCPU& MatCPU::attach(ftype *ptr, size_t size1, size_t size2) {\n return attach(ptr, size1, size2, kInternalOrder);\n}\n\nMatCPU& MatCPU::attach(ftype *ptr, size_t size1, size_t size2, bool order) {\n //mexAssertMsg(order == false, \"In MatCPU::attach order should be always false\");\n clear();\n data_ = ptr;\n size1_ = size1;\n size2_ = size2;\n order_ = order;\n return *this;\n}\n\nMatCPU& MatCPU::clear() {\n //mexPrintMsg(\"Array clear\");\n if (owner_) {\n delete [] data_;\n owner_ = false;\n }\n return init();\n //mexPrintMsg(\"Array clear end\");\n}\n\nvoid Swap(MatCPU &a, MatCPU &b) {\n ftype *data_tmp = b.data_;\n b.data_ = a.data_;\n a.data_ = data_tmp;\n\n size_t size1_tmp = b.size1_;\n b.size1_ = a.size1_;\n a.size1_ = size1_tmp;\n\n size_t size2_tmp = b.size2_;\n b.size2_ = a.size2_;\n a.size2_ = size2_tmp;\n\n bool order_tmp = b.order_;\n b.order_ = a.order_;\n a.order_ = order_tmp;\n\n bool owner_tmp = b.owner_;\n b.owner_ = a.owner_;\n a.owner_ = owner_tmp;\n}\n\n// data functions\n\nMatCPU& MatCPU::ident() {\n mexAssertMsg(size1_ == size2_,\n \"In 'MatCPU::ident' the matrix must be squared\");\n for (size_t i = 0; i < size1_; ++i) {\n for (size_t j = 0; j < size2_; ++j) {\n if (i == j) {\n data(i, j) = 1;\n } else {\n data(i, j) = 0;\n }\n }\n }\n return *this;\n}\n\nMatCPU& MatCPU::assign(ftype c) {\n for (size_t i = 0; i < size(); ++i) {\n data(i) = c;\n }\n return 
*this;\n}\n\nMatCPU& MatCPU::assign(const std::vector<ftype> &vect) {\n mexAssertMsg(size1_ == 1 && size2_ == vect.size(),\n \"In MatCPU::assign the sizes do not correspond\");\n for (size_t i = 0; i < vect.size(); ++i) {\n data(i) = vect[i];\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator += (const MatCPU &b) {\n mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,\n \"In MatCPU::+= the sizes of matrices do not correspond\");\n for (size_t i = 0; i < size1_; ++i) {\n for (size_t j = 0; j < size2_; ++j) {\n data(i, j) += b(i, j);\n }\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator -= (const MatCPU &b) {\n mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,\n \"In MatCPU::-= the sizes of matrices do not correspond\");\n for (size_t i = 0; i < size1_; ++i) {\n for (size_t j = 0; j < size2_; ++j) {\n data(i, j) -= b(i, j);\n }\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator *= (const MatCPU &b) {\n mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,\n \"In 'MatCPU::*=' the matrices are of the different size\");\n for (size_t i = 0; i < size1_; ++i) {\n for (size_t j = 0; j < size2_; ++j) {\n data(i, j) *= b(i, j);\n }\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator /= (const MatCPU &b) {\n mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,\n \"In 'MatCPU::/=' the matrices are of the different size\");\n for (size_t i = 0; i < size1_; ++i) {\n for (size_t j = 0; j < size2_; ++j) {\n data(i, j) /= b(i, j);\n }\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator += (ftype c) {\n for (size_t i = 0; i < size(); ++i) {\n data(i) += c;\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator -= (ftype c) {\n for (size_t i = 0; i < size(); ++i) {\n data(i) -= c;\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator *= (ftype c) {\n for (size_t i = 0; i < size(); ++i) {\n data(i) *= c;\n }\n return *this;\n}\n\nMatCPU& MatCPU::operator /= (ftype c) {\n for (size_t i = 0; i < size(); ++i) {\n data(i) /= c;\n }\n return *this;\n}\n\nMatCPU& MatCPU::Reorder(bool 
order) {\n //mexAssertMsg(order_ == order, \"In MatCPU::reorder orders should be the same\");\n if (order_ != order) {\n if (size1_ > 1 && size2_ > 1) {\n MatCPU m(size1_, size2_);\n m.order_ = !order_;\n m = (*this);\n order_ = m.order_;\n (*this) = m;\n } else {\n order_ = !order_;\n }\n }\n return *this;\n}\n\n// friend functions\n\nvoid Sum(const MatCPU &a, MatCPU &vect, int dim) {\n\n if (dim == 1) {\n mexAssertMsg(vect.size1_ == 1 && vect.size2_ == a.size2_,\n \"In Sum the sizes do not correspond each other\");\n vect.assign(0);\n for (size_t i = 0; i < a.size1_; ++i) {\n for (size_t j = 0; j < a.size2_; ++j) {\n vect(0, j) += a(i, j);\n }\n }\n } else if (dim == 2) {\n mexAssertMsg(vect.size1_ == a.size1_ && vect.size2_ == 1,\n \"In Sum the sizes do not correspond each other\");\n vect.assign(0);\n for (size_t i = 0; i < a.size1_; ++i) {\n for (size_t j = 0; j < a.size2_; ++j) {\n vect(i, 0) += a(i, j);\n }\n }\n } else {\n mexAssertMsg(false, \"In MatCPU Sum the dimension parameter must be either 1 or 2\");\n }\n}\n\nvoid Mean(const MatCPU &a, MatCPU &vect, int dim) {\n Sum(a, vect, dim);\n if (dim == 1) {\n vect /= (ftype) a.size1_;\n } else if (dim == 2) {\n vect /= (ftype) a.size2_;\n } else {\n mexAssertMsg(false, \"In MatCPU Mean the dimension parameter must be either 1 or 2\");\n }\n}\n\nvoid Trans(const MatCPU &a, MatCPU &b) {\n // no resize to ensure that b.data_ is not relocated\n mexAssertMsg(a.size1_ == b.size2_ && a.size2_ == b.size1_,\n \"In Trans the sizes of matrices do not correspond\");\n mexAssertMsg(a.data_ != b.data_, \"In Trans the matrices are the same\");\n for (size_t i = 0; i < b.size1_; ++i) {\n for (size_t j = 0; j < b.size2_; ++j) {\n b(i, j) = a(j, i);\n }\n }\n}\n\nvoid Shuffle(MatCPU &a, MatCPU &b) {\n mexAssertMsg(a.order_ == true && b.order_ == true, \"In Shuffle the orders should be true\");\n mexAssertMsg(a.size1_ == b.size1_, \"In Shuffle the sizes do not correspond\");\n size_t train_num = a.size1_;\n 
//mexPrintMsg(\"train_num\", train_num);\n std::vector<int> randind(train_num);\n for (size_t i = 0; i < train_num; ++i) {\n randind[i] = i;\n }\n std::random_shuffle(randind.begin(), randind.end());\n\n MatCPU a_new(a.size1_, a.size2_);\n MatCPU b_new(b.size1_, b.size2_);\n for (size_t i = 0; i < train_num; ++i) {\n ftype *a_ptr = a.data_ + i * a.size2_;\n ftype *a_new_ptr = a_new.data_ + randind[i] * a.size2_;\n ftype *b_ptr = b.data_ + i * b.size2_;\n ftype *b_new_ptr = b_new.data_ + randind[i] * b.size2_;\n for (size_t j = 0; j < a.size2_; ++j) {\n a_new_ptr[j] = a_ptr[j];\n }\n for (size_t j = 0; j < b.size2_; ++j) {\n b_new_ptr[j] = b_ptr[j];\n }\n }\n a_new.order_ = true;\n b_new.order_ = true;\n\n Swap(a, a_new);\n Swap(b, b_new);\n}\n\n\nftype MatCPU::sum() const {\n ftype matsum = 0;\n for (size_t i = 0; i < size(); ++i) {\n matsum += data(i);\n }\n return matsum;\n}\n\nbool MatCPU::hasZeros() const {\n for (size_t i = 0; i < size(); ++i) {\n if (data(i) == 0) return true;\n }\n return false;\n}\n" }, { "alpha_fraction": 0.7119113802909851, "alphanum_fraction": 0.7119113802909851, "avg_line_length": 24.785715103149414, "blob_id": "3f3af1c6eadc94a9b97dc25a75a89905e21ca121", "content_id": "f274b1eacab2b5adfaddf26b48955280212c2719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 722, "license_type": "no_license", "max_line_length": 82, "num_lines": 28, "path": "/src/cuda/level2/mandelbrot/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "include $(top_builddir)/config/config.mk\ninclude $(top_builddir)/config/targets.mk\n\nEXTRAFLAGS = -rdc=true -dc -lcudadevrt -lcudart\n\n# How to find source files\nVPATH = $(srcdir):$(srcdir)/../../common:$(srcdir)/../../../common\n\nAM_LDFLAGS = $(CUDA_LDFLAGS)\nAM_CPPFLAGS = $(CUDA_INC)\n\n# What is the destination for programs built from this directory?\ncudadir = $(bindir)/CUDA\n\n# What programs should be installed to that 
destination?\ncuda_PROGRAMS = mandelbrot\n\nmandelbrot_link.o: mandelbrot.o\n\t$(NVCC) ${CUDA_CPPFLAGS} $(CPPFLAGS) $(NVCXXFLAGS) -dlink $< -o mandelbrot_link.o\n\n# How to build those programs?\nmandelbrot_SOURCES = \\\nmain.cpp\n\nmandelbrot_LDADD = \\\nmandelbrot.o \\\nmandelbrot_link.o \\\n$(CUDA_LIBS) $(LIBS) -lm\n" }, { "alpha_fraction": 0.6007773280143738, "alphanum_fraction": 0.6057745814323425, "avg_line_length": 39.022220611572266, "blob_id": "773128d7dd30409214a2c528f8649cc3f4e4feec", "content_id": "aa6506c901c34208788b9dfd1db8ac909f1c4b68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1801, "license_type": "no_license", "max_line_length": 122, "num_lines": 45, "path": "/README.md", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "# Mirovia\n\n## To Install:\n1. ```git clone https://github.com/sarahgrace/mirovia.git```\n2. ```cd mirovia```\n3. ```bash configure --prefix=$MIROVIA_ROOT```\n4. ```make```\n5. ```make install```\n\n## To Run Suite:\n``` python driver.py [options]```\n```\nOptions:\n -h, --help show help message and exit\n -p, --prefix=PREFIX location of Mirovia root, defaults to current working directory\n -d, --device=DEVICE device to run the benchmarks on\n -s, --size=SIZE problem size\n -b, --benchmark=BENCHMARKS comma-separated list of benchmarks to run, or 'all' to run entire suite, defaults to 'all'\n -v, --verbose enable verbose output\n```\nNote: Results are written to ```$MIROVIA_ROOT/results/$BENCHMARK```\n\n## To Run a Single Benchmark:\n1. ```cd src/cuda/$BENCHMARK_LEVEL/$BENCHMARK```\n2. 
``` ./$BENCHMARK [options]```\n```\nGeneral Options: \n -c, --configFile specify configuration file\n -d, --device specify device to run on\n -i, --inputFile path of input file\n -o, --outputFile path of output file\n -m, --metricsFile path of file to write metrics to\n -n, --passes specify number of passes\n -p, --properties show properties for available platforms and devices (exits afterwards)\n -q, --quiet enable minimal output\n -s, --size specify problem size\n -v, --verbose enable verbose output\n```\nNote: Run benchmark with --help to see full list of options available for that specific benchmark\n\n## To Run a Benchmark with Custom Data:\n1. ```python data/$BENCHMARK/datagen.py [options]```\n2. Run benchmark with ```-i $DATA_FILEPATH```\n\nNote: Not all benchmarks have a datagen\n" }, { "alpha_fraction": 0.6676946878433228, "alphanum_fraction": 0.6784887909889221, "avg_line_length": 36.59420394897461, "blob_id": "a3f910dd0005a91edc1250d2ff1aa22534381bee", "content_id": "e3ea8f3e490bbbfd82ee888bf2bd576dbbbdd4f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2594, "license_type": "no_license", "max_line_length": 114, "num_lines": 69, "path": "/src/cuda/level2/neuralnet/include/layer_deconv.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"layer_deconv.h\"\n\nLayerDeconv::LayerDeconv() {\n conv_mode_ = CUDNN_CROSS_CORRELATION;\n CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_));\n}\n\nLayerDeconv::~LayerDeconv() {\n CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc_));\n}\n\nvoid LayerDeconv::Init(const mxArray *mx_layer, const Layer *prev_layer) {\n mexAssertMsg(mexIsField(mx_layer, \"channels\"), \"The 'deconv' type layer must contain the 'channels' field\");\n mexAssertMsg(mexIsField(mx_layer, \"filtersize\"), \"The 'deconv' type layer must contain the 'filtersize' field\");\n // swapping filter dims as required by Deconv\n filters_.dims(1) = filters_.dims(0);\n filters_.dims(0) = prev_layer->dims_[1];\n Pair upscale = {1, 1};\n CUDNN_CALL(cudnnSetConvolution2dDescriptor(\n conv_desc_, padding_[0], padding_[1], stride_[0], stride_[1],\n upscale[0], upscale[1], conv_mode_\n ));\n for (size_t i = 0; i < 2; ++i) {\n dims_[i+2] = (prev_layer->dims_[i+2] - 1) * stride_[i] + filters_.dims(i+2);\n }\n}\n\nvoid LayerDeconv::TransformForward(Layer *prev_layer, PassNum passnum) {\n ConvolutionBackwardData(prev_layer->activ_mat_, filters_.get(),\n activ_mat_, conv_desc_);\n activ_mat_.Validate();\n}\n\nvoid LayerDeconv::TransformBackward(Layer *prev_layer) {\n ConvolutionForward(deriv_mat_, filters_.get(),\n prev_layer->deriv_mat_, conv_desc_);\n prev_layer->deriv_mat_.Validate();\n}\n\nvoid LayerDeconv::WeightGrads(Layer *prev_layer, GradInd gradind) {\n if (gradind == GradInd::First) {\n ConvolutionBackwardFilter(deriv_mat_, prev_layer->activ_mat_,\n filters_.der(), conv_desc_);\n filters_.der() *= (lr_coef_ / dims_[0]);\n } else if (gradind == GradInd::Second) {\n ConvolutionBackwardFilter(deriv_mat_, prev_layer->activ_mat_,\n filters_.der2(), conv_desc_);\n filters_.der2() *= 
(lr_coef_ / dims_[0]);\n }\n}\n" }, { "alpha_fraction": 0.698630154132843, "alphanum_fraction": 0.698630154132843, "avg_line_length": 13.399999618530273, "blob_id": "fa921acfdee1f4e81939df4ca3275ce56a69748e", "content_id": "e26551c741227e3db1e2c3f4b79df8c94d5afeca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 73, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/src/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "if BUILD_CUDA\n MAYBE_CUDA = cuda\nendif\n\nSUBDIRS=common $(MAYBE_CUDA)\n\n" }, { "alpha_fraction": 0.6477987170219421, "alphanum_fraction": 0.7358490824699402, "avg_line_length": 18.875, "blob_id": "2c34236805576356c253edec3d7a0042ef14e67b", "content_id": "ffb4732621a414651c2e98333be907c8f96bf2a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 159, "license_type": "no_license", "max_line_length": 24, "num_lines": 8, "path": "/data/gemm/gen_dataset.sh", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 datagen.py -n 1\npython3 datagen.py -n 2\npython3 datagen.py -n 4\npython3 datagen.py -n 8\npython3 datagen.py -n 16\npython3 datagen.py -n 32\n" }, { "alpha_fraction": 0.6929972171783447, "alphanum_fraction": 0.6963585615158081, "avg_line_length": 24.869565963745117, "blob_id": "77b5c947e8511857836733f0c10076259e7c63d2", "content_id": "8b95bffc0a918a2e025c8897a476f8710b76f2d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1785, "license_type": "no_license", "max_line_length": 69, "num_lines": 69, "path": "/src/cuda/level2/neuralnet/cuda_print.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or 
modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _CUDA_PRINT_H_\n#define _CUDA_PRINT_H_\n\n#include \"mex_print.h\"\n\n#include <cuda.h>\n#include <cublas_v2.h>\n#include <curand.h>\n#include <cudnn.h>\n\nconst char* curandGetErrorString(curandStatus_t error);\nconst char* cublasGetErrorString(cublasStatus_t status);\n\n#ifndef CUDA_CALL\n #define CUDA_CALL(fun) { \\\n cudaError_t status = (fun); \\\n if (status != cudaSuccess) { \\\n mexAssertMsg(false, cudaGetErrorString(status)); \\\n } \\\n }\n#endif\n\n#ifndef CURAND_CALL\n #define CURAND_CALL(fun) { \\\n curandStatus_t status = (fun); \\\n if (status != CURAND_STATUS_SUCCESS) { \\\n mexAssertMsg(false, curandGetErrorString(status)); \\\n } \\\n }\n#endif\n\n#ifndef CUBLAS_CALL\n #define CUBLAS_CALL(fun) { \\\n cublasStatus_t status = (fun); \\\n if (status != CUBLAS_STATUS_SUCCESS) { \\\n mexAssertMsg(false, cublasGetErrorString(status)); \\\n } \\\n }\n#endif\n\n#ifndef CUDNN_CALL\n #define CUDNN_CALL(fun) { \\\n cudnnStatus_t status = (fun); \\\n if (status != CUDNN_STATUS_SUCCESS) { \\\n mexAssertMsg(false, cudnnGetErrorString(status)); \\\n } \\\n }\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.7333005666732788, "alphanum_fraction": 0.7362475395202637, "avg_line_length": 29.388059616088867, "blob_id": "f8ff84080cae2fccdb354f045e6ab329dfb3d4c3", "content_id": "47f68c48c21ee7cc2bf3bb06ec0633897c97a2fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 2036, "license_type": "no_license", "max_line_length": 85, "num_lines": 67, "path": "/src/cuda/level2/neuralnet/include/net.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _NET_H_\n#define _NET_H_\n\n#include \"layer.h\"\n#include \"params.h\"\n#include \"layer_input.h\"\n#include \"layer_jitt.h\"\n#include \"layer_conv.h\"\n#include \"layer_deconv.h\"\n#include \"layer_pool.h\"\n#include \"layer_full.h\"\n\nclass Net {\n\nprivate:\n std::vector<Layer*> layers_;\n size_t first_layer_, first_trained_;\n Weights weights_;\n Params params_;\n MatCPU data_, labels_, preds_;\n MatCPU losses_;\n MatGPU classcoefs_; // in fact vector\n MatGPU lossmat_, lossmat2_;\n\n void ReadData(const mxArray *mx_data);\n void ReadLabels(const mxArray *mx_labels);\n void InitActiv(const MatGPU &data_batch);\n void Forward(MatGPU &pred, PassNum passnum, GradInd gradind);\n void InitDeriv(const MatGPU &labels_batch, const MatGPU &coef_batch, ftype &loss);\n void InitActivIBP(ftype &loss, int normfun);\n void InitActivAT(ftype coef, int normfun);\n void Backward(PassNum passnum, GradInd gradind);\n void UpdateWeights();\n\npublic:\n Net(const mxArray *mx_params);\n ~Net();\n void InitLayers(const mxArray *mx_layers);\n void 
Train(const mxArray *mx_data, const mxArray *mx_labels);\n void Classify(const mxArray *mx_data, const mxArray *mx_labels, mxArray *&mx_pred);\n void InitWeights(const mxArray *mx_weights_in);\n void GetLosses(mxArray *&mx_losses) const;\n void GetWeights(mxArray *&mx_weights) const;\n size_t NumWeights() const;\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.4856858253479004, "alphanum_fraction": 0.4870550036430359, "avg_line_length": 47.39156723022461, "blob_id": "cb5b08e9188748fb12252aba998dcc4f67a4ec59", "content_id": "94735cbc3ffd95eb4362f121f96343372017136b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8034, "license_type": "no_license", "max_line_length": 91, "num_lines": 166, "path": "/src/cuda/level2/kmeans/cluster.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*****************************************************************************/\n/*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */\n/*By downloading, copying, installing or using the software you agree */\n/*to this license. If you do not agree to this license, do not download, */\n/*install, copy or use the software. */\n/* */\n/* */\n/*Copyright (c) 2005 Northwestern University */\n/*All rights reserved. */\n\n/*Redistribution of the software in source and binary forms, */\n/*with or without modification, is permitted provided that the */\n/*following conditions are met: */\n/* */\n/*1 Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. 
*/\n/* */\n/*2 Redistributions in binary form must reproduce the above copyright */\n/* notice, this list of conditions and the following disclaimer in the */\n/* documentation and/or other materials provided with the distribution.*/ \n/* */\n/*3 Neither the name of Northwestern University nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */\n/*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */\n/*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */\n/*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL */\n/*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */\n/*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */\n/*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */\n/*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */\n/*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */\n/*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */\n/*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/*POSSIBILITY OF SUCH DAMAGE. */\n/******************************************************************************/\n\n/*************************************************************************/\n/** File: cluster.c **/\n/** Description: Takes as input a file, containing 1 data point per **/\n/** per line, and performs a fuzzy c-means clustering **/\n/** on the data. Fuzzy clustering is performed using **/\n/** min to max clusters and the clustering that gets **/\n/** the best score according to a compactness and **/\n/** separation criterion are returned. **/\n/** Author: Brendan McCane **/\n/** James Cook University of North Queensland. **/\n/** Australia. 
email: [email protected] **/\n/** **/\n/** Edited by: Jay Pisharath, Wei-keng Liao **/\n/** Northwestern University. **/\n/**\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t**/\n/** ================================================================ **/\n/**\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t**/\n/** Edited by: Shuai Che, David Tarjan, Sang-Ha Lee\t\t\t\t\t**/\n/**\t\t\t\t University of Virginia\t\t\t\t\t\t\t\t\t**/\n/**\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t**/\n/** Description:\tNo longer supports fuzzy c-means clustering;\t \t**/\n/**\t\t\t\t\tonly regular k-means clustering.\t\t\t\t\t**/\n/**\t\t\t\t\tNo longer performs \"validity\" function to analyze\t**/\n/**\t\t\t\t\tcompactness and separation crietria; instead\t\t**/\n/**\t\t\t\t\tcalculate root mean squared error.\t\t\t\t\t**/\n/** **/\n/*************************************************************************/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <limits.h>\n#include <math.h>\n#include <float.h>\n#include <omp.h>\n\n#include \"ResultDatabase.h\"\n#include \"kmeans_cuda.h\"\n#include \"kmeans.h\"\n\nextern double wtime(void);\nfloat\tmin_rmse_ref = FLT_MAX;\t\t\t/* reference min_rmse value */\n\n/*---< cluster() >-----------------------------------------------------------*/\nint cluster(int npoints,\t /* number of data points */\n int nfeatures,\t\t\t/* number of attributes for each point */\n float **features,\t\t\t/* array: [npoints][nfeatures] */ \n int min_nclusters,\t\t/* range of min to max number of clusters */\n\t\t\tint\t\t max_nclusters,\n float threshold,\t\t\t/* loop terminating factor */\n int *best_nclusters,\t/* out: number between min and max with lowest RMSE */\n float ***cluster_centres,\t/* out: [best_nclusters][nfeatures] */\n\t\t\tfloat\t*min_rmse,\t\t\t/* out: minimum RMSE */\n\t\t\tint\t\t isRMSE,\t\t\t/* calculate RMSE */\n\t\t\tint\t\t nloops,\t\t\t\t/* number of iteration for each number of clusters */\n\t\t\tResultDatabase &resultDB,\n bool quiet)\n{ 
\n\tint\t\tnclusters;\t\t\t\t/* number of clusters k */\t\n\tint\t\tindex =0;\t\t\t\t/* number of iteration to reach the best RMSE */\n\tint\t\trmse;\t\t\t\t\t/* RMSE for each clustering */\n int *membership;\t\t\t\t/* which cluster a data point belongs to */\n float **tmp_cluster_centres;\t/* hold coordinates of cluster centers */\n\tint\t\ti;\n\n\t/* allocate memory for membership */\n membership = (int*) malloc(npoints * sizeof(int));\n\n\t/* sweep k from min to max_nclusters to find the best number of clusters */\n\tfor(nclusters = min_nclusters; nclusters <= max_nclusters; nclusters++)\n\t{\n if(!quiet) {\n printf(\"\\nRunning k means for %d clusters:\\n\", nclusters);\n }\n\t\tif (nclusters > npoints) break;\t/* cannot have more clusters than points */\n\n\t\t/* allocate device memory, invert data array (@ kmeans_cuda.cu) */\n\t\tallocateMemory(npoints, nfeatures, nclusters, features);\n\n\t\t/* iterate nloops times for each number of clusters */\n\t\tfor(i = 0; i < nloops; i++)\n\t\t{\n\t\t\t/* initialize initial cluster centers, CUDA calls (@ kmeans_cuda.cu) */\n if(!quiet) {\n printf(\"Pass %d: \", i);\n }\n tmp_cluster_centres = kmeans_clustering(features,\n nfeatures,\n npoints,\n nclusters,\n threshold,\n membership,\n resultDB,\n quiet);\n\n\t\t\tif (*cluster_centres) {\n\t\t\t\tfree((*cluster_centres)[0]);\n\t\t\t\tfree(*cluster_centres);\n\t\t\t}\n\t\t\t*cluster_centres = tmp_cluster_centres;\n\t \n\t\t\t\t\t\n\t\t\t/* find the number of clusters with the best RMSE */\n\t\t\tif(isRMSE)\n\t\t\t{\n\t\t\t\trmse = rms_err(features,\n\t\t\t\t\t\t\t nfeatures,\n\t\t\t\t\t\t\t npoints,\n\t\t\t\t\t\t\t tmp_cluster_centres,\n\t\t\t\t\t\t\t nclusters);\n\t\t\t\t\n\t\t\t\tif(rmse < min_rmse_ref){\n\t\t\t\t\tmin_rmse_ref = rmse;\t\t\t//update reference min RMSE\n\t\t\t\t\t*min_rmse = min_rmse_ref;\t\t//update return min RMSE\n\t\t\t\t\t*best_nclusters = nclusters;\t//update optimum number of clusters\n\t\t\t\t\tindex = i;\t\t\t\t\t\t//update number of iteration 
to reach best RMSE\n\t\t\t\t}\n\t\t\t}\t\t\t\n\t\t}\n\t\t\n\t\tdeallocateMemory();\t\t\t\t\t\t\t/* free device memory (@ kmeans_cuda.cu) */\n\t}\n\n free(membership);\n\n return index;\n}\n\n" }, { "alpha_fraction": 0.7329522371292114, "alphanum_fraction": 0.737852156162262, "avg_line_length": 30.397436141967773, "blob_id": "2887747ae4fe8d5f053c8cf37fe01d8325657c3b", "content_id": "1b2e1520ddb3e824f7c7638a5fe6f33e36c7a217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2449, "license_type": "no_license", "max_line_length": 83, "num_lines": 78, "path": "/src/cuda/level2/neuralnet/include/layer.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _LAYER_H_\n#define _LAYER_H_\n\n#include \"mat_gpu.h\"\n#include \"weights.h\"\n#include \"params.h\"\n#include \"mex_util.h\"\n\n// ForwardLinear multiplies on the same matrix of gradients as the backward pass.\n// It uses activ_mat from the first pass stored in first_mat.\n// BackwardLinear is the same as Backward, but uses first_mat_ for nonlinearities.\nenum class PassNum {ForwardTest, Forward, Backward, ForwardLinear, BackwardLinear};\n\n// index of where the gradients are stored\nenum class GradInd {Nowhere, First, Second};\n\nclass Layer {\n\npublic:\n // activations, derivatives, first activations\n MatGPU activ_mat_, deriv_mat_, first_mat_;\n // batchsize, channels, height, width\n Dim dims_;\n std::string type_, function_;\n ftype lr_coef_;\n\n inline size_t length() const { return dims_[1] * dims_[2] * dims_[3]; }\n\n Layer();\n virtual ~Layer() {};\n virtual void Init(const mxArray *mx_layer, const Layer *prev_layer) = 0;\n virtual void TransformForward(Layer *prev_layer, PassNum passnum) = 0;\n virtual void TransformBackward(Layer *prev_layer) = 0;\n virtual void WeightGrads(Layer *prev_layer, GradInd gradind) = 0;\n\n void InitGeneral(const mxArray *mx_layer);\n void InitWeights(Weights &weights, size_t &offset, bool isgen);\n void ResizeActivMat(size_t batchsize, PassNum passnum);\n void ResizeDerivMat();\n void AddBias(PassNum passnum);\n void BiasGrads(PassNum passnum, GradInd gradind);\n void DropoutForward(PassNum passnum);\n void DropoutBackward();\n void UpdateWeights(const Params &params);\n void RestoreOrder();\n void Nonlinear(PassNum passnum);\n size_t NumWeights() const;\n\n Weights filters_, biases_;\n bool add_bias_;\n\nprotected:\n Pair padding_, stride_;\n ftype init_std_, bias_coef_, dropout_;\n MatGPU dropmat_;\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.8484848737716675, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 32, "blob_id": 
"54355c1e64df52d90631186f8bdefac7b23d5911", "content_id": "e2429b9c11ee660b66ad77f64f9acc5b80ed052e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 33, "license_type": "no_license", "max_line_length": 32, "num_lines": 1, "path": "/src/cuda/level1/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "SUBDIRS=bfs gemm sort pathfinder\n" }, { "alpha_fraction": 0.6191536784172058, "alphanum_fraction": 0.6191536784172058, "avg_line_length": 41.761905670166016, "blob_id": "6950e173ea7ae3c8744d5fa9c148985584ac7a55", "content_id": "0430482f35ec58afbff0e94369cd43092decbef7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 898, "license_type": "no_license", "max_line_length": 81, "num_lines": 21, "path": "/src/cuda/level2/kmeans/kmeans_cuda.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#ifndef _KMEANS_CUDA_H_\n#define _KMEANS_CUDA_H_\n\n#include \"ResultDatabase.h\"\n\nint\t// delta -- had problems when return value was of float type\nkmeansCuda(float **feature,\t\t\t\t/* in: [npoints][nfeatures] */\n int nfeatures,\t\t\t\t/* number of attributes for each point */\n int npoints,\t\t\t\t/* number of data points */\n int nclusters,\t\t\t\t/* number of clusters */\n int *membership,\t\t\t\t/* which cluster the point belongs to */\n\t\t float **clusters,\t\t\t\t/* coordinates of cluster centers */\n\t\t int *new_centers_len,\t\t/* number of elements in each cluster */\n float **new_centers,\t\t\t/* sum of elements in each cluster */\n double &transferTime,\n double &kernelTime,\n\t\t ResultDatabase &resultDB);\nvoid allocateMemory(int npoints, int nfeatures, int nclusters, float **features);\nvoid deallocateMemory();\n\n#endif\n" }, { "alpha_fraction": 0.671875, "alphanum_fraction": 0.6890624761581421, "avg_line_length": 20.266666412353516, "blob_id": "99c606d8e45422d699b2088a036ce05f5e69c48c", "content_id": 
"7531c4cf381640f1f3a78f3959cee896a644926f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 640, "license_type": "no_license", "max_line_length": 100, "num_lines": 30, "path": "/src/cuda/level2/dwt2d/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "include $(top_builddir)/config/config.mk\ninclude $(top_builddir)/config/targets.mk\n\n# How to find source files\nVPATH = $(srcdir):$(srcdir)/../../common:$(srcdir)/../../../common:$(srcdir)/dwt_cuda:$(srcdir)/data\n\nAM_LDFLAGS = $(CUDA_LDFLAGS)\nAM_CPPFLAGS = $(CUDA_INC)\n\n# What is the destination for programs built from this directory?\ncudadir = $(bindir)/CUDA\n\n# What programs should be installed to that destination?\ncuda_PROGRAMS = dwt2d\n\n# How to build those programs?\ndwt2d_SOURCES = \\\nbmp.cpp \\\nmain.cpp\n\ndwt2d_LDADD = \\\ndwt_main.o \\\ndwt.o \\\ncomponents.o \\\nfdwt53.o \\\nfdwt97.o \\\nrdwt53.o \\\nrdwt97.o \\\ncommon.o \\\n$(CUDA_LIBS) $(LIBS)\n\n\n" }, { "alpha_fraction": 0.6776754856109619, "alphanum_fraction": 0.6859608888626099, "avg_line_length": 34.46938705444336, "blob_id": "2090d8e5c296b4259282a44a94922f3de4a3c3aa", "content_id": "4af78d6978d70c1a9f04632fb85ae9db0f9ecf56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8690, "license_type": "no_license", "max_line_length": 104, "num_lines": 245, "path": "/src/cuda/level2/neuralnet/include/mex_util.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY 
WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"mex_util.h\"\n\nstatic clock_t _start_timer_time = 0;\n\nvoid StartTimer() {\n if (print < 2) return;\n _start_timer_time = std::clock();\n}\n\nvoid MeasureTime(std::string msg) {\n if (print < 2) return;\n clock_t t0 = _start_timer_time;\n clock_t t = std::clock();\n double d = double(t - t0);\n mexPrintMsg(msg, d);\n}\n\nbool mexIsStruct(const mxArray *mx_array) {\n mexAssertMsg(mx_array != NULL && !mxIsEmpty(mx_array), \"In 'mexIsStruct' the array is NULL or empty\");\n return mxIsStruct(mx_array);\n}\n\nbool mexIsCell(const mxArray *mx_array) {\n mexAssertMsg(mx_array != NULL && !mxIsEmpty(mx_array), \"In 'mexIsCell' the array is NULL or empty\");\n return mxIsCell(mx_array);\n}\n\nbool mexIsField(const mxArray *mx_array, const char *fieldname) {\n mexAssertMsg(mexIsStruct(mx_array), \"In 'mexIsField' the array in not a struct\");\n const mxArray* mx_field = mxGetField(mx_array, 0, fieldname);\n return (mx_field != NULL);\n}\n\nbool mexIsString(const mxArray *mx_array) {\n mexAssertMsg(mx_array != NULL && !mxIsEmpty(mx_array), \"In 'mexIsString' the array is NULL or empty\");\n return mxIsChar(mx_array);\n}\n\nconst mxArray* mexGetCell(const mxArray *mx_array, size_t ind) {\n mexAssertMsg(mexIsCell(mx_array), \"In 'mexGetCell' the array in not a cell array\");\n size_t numel = mexGetNumel(mx_array);\n mexAssertMsg(ind < numel, \"In 'mexGetCell' index is out of array\");\n return mxGetCell(mx_array, ind);\n}\n\nconst mxArray* mexGetField(const mxArray *mx_array, const char *fieldname) {\n mexAssertMsg(mexIsStruct(mx_array), \"In 'mexGetField' the array in not a struct\");\n const mxArray* mx_field = mxGetField(mx_array, 0, fieldname);\n std::string 
fieldname_str(fieldname);\n mexAssertMsg(mx_field != NULL, fieldname + std::string(\" field is missing!\\n\"));\n return mx_field;\n}\n\nsize_t mexGetDimensionsNum(const mxArray *mx_array) {\n mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetDimensionsNum' is NULL\");\n return (size_t) mxGetNumberOfDimensions(mx_array);\n}\n\nstd::vector<size_t> mexGetDimensions(const mxArray *mx_array) {\n //mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetDimentions' is NULL\");\n // IMPORTANT! Returning dimensions ordered from slowest to fastest!\n // This is opposite to Matlab ordering!\n size_t dimnum = mexGetDimensionsNum(mx_array);\n std::vector<size_t> dims(dimnum);\n const mwSize *pdim = mxGetDimensions(mx_array);\n for (size_t i = 0; i < dimnum; ++i) {\n dims[dimnum - i - 1] = (size_t) pdim[i];\n }\n return dims;\n}\n\nsize_t mexGetNumel(const mxArray *mx_array) {\n std::vector<size_t> dims = mexGetDimensions(mx_array);\n size_t numel = 1;\n for (size_t i = 0; i < dims.size(); ++i) {\n numel *= dims[i];\n }\n return numel;\n}\n\nstd::string mexGetString(const mxArray *mx_array) {\n const size_t kMaxFieldLength = 100;\n char s[kMaxFieldLength];\n mexAssertMsg(mexIsString(mx_array), \"In 'mexGetSting' mx_array in not a string!\");\n mexAssertMsg(!mxGetString(mx_array, s, kMaxFieldLength), \"Error when reading string field\");\n std::string str(s);\n return str;\n}\n\nftype* mexGetPointer(const mxArray *mx_array) {\n mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetPointer' is NULL\");\n mexAssertMsg(mxGetClassID(mx_array) == MEX_CLASS,\n \"In 'mexGetPointer' mx_array is of the wrong type\");\n return (ftype*) mxGetData(mx_array);\n}\n\nftype mexGetScalar(const mxArray *mx_array) {\n mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetScalar' is NULL\");\n mexAssertMsg(mxIsNumeric(mx_array), \"In 'mexGetScalar' mx_array is not numeric\");\n if (mxGetClassID(mx_array) == mxSINGLE_CLASS) {\n float *pdata = (float*) mxGetData(mx_array);\n return (ftype) pdata[0];\n } 
else if (mxGetClassID(mx_array) == mxDOUBLE_CLASS) {\n double *pdata = (double*) mxGetData(mx_array);\n return (ftype) pdata[0];\n }\n return 0;\n}\n\nstd::vector<ftype> mexGetVector(const mxArray *mx_array) {\n //mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetVector' is NULL\");\n size_t numel = mexGetNumel(mx_array);\n mexAssertMsg(mxIsNumeric(mx_array), \"In 'mexGetVector' mx_array is not numeric\" );\n mexAssertMsg(mxGetClassID(mx_array) == mxDOUBLE_CLASS, \"In mexGetVector wrong type\");\n std::vector<ftype> vect(numel);\n double *pdata = (double*) mxGetData(mx_array);\n for (size_t i = 0; i < numel; ++i) {\n vect[i] = (ftype) pdata[i];\n }\n return vect;\n}\n\nvoid mexGetMatrix(const mxArray *mx_array, MatCPU &mat) {\n //mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetMatrix' is NULL\");\n std::vector<size_t> dims = mexGetDimensions(mx_array);\n mexAssertMsg(dims.size() == 2, \"In 'mexGetMatrix' argument must be the 2D matrix\");\n mexAssertMsg(mxGetClassID(mx_array) == MEX_CLASS,\n \"In 'mexGetMatrix' mx_array is of the wrong type\");\n ftype *pdata = (ftype*) mxGetData(mx_array);\n // Order is opposite to Matlab order, i.e. 
true\n mat.attach(pdata, dims[0], dims[1], true);\n}\n\nDim mexGetTensor(const mxArray *mx_array, MatCPU &mat) {\n //mexAssertMsg(mx_array != NULL, \"mx_array in 'mexGetMatrix' is NULL\");\n // Notice that returned dims correspond to NCHW,\n // mexGetDimensions returns dimensions in reverse order, so\n // so the matrix in Matlab should be WHCN\n std::vector<size_t> mx_dims = mexGetDimensions(mx_array);\n mexAssertMsg(mx_dims.size() <= 4, \"The data array must have max 4 dimensions\");\n Dim dims = {1, 1, 1, 1};\n for (size_t i = 0; i < mx_dims.size(); ++i) {\n mexAssertMsg(mx_dims[i] < INT_MAX, \"Tensor is too large!\");\n dims[4 - mx_dims.size() + i] = (int) mx_dims[i];\n }\n mexAssertMsg(mxGetClassID(mx_array) == MEX_CLASS,\n \"In 'mexGetTensor' mx_array is of the wrong type\");\n ftype *pdata = (ftype*) mxGetData(mx_array);\n // Order is opposite to Matlab order, i.e. true\n mat.attach(pdata, dims[0], dims[1] * dims[2] * dims[3], true);\n return dims;\n}\n\nmxArray* mexNewArray(const std::vector<size_t> &dimvect) {\n mexAssertMsg(kInternalOrder == true, \"mexNewTensor assert\");\n mwSize ndims = dimvect.size(), dims[ndims];\n // opposite order!\n for (size_t i = 0; i < dimvect.size(); ++i) {\n dims[i] = dimvect[dimvect.size() - i - 1];\n }\n mxArray *mx_array = mxCreateNumericArray(ndims, dims, MEX_CLASS, mxREAL);\n return mx_array;\n}\n\nmxArray* mexNewMatrix(size_t size1, size_t size2) {\n std::vector<size_t> dimvect(2);\n dimvect[0] = size1;\n dimvect[1] = size2;\n return mexNewArray(dimvect);\n}\n\nmxArray* mexSetScalar(ftype scalar) {\n mxArray *mx_scalar = mexNewMatrix(1, 1);\n ftype *pdata = (ftype*) mxGetData(mx_scalar);\n pdata[0] = scalar;\n return mx_scalar;\n}\n\nmxArray* mexSetVector(const std::vector<ftype> &vect) {\n mxArray *mx_array = mexNewMatrix(1, vect.size());\n ftype *pdata = (ftype*) mxGetData(mx_array);\n for (size_t i = 0; i < vect.size(); ++i) {\n pdata[i] = vect[i];\n }\n return mx_array;\n}\n\nmxArray* mexSetMatrix(const MatCPU &mat) 
{\n mexAssertMsg(mat.order() == true, \"mexSetTensor assert\");\n mxArray *mx_array = mexNewMatrix(mat.size1(), mat.size2());\n ftype *pdata = (ftype*) mxGetData(mx_array);\n MatCPU mr;\n mr.attach(pdata, mat.size1(), mat.size2(), mat.order());\n mr = mat;\n return mx_array;\n}\n\nmxArray* mexSetTensor(const MatCPU &mat, const Dim& dims) {\n mexAssertMsg(mat.order() == true, \"mexSetTensor assert\");\n mexAssertMsg(mat.size1() * mat.size2() == dims[0] * dims[1] * dims[2] * dims[3],\n \"In mexSetTensor dimensions don't correspond to matrix\");\n std::vector<size_t> dimvect(4);\n for (size_t i = 0; i < 4; ++i) {\n dimvect[i] = dims[i];\n }\n mxArray *mx_array = mexNewArray(dimvect);\n ftype *pdata = (ftype*) mxGetData(mx_array);\n MatCPU mr;\n mr.attach(pdata, mat.size1(), mat.size2(), mat.order());\n mr = mat;\n return mx_array;\n}\n\nvoid mexSetCell(mxArray* mx_array, size_t ind, mxArray* mx_value) {\n size_t numel = mexGetNumel(mx_array);\n mexAssertMsg(ind < numel, \"In mexSetCell the index is out of range\");\n mxSetCell(mx_array, ind, mx_value);\n}\n\nmxArray* mexSetCellMat(size_t size1, size_t size2) {\n return mxCreateCellMatrix(size1, size2);\n}\n\nmxArray* mexDuplicateArray(const mxArray* mx_array) {\n return mxDuplicateArray(mx_array);\n}\n" }, { "alpha_fraction": 0.5813086628913879, "alphanum_fraction": 0.6063497066497803, "avg_line_length": 36.266666412353516, "blob_id": "1eb5bf5c0ef0004627c10848493d35c5ba0d0550", "content_id": "6a839f22f91baddfc5e8454ae6144946d2072aff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6709, "license_type": "no_license", "max_line_length": 108, "num_lines": 180, "path": "/src/cuda/level2/neuralnet/include/layer_jitt.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2014 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the 
terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"layer_jitt.h\"\n\nLayerJitt::LayerJitt() {\n function_ = \"none\";\n add_bias_ = false;\n shift_.resize(1, 2);\n shift_.assign(0);\n scale_.resize(1, 2);\n scale_.assign(1);\n mirror_.resize(1, 2);\n mirror_.assign(0);\n angle_ = 0;\n defval_ = 0;\n noise_std_ = 0;\n randtest_ = false;\n}\n\nvoid LayerJitt::Init(const mxArray *mx_layer, const Layer *prev_layer) {\n dims_[1] = prev_layer->dims_[1];\n\n std::vector<ftype> shift(2);\n shift[0] = 0; shift[1] = 0;\n if (mexIsField(mx_layer, \"shift\")) {\n shift = mexGetVector(mexGetField(mx_layer, \"shift\"));\n mexAssertMsg(shift.size() == 2, \"Length of jitter shift vector and maps dimensionality must coincide\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(0 <= shift[i] && shift[i] < dims_[i+2], \"Shift in 'jitt' layer is out of range\");\n }\n MatCPU shift_cpu(1, 2);\n shift_cpu.assign(shift);\n shift_ = shift_cpu;\n }\n\n std::vector<ftype> scale(2);\n scale[0] = 1; scale[1] = 1;\n if (mexIsField(mx_layer, \"scale\")) {\n scale = mexGetVector(mexGetField(mx_layer, \"scale\"));\n mexAssertMsg(scale.size() == 2, \"Length of jitter scale vector and maps dimensionality must coincide\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(1 <= scale[i] && scale[i] < dims_[i+2], \"Scale in 'j' layer is out of range\");\n }\n MatCPU scale_cpu(1, 2);\n scale_cpu.assign(scale);\n scale_ = scale_cpu;\n scale_.Log();\n }\n\n if (mexIsField(mx_layer, \"mirror\")) {\n 
std::vector<ftype> mirror = mexGetVector(mexGetField(mx_layer, \"mirror\"));\n mexAssertMsg(mirror.size() == 2, \"Length of jitter scale vector and maps dimensionality must coincide\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(mirror[i] == 0 || mirror[i] == 1, \"Mirror must be either 0 or 1\");\n }\n MatCPU mirror_cpu(1, 2);\n mirror_cpu.assign(mirror);\n mirror_ = mirror_cpu;\n }\n\n if (mexIsField(mx_layer, \"angle\")) {\n angle_ = mexGetScalar(mexGetField(mx_layer, \"angle\"));\n mexAssertMsg(0 <= angle_ && angle_ <= 1, \"Angle in 'j' layer must be between 0 and 1\");\n }\n\n if (mexIsField(mx_layer, \"defval\")) {\n defval_ = mexGetScalar(mexGetField(mx_layer, \"defval\"));\n } else {\n // check that the transformed image is always inside the original one\n std::vector<ftype> maxsize(2, 0);\n for (size_t i = 0; i < 2; ++i) {\n maxsize[i] = (ftype) (dims_[i+2] - 1) * scale[i];\n }\n if (angle_ > 0) {\n ftype angle_inn = atan2((ftype) dims_[2], (ftype) dims_[3]) / kPi;\n ftype maxsin = 1;\n if (angle_inn + angle_ < 0.5) {\n maxsin = sin(kPi * (angle_inn + angle_));\n }\n ftype maxcos = 1;\n if (angle_inn > angle_) {\n maxcos = cos(kPi * (angle_inn - angle_));\n }\n ftype maxrad = (ftype) sqrt((double) (maxsize[0]*maxsize[0] + maxsize[1]*maxsize[1]));\n maxsize[0] = maxrad * maxsin;\n maxsize[1] = maxrad * maxcos;\n }\n std::vector<ftype> oldmapsize(2, 0);\n for (size_t i = 0; i < 2; ++i) {\n oldmapsize[i] = (ftype) prev_layer->dims_[i+2];\n }\n ftype min0 = ((ftype) oldmapsize[0] / 2 - (ftype) 0.5) - (ftype) maxsize[0] / 2 - shift[0];\n ftype max0 = ((ftype) oldmapsize[0] / 2 - (ftype) 0.5) + (ftype) maxsize[0] / 2 + shift[0];\n ftype min1 = ((ftype) oldmapsize[1] / 2 - (ftype) 0.5) - (ftype) maxsize[1] / 2 - shift[1];\n ftype max1 = ((ftype) oldmapsize[1] / 2 - (ftype) 0.5) + (ftype) maxsize[1] / 2 + shift[1];\n if (!(0 <= min0 && max0 < oldmapsize[0] && 0 <= min1 && max1 < oldmapsize[1])) {\n mexPrintMsg(\"min1\", min0); mexPrintMsg(\"max1\", max0);\n 
mexPrintMsg(\"min2\", min1); mexPrintMsg(\"max2\", max1);\n mexAssertMsg(false, \"For these jitter parameters the new image is out of the original image\");\n }\n }\n if (mexIsField(mx_layer, \"eigenvectors\")) {\n const mxArray* mx_ev = mexGetField(mx_layer, \"eigenvectors\");\n std::vector<size_t> ev_dim = mexGetDimensions(mx_ev);\n mexAssertMsg(ev_dim.size() == 2, \"The eigenvectors array must have 2 dimensions\");\n mexAssertMsg(ev_dim[0] == dims_[1] && ev_dim[1] == dims_[1],\n \"The eigenvector matrix size is wrong\");\n MatCPU ev_cpu(dims_[1], dims_[1]);\n mexGetMatrix(mx_ev, ev_cpu);\n eigenvectors_.resize(dims_[1], dims_[1]);\n eigenvectors_ = ev_cpu;\n if (mexIsField(mx_layer, \"noise_std\")) {\n noise_std_ = mexGetScalar(mexGetField(mx_layer, \"noise_std\"));\n mexAssertMsg(noise_std_ >= 0, \"noise_std must be nonnegative\");\n } else {\n mexAssertMsg(false, \"noise_std is required with eigenvalues\");\n }\n }\n if (mexIsField(mx_layer, \"randtest\")) {\n randtest_ = (mexGetScalar(mexGetField(mx_layer, \"randtest\")) > 0);\n }\n}\n\nvoid LayerJitt::TransformForward(Layer *prev_layer, PassNum passnum) {\n\n shift_mat_.resize(dims_[0], 2);\n scale_mat_.resize(dims_[0], 2);\n mirror_mat_.resize(dims_[0], 2);\n angle_mat_.resize(dims_[0], 1);\n if (passnum == PassNum::ForwardTest) {\n if (dims_[2] == prev_layer->dims_[2] &&\n dims_[3] == prev_layer->dims_[3]) {\n activ_mat_ = prev_layer->activ_mat_;\n return;\n }\n shift_mat_.assign(0);\n scale_mat_.assign(1);\n mirror_mat_.assign(0);\n angle_mat_.assign(0);\n } else if (passnum == PassNum::Forward) {\n ((shift_mat_.rand() *= 2) -= 1).MultVect(shift_, 1);\n (((scale_mat_.rand() *= 2) -= 1).MultVect(scale_, 1)).Exp();\n (mirror_mat_.rand()).MultVect(mirror_, 1);\n ((angle_mat_.rand() *= 2) -= 1) *= (kPi * angle_);\n } else {\n // if ForwardLinear, use the existing ones\n }\n AffineTransform(prev_layer->activ_mat_, activ_mat_,\n shift_mat_, scale_mat_, mirror_mat_, angle_mat_,\n defval_, true);\n /*\n if 
(noise_std_ > 0 && passnum != PassNum::ForwardTest) {\n VaryColors(activ_mat_, dims_, eigenvectors_, noise_std_);\n } */\n activ_mat_.Validate();\n}\n\nvoid LayerJitt::TransformBackward(Layer *prev_layer) {\n // spaces outside fill in with zeros\n AffineTransform(deriv_mat_, prev_layer->deriv_mat_,\n shift_mat_, scale_mat_, mirror_mat_, angle_mat_,\n 0, false);\n}\n\n" }, { "alpha_fraction": 0.6690370440483093, "alphanum_fraction": 0.6708978414535522, "avg_line_length": 61.565486907958984, "blob_id": "a86e273aa00c91bed7fecf36680112ae5e309cd7", "content_id": "cd3f0268d5e43337f63dfcdf439ab2d8454d11d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 30094, "license_type": "no_license", "max_line_length": 131, "num_lines": 481, "path": "/src/common/mkl_vsl_functions.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/* file: mkl_vsl_functions.h */\n/*\n// INTEL CONFIDENTIAL\n// Copyright(C) 2006-2008 Intel Corporation. All Rights Reserved.\n// The source code contained or described herein and all documents related to\n// the source code (\"Material\") are owned by Intel Corporation or its suppliers\n// or licensors. Title to the Material remains with Intel Corporation or its\n// suppliers and licensors. The Material contains trade secrets and proprietary\n// and confidential information of Intel or its suppliers and licensors. The\n// Material is protected by worldwide copyright and trade secret laws and\n// treaty provisions. No part of the Material may be used, copied, reproduced,\n// modified, published, uploaded, posted, transmitted, distributed or disclosed\n// in any way without Intel's prior express written permission.\n// No license under any patent, copyright, trade secret or other intellectual\n// property right is granted to or conferred upon you by disclosure or delivery\n// of the Materials, either expressly, by implication, inducement, estoppel or\n// otherwise. 
Any license under such intellectual property rights must be\n// express and approved by Intel in writing.\n*/\n/*\n//++\n// User-level VSL function declarations\n//--\n*/\n\n#ifndef __MKL_VSL_FUNCTIONS_H__\n#define __MKL_VSL_FUNCTIONS_H__\n\n#include \"mkl_vsl_types.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/*\n//++\n// EXTERNAL API MACROS.\n// Used to construct VSL function declaration. Change them if you are going to\n// provide different API for VSL functions.\n//--\n*/\n#define _Vsl_Api(rtype,name,arg) extern rtype name arg;\n#define _vsl_api(rtype,name,arg) extern rtype name##_ arg;\n#define _VSL_API(rtype,name,arg) extern rtype name##_ arg;\n\n/*\n//++\n// VSL CONTINUOUS DISTRIBUTION GENERATOR FUNCTION DECLARATIONS.\n//--\n*/\n/* Cauchy distribution */\n_Vsl_Api(int,vdRngCauchy,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGCAUCHY,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrngcauchy,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngCauchy,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGCAUCHY,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n_vsl_api(int,vsrngcauchy,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n\n/* Uniform distribution */\n_Vsl_Api(int,vdRngUniform,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGUNIFORM,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrnguniform,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngUniform,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGUNIFORM,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n_vsl_api(int,vsrnguniform,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], 
float *, float * ))\n\n/* Gaussian distribution */\n_Vsl_Api(int,vdRngGaussian,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGGAUSSIAN,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrnggaussian,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngGaussian,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGGAUSSIAN,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n_vsl_api(int,vsrnggaussian,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n\n/* GaussianMV distribution */\n_Vsl_Api(int,vdRngGaussianMV,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], MKL_INT , MKL_INT , double *, double *))\n_VSL_API(int,VDRNGGAUSSIANMV,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], MKL_INT *, MKL_INT *, double *, double *))\n_vsl_api(int,vdrnggaussianmv,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], MKL_INT *, MKL_INT *, double *, double *))\n_Vsl_Api(int,vsRngGaussianMV,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], MKL_INT , MKL_INT , float *, float * ))\n_VSL_API(int,VSRNGGAUSSIANMV,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], MKL_INT *, MKL_INT *, float *, float * ))\n_vsl_api(int,vsrnggaussianmv,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], MKL_INT *, MKL_INT *, float *, float * ))\n\n/* Exponential distribution */\n_Vsl_Api(int,vdRngExponential,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGEXPONENTIAL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrngexponential,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngExponential,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGEXPONENTIAL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * 
))\n_vsl_api(int,vsrngexponential,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n\n/* Laplace distribution */\n_Vsl_Api(int,vdRngLaplace,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGLAPLACE,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrnglaplace,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngLaplace,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGLAPLACE,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n_vsl_api(int,vsrnglaplace,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n\n/* Weibull distribution */\n_Vsl_Api(int,vdRngWeibull,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double , double ))\n_VSL_API(int,VDRNGWEIBULL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *))\n_vsl_api(int,vdrngweibull,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *))\n_Vsl_Api(int,vsRngWeibull,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float , float ))\n_VSL_API(int,VSRNGWEIBULL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float * ))\n_vsl_api(int,vsrngweibull,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float * ))\n\n/* Rayleigh distribution */\n_Vsl_Api(int,vdRngRayleigh,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGRAYLEIGH,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrngrayleigh,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngRayleigh,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGRAYLEIGH,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n_vsl_api(int,vsrngrayleigh,(MKL_INT *, 
VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n\n/* Lognormal distribution */\n_Vsl_Api(int,vdRngLognormal,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double , double , double ))\n_VSL_API(int,VDRNGLOGNORMAL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *, double *))\n_vsl_api(int,vdrnglognormal,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *, double *))\n_Vsl_Api(int,vsRngLognormal,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float , float , float ))\n_VSL_API(int,VSRNGLOGNORMAL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float *, float * ))\n_vsl_api(int,vsrnglognormal,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float *, float * ))\n\n/* Gumbel distribution */\n_Vsl_Api(int,vdRngGumbel,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double ))\n_VSL_API(int,VDRNGGUMBEL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_vsl_api(int,vdrnggumbel,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *))\n_Vsl_Api(int,vsRngGumbel,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float ))\n_VSL_API(int,VSRNGGUMBEL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n_vsl_api(int,vsrnggumbel,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float * ))\n\n/* Gamma distribution */\n_Vsl_Api(int,vdRngGamma,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double , double ))\n_VSL_API(int,VDRNGGAMMA,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *))\n_vsl_api(int,vdrnggamma,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *))\n_Vsl_Api(int,vsRngGamma,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float , float ))\n_VSL_API(int,VSRNGGAMMA,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float * 
))\n_vsl_api(int,vsrnggamma,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float * ))\n\n/* Beta distribution */\n_Vsl_Api(int,vdRngBeta,(MKL_INT , VSLStreamStatePtr , MKL_INT , double [], double , double , double , double ))\n_VSL_API(int,VDRNGBETA,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *, double *))\n_vsl_api(int,vdrngbeta,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, double [], double *, double *, double *, double *))\n_Vsl_Api(int,vsRngBeta,(MKL_INT , VSLStreamStatePtr , MKL_INT , float [], float , float , float , float ))\n_VSL_API(int,VSRNGBETA,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float *, float * ))\n_vsl_api(int,vsrngbeta,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, float [], float *, float *, float *, float * ))\n\n/*\n//++\n// VSL DISCRETE DISTRIBUTION GENERATOR FUNCTION DECLARATIONS.\n//--\n*/\n/* Bernoulli distribution */\n_Vsl_Api(int,viRngBernoulli,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], double ))\n_VSL_API(int,VIRNGBERNOULLI,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *))\n_vsl_api(int,virngbernoulli,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *))\n\n/* Uniform distribution */\n_Vsl_Api(int,viRngUniform,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], int , int ))\n_VSL_API(int,VIRNGUNIFORM,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], int *, int *))\n_vsl_api(int,virnguniform,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], int *, int *))\n\n/* UniformBits distribution */\n_Vsl_Api(int,viRngUniformBits,(MKL_INT , VSLStreamStatePtr , MKL_INT , unsigned int []))\n_VSL_API(int,VIRNGUNIFORMBITS,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, unsigned int []))\n_vsl_api(int,virnguniformbits,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, unsigned int []))\n\n/* Geometric distribution */\n_Vsl_Api(int,viRngGeometric,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], double ))\n_VSL_API(int,VIRNGGEOMETRIC,(MKL_INT *, 
VSLStreamStatePtr , MKL_INT *, int [], double *))\n_vsl_api(int,virnggeometric,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *))\n\n/* Binomial distribution */\n_Vsl_Api(int,viRngBinomial,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], int , double ))\n_VSL_API(int,VIRNGBINOMIAL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], int *, double *))\n_vsl_api(int,virngbinomial,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], int *, double *))\n\n/* Hypergeometric distribution */\n_Vsl_Api(int,viRngHypergeometric,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], int , int , int ))\n_VSL_API(int,VIRNGHYPERGEOMETRIC,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], int *, int *, int *))\n_vsl_api(int,virnghypergeometric,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], int *, int *, int *))\n\n/* Negbinomial distribution */\n_Vsl_Api(int,viRngNegbinomial,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], double , double ))\n_Vsl_Api(int,viRngNegBinomial,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], double , double ))\n_VSL_API(int,VIRNGNEGBINOMIAL,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *, double *))\n_vsl_api(int,virngnegbinomial,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *, double *))\n\n/* Poisson distribution */\n_Vsl_Api(int,viRngPoisson,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], double ))\n_VSL_API(int,VIRNGPOISSON,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *))\n_vsl_api(int,virngpoisson,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double *))\n\n/* PoissonV distribution */\n_Vsl_Api(int,viRngPoissonV,(MKL_INT , VSLStreamStatePtr , MKL_INT , int [], double []))\n_VSL_API(int,VIRNGPOISSONV,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double []))\n_vsl_api(int,virngpoissonv,(MKL_INT *, VSLStreamStatePtr , MKL_INT *, int [], double []))\n\n\n/*\n//++\n// VSL SERVICE FUNCTION DECLARATIONS.\n//--\n*/\n/* NewStream - stream creation/initialization 
*/\n_Vsl_Api(int,vslNewStream,(VSLStreamStatePtr* , MKL_INT , unsigned MKL_INT ))\n_vsl_api(int,vslnewstream,(VSLStreamStatePtr* , MKL_INT *, unsigned MKL_INT *))\n_VSL_API(int,VSLNEWSTREAM,(VSLStreamStatePtr* , MKL_INT *, unsigned MKL_INT *))\n\n/* NewStreamEx - advanced stream creation/initialization */\n_Vsl_Api(int,vslNewStreamEx,(VSLStreamStatePtr* , MKL_INT , MKL_INT , const unsigned int[]))\n_vsl_api(int,vslnewstreamex,(VSLStreamStatePtr* , MKL_INT *, MKL_INT *, const unsigned int[]))\n_VSL_API(int,VSLNEWSTREAMEX,(VSLStreamStatePtr* , MKL_INT *, MKL_INT *, const unsigned int[]))\n\n_Vsl_Api(int,vsliNewAbstractStream,(VSLStreamStatePtr* , MKL_INT , unsigned int[], iUpdateFuncPtr))\n_vsl_api(int,vslinewabstractstream,(VSLStreamStatePtr* , MKL_INT *, unsigned int[], iUpdateFuncPtr))\n_VSL_API(int,VSLINEWABSTRACTSTREAM,(VSLStreamStatePtr* , MKL_INT *, unsigned int[], iUpdateFuncPtr))\n\n_Vsl_Api(int,vsldNewAbstractStream,(VSLStreamStatePtr* , MKL_INT , double[], double , double , dUpdateFuncPtr))\n_vsl_api(int,vsldnewabstractstream,(VSLStreamStatePtr* , MKL_INT *, double[], double *, double *, dUpdateFuncPtr))\n_VSL_API(int,VSLDNEWABSTRACTSTREAM,(VSLStreamStatePtr* , MKL_INT *, double[], double *, double *, dUpdateFuncPtr))\n\n_Vsl_Api(int,vslsNewAbstractStream,(VSLStreamStatePtr* , MKL_INT , float[], float , float , sUpdateFuncPtr))\n_vsl_api(int,vslsnewabstractstream,(VSLStreamStatePtr* , MKL_INT *, float[], float *, float *, sUpdateFuncPtr))\n_VSL_API(int,VSLSNEWABSTRACTSTREAM,(VSLStreamStatePtr* , MKL_INT *, float[], float *, float *, sUpdateFuncPtr))\n\n/* DeleteStream - delete stream */\n_Vsl_Api(int,vslDeleteStream,(VSLStreamStatePtr*))\n_vsl_api(int,vsldeletestream,(VSLStreamStatePtr*))\n_VSL_API(int,VSLDELETESTREAM,(VSLStreamStatePtr*))\n\n/* CopyStream - copy all stream information */\n_Vsl_Api(int,vslCopyStream,(VSLStreamStatePtr*, VSLStreamStatePtr))\n_vsl_api(int,vslcopystream,(VSLStreamStatePtr*, 
VSLStreamStatePtr))\n_VSL_API(int,VSLCOPYSTREAM,(VSLStreamStatePtr*, VSLStreamStatePtr))\n\n/* CopyStreamState - copy stream state only */\n_Vsl_Api(int,vslCopyStreamState,(VSLStreamStatePtr, VSLStreamStatePtr))\n_vsl_api(int,vslcopystreamstate,(VSLStreamStatePtr, VSLStreamStatePtr))\n_VSL_API(int,VSLCOPYSTREAMSTATE,(VSLStreamStatePtr, VSLStreamStatePtr))\n\n/* LeapfrogStream - leapfrog method */\n_Vsl_Api(int,vslLeapfrogStream,(VSLStreamStatePtr, MKL_INT , MKL_INT ))\n_vsl_api(int,vslleapfrogstream,(VSLStreamStatePtr, MKL_INT *, MKL_INT *))\n_VSL_API(int,VSLLEAPFROGSTREAM,(VSLStreamStatePtr, MKL_INT *, MKL_INT *))\n\n/* SkipAheadStream - skip-ahead method */\n_Vsl_Api(int,vslSkipAheadStream,(VSLStreamStatePtr, long long int ))\n_vsl_api(int,vslskipaheadstream,(VSLStreamStatePtr, long long int *))\n_VSL_API(int,VSLSKIPAHEADSTREAM,(VSLStreamStatePtr, long long int *))\n\n/* GetStreamStateBrng - get BRNG associated with given stream */\n_Vsl_Api(int,vslGetStreamStateBrng,(VSLStreamStatePtr ))\n_vsl_api(int,vslgetstreamstatebrng,(VSLStreamStatePtr *))\n_VSL_API(int,VSLGETSTREAMSTATEBRNG,(VSLStreamStatePtr *))\n\n/* GetNumRegBrngs - get number of registered BRNGs */\n_Vsl_Api(int,vslGetNumRegBrngs,(void))\n_vsl_api(int,vslgetnumregbrngs,(void))\n_VSL_API(int,VSLGETNUMREGBRNGS,(void))\n\n/* RegisterBrng - register new BRNG */\n_Vsl_Api(int,vslRegisterBrng,(const VSLBRngProperties* ))\n_vsl_api(int,vslregisterbrng,(const VSLBRngProperties* ))\n_VSL_API(int,VSLREGISTERBRNG,(const VSLBRngProperties* ))\n\n/* GetBrngProperties - get BRNG properties */\n_Vsl_Api(int,vslGetBrngProperties,(int , VSLBRngProperties* ))\n_vsl_api(int,vslgetbrngproperties,(int *, VSLBRngProperties* ))\n_VSL_API(int,VSLGETBRNGPROPERTIES,(int *, VSLBRngProperties* ))\n\n\n_Vsl_Api(int,vslSaveStreamF,(VSLStreamStatePtr , char* ))\n_vsl_api(int,vslsavestreamf,(VSLStreamStatePtr *, char* , int ))\n_VSL_API(int,VSLSAVESTREAMF,(VSLStreamStatePtr *, char* , int 
))\n\n_Vsl_Api(int,vslLoadStreamF,(VSLStreamStatePtr *, char* ))\n_vsl_api(int,vslloadstreamf,(VSLStreamStatePtr *, char* , int ))\n_VSL_API(int,VSLLOADSTREAMF,(VSLStreamStatePtr *, char* , int ))\n\n\n/*\n//++\n// VSL CONVOLUTION AND CORRELATION FUNCTION DECLARATIONS.\n//--\n*/\n\n_Vsl_Api(int,vsldConvNewTask,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT []));\n_vsl_api(int,vsldconvnewtask,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n_VSL_API(int,VSLDCONVNEWTASK,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n\n_Vsl_Api(int,vslsConvNewTask,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT []));\n_vsl_api(int,vslsconvnewtask,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n_VSL_API(int,VSLSCONVNEWTASK,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n\n_Vsl_Api(int,vsldCorrNewTask,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT []));\n_vsl_api(int,vsldcorrnewtask,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n_VSL_API(int,VSLDCORRNEWTASK,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n\n_Vsl_Api(int,vslsCorrNewTask,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT []));\n_vsl_api(int,vslscorrnewtask,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n_VSL_API(int,VSLSCORRNEWTASK,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT []));\n\n\n_Vsl_Api(int,vsldConvNewTask1D,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT ));\n_vsl_api(int,vsldconvnewtask1d,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n_VSL_API(int,VSLDCONVNEWTASK1D,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n\n_Vsl_Api(int,vslsConvNewTask1D,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT 
));\n_vsl_api(int,vslsconvnewtask1d,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n_VSL_API(int,VSLSCONVNEWTASK1D,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n\n_Vsl_Api(int,vsldCorrNewTask1D,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT ));\n_vsl_api(int,vsldcorrnewtask1d,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n_VSL_API(int,VSLDCORRNEWTASK1D,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n\n_Vsl_Api(int,vslsCorrNewTask1D,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT ));\n_vsl_api(int,vslscorrnewtask1d,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n_VSL_API(int,VSLSCORRNEWTASK1D,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* ));\n\n\n_Vsl_Api(int,vsldConvNewTaskX,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT [], double [], MKL_INT []));\n_vsl_api(int,vsldconvnewtaskx,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], double [], MKL_INT []));\n_VSL_API(int,VSLDCONVNEWTASKX,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], double [], MKL_INT []));\n\n_Vsl_Api(int,vslsConvNewTaskX,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT [], float [], MKL_INT []));\n_vsl_api(int,vslsconvnewtaskx,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], float [], MKL_INT []));\n_VSL_API(int,VSLSCONVNEWTASKX,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], float [], MKL_INT []));\n\n_Vsl_Api(int,vsldCorrNewTaskX,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT [], double [], MKL_INT []));\n_vsl_api(int,vsldcorrnewtaskx,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], double [], MKL_INT []));\n_VSL_API(int,VSLDCORRNEWTASKX,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], double [], MKL_INT 
[]));\n\n_Vsl_Api(int,vslsCorrNewTaskX,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT [], MKL_INT [], MKL_INT [], float [], MKL_INT []));\n_vsl_api(int,vslscorrnewtaskx,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], float [], MKL_INT []));\n_VSL_API(int,VSLSCORRNEWTASKX,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT [], MKL_INT [], MKL_INT [], float [], MKL_INT []));\n\n\n_Vsl_Api(int,vsldConvNewTaskX1D,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT , double [], MKL_INT ));\n_vsl_api(int,vsldconvnewtaskx1d,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , double [], MKL_INT* ));\n_VSL_API(int,VSLDCONVNEWTASKX1D,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , double [], MKL_INT* ));\n\n_Vsl_Api(int,vslsConvNewTaskX1D,(VSLConvTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT , float [], MKL_INT ));\n_vsl_api(int,vslsconvnewtaskx1d,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , float [], MKL_INT* ));\n_VSL_API(int,VSLSCONVNEWTASKX1D,(VSLConvTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , float [], MKL_INT* ));\n\n_Vsl_Api(int,vsldCorrNewTaskX1D,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT , double [], MKL_INT ));\n_vsl_api(int,vsldcorrnewtaskx1d,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , double [], MKL_INT* ));\n_VSL_API(int,VSLDCORRNEWTASKX1D,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , double [], MKL_INT* ));\n\n_Vsl_Api(int,vslsCorrNewTaskX1D,(VSLCorrTaskPtr* , MKL_INT , MKL_INT , MKL_INT , MKL_INT , float [], MKL_INT ));\n_vsl_api(int,vslscorrnewtaskx1d,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , float [], MKL_INT* ));\n_VSL_API(int,VSLSCORRNEWTASKX1D,(VSLCorrTaskPtr* , MKL_INT* , MKL_INT* , MKL_INT* , MKL_INT* , float [], MKL_INT* ));\n\n\n_Vsl_Api(int,vslConvDeleteTask,(VSLConvTaskPtr* ));\n_vsl_api(int,vslconvdeletetask,(VSLConvTaskPtr* ));\n_VSL_API(int,VSLCONVDeleteTask,(VSLConvTaskPtr* 
));\n\n_Vsl_Api(int,vslCorrDeleteTask,(VSLCorrTaskPtr* ));\n_vsl_api(int,vslcorrdeletetask,(VSLCorrTaskPtr* ));\n_VSL_API(int,VSLCORRDeleteTask,(VSLCorrTaskPtr* ));\n\n\n_Vsl_Api(int,vslConvCopyTask,(VSLConvTaskPtr* , VSLConvTaskPtr ));\n_vsl_api(int,vslconvcopytask,(VSLConvTaskPtr* , VSLConvTaskPtr ));\n_VSL_API(int,VSLCONVCopyTask,(VSLConvTaskPtr* , VSLConvTaskPtr ));\n\n_Vsl_Api(int,vslCorrCopyTask,(VSLCorrTaskPtr* , VSLCorrTaskPtr ));\n_vsl_api(int,vslcorrcopytask,(VSLCorrTaskPtr* , VSLCorrTaskPtr ));\n_VSL_API(int,VSLCORRCopyTask,(VSLCorrTaskPtr* , VSLCorrTaskPtr ));\n\n\n_Vsl_Api(int,vslConvSetMode,(VSLConvTaskPtr , MKL_INT ));\n_vsl_api(int,vslconvsetmode,(VSLConvTaskPtr , MKL_INT* ));\n_VSL_API(int,VSLCONVSETMOME,(VSLConvTaskPtr , MKL_INT* ));\n\n_Vsl_Api(int,vslCorrSetMode,(VSLCorrTaskPtr , MKL_INT ));\n_vsl_api(int,vslcorrsetmode,(VSLCorrTaskPtr , MKL_INT* ));\n_VSL_API(int,VSLCORRSETMODE,(VSLCorrTaskPtr , MKL_INT* ));\n\n\n_Vsl_Api(int,vslConvSetInternalPrecision,(VSLConvTaskPtr , MKL_INT ));\n_vsl_api(int,vslconvsetinternalprecision,(VSLConvTaskPtr , MKL_INT* ));\n_VSL_API(int,VSLCONVSETINTERNALPRECISION,(VSLConvTaskPtr , MKL_INT* ));\n\n_Vsl_Api(int,vslCorrSetInternalPrecision,(VSLCorrTaskPtr , MKL_INT ));\n_vsl_api(int,vslcorrsetinternalprecision,(VSLCorrTaskPtr , MKL_INT* ));\n_VSL_API(int,VSLCORRSETINTERNALPRECISION,(VSLCorrTaskPtr , MKL_INT* ));\n\n\n_Vsl_Api(int,vslConvSetStart,(VSLConvTaskPtr , MKL_INT []));\n_vsl_api(int,vslconvsetstart,(VSLConvTaskPtr , MKL_INT []));\n_VSL_API(int,VSLCONVSETSTART,(VSLConvTaskPtr , MKL_INT []));\n\n_Vsl_Api(int,vslCorrSetStart,(VSLCorrTaskPtr , MKL_INT []));\n_vsl_api(int,vslcorrsetstart,(VSLCorrTaskPtr , MKL_INT []));\n_VSL_API(int,VSLCORRSETSTART,(VSLCorrTaskPtr , MKL_INT []));\n\n\n_Vsl_Api(int,vslConvSetDecimation,(VSLConvTaskPtr , MKL_INT []));\n_vsl_api(int,vslconvsetdecimation,(VSLConvTaskPtr , MKL_INT []));\n_VSL_API(int,VSLCONVSETDECIMATION,(VSLConvTaskPtr , MKL_INT 
[]));\n\n_Vsl_Api(int,vslCorrSetDecimation,(VSLCorrTaskPtr , MKL_INT []));\n_vsl_api(int,vslcorrsetdecimation,(VSLCorrTaskPtr , MKL_INT []));\n_VSL_API(int,VSLCORRSETDECIMATION,(VSLCorrTaskPtr , MKL_INT []));\n\n\n_Vsl_Api(int,vsldConvExec,(VSLConvTaskPtr , double [], MKL_INT [], double [], MKL_INT [], double [], MKL_INT []));\n_vsl_api(int,vsldconvexec,(VSLConvTaskPtr , double [], MKL_INT [], double [], MKL_INT [], double [], MKL_INT []));\n_VSL_API(int,VSLDCONVEXEC,(VSLConvTaskPtr , double [], MKL_INT [], double [], MKL_INT [], double [], MKL_INT []));\n\n_Vsl_Api(int,vslsConvExec,(VSLConvTaskPtr , float [], MKL_INT [], float [], MKL_INT [], float [], MKL_INT []));\n_vsl_api(int,vslsconvexec,(VSLConvTaskPtr , float [], MKL_INT [], float [], MKL_INT [], float [], MKL_INT []));\n_VSL_API(int,VSLSCONVEXEC,(VSLConvTaskPtr , float [], MKL_INT [], float [], MKL_INT [], float [], MKL_INT []));\n\n_Vsl_Api(int,vsldCorrExec,(VSLCorrTaskPtr , double [], MKL_INT [], double [], MKL_INT [], double [], MKL_INT []));\n_vsl_api(int,vsldcorrexec,(VSLCorrTaskPtr , double [], MKL_INT [], double [], MKL_INT [], double [], MKL_INT []));\n_VSL_API(int,VSLDCORREXEC,(VSLCorrTaskPtr , double [], MKL_INT [], double [], MKL_INT [], double [], MKL_INT []));\n\n_Vsl_Api(int,vslsCorrExec,(VSLCorrTaskPtr , float [], MKL_INT [], float [], MKL_INT [], float [], MKL_INT []));\n_vsl_api(int,vslscorrexec,(VSLCorrTaskPtr , float [], MKL_INT [], float [], MKL_INT [], float [], MKL_INT []));\n_VSL_API(int,VSLSCORREXEC,(VSLCorrTaskPtr , float [], MKL_INT [], float [], MKL_INT [], float [], MKL_INT []));\n\n\n_Vsl_Api(int,vsldConvExec1D,(VSLConvTaskPtr , double [], MKL_INT , double [], MKL_INT , double [], MKL_INT ));\n_vsl_api(int,vsldconvexec1d,(VSLConvTaskPtr , double [], MKL_INT* , double [], MKL_INT* , double [], MKL_INT* ));\n_VSL_API(int,VSLDCONVEXEC1D,(VSLConvTaskPtr , double [], MKL_INT* , double [], MKL_INT* , double [], MKL_INT* ));\n\n_Vsl_Api(int,vslsConvExec1D,(VSLConvTaskPtr , float [], 
MKL_INT , float [], MKL_INT , float [], MKL_INT ));\n_vsl_api(int,vslsconvexec1d,(VSLConvTaskPtr , float [], MKL_INT* , float [], MKL_INT* , float [], MKL_INT* ));\n_VSL_API(int,VSLSCONVEXEC1D,(VSLConvTaskPtr , float [], MKL_INT* , float [], MKL_INT* , float [], MKL_INT* ));\n\n_Vsl_Api(int,vsldCorrExec1D,(VSLCorrTaskPtr , double [], MKL_INT , double [], MKL_INT , double [], MKL_INT ));\n_vsl_api(int,vsldcorrexec1d,(VSLCorrTaskPtr , double [], MKL_INT* , double [], MKL_INT* , double [], MKL_INT* ));\n_VSL_API(int,VSLDCORREXEC1D,(VSLCorrTaskPtr , double [], MKL_INT* , double [], MKL_INT* , double [], MKL_INT* ));\n\n_Vsl_Api(int,vslsCorrExec1D,(VSLCorrTaskPtr , float [], MKL_INT , float [], MKL_INT , float [], MKL_INT ));\n_vsl_api(int,vslscorrexec1d,(VSLCorrTaskPtr , float [], MKL_INT* , float [], MKL_INT* , float [], MKL_INT* ));\n_VSL_API(int,VSLSCORREXEC1D,(VSLCorrTaskPtr , float [], MKL_INT* , float [], MKL_INT* , float [], MKL_INT* ));\n\n\n_Vsl_Api(int,vsldConvExecX,(VSLConvTaskPtr , double [], MKL_INT [], double [], MKL_INT []));\n_vsl_api(int,vsldconvexecx,(VSLConvTaskPtr , double [], MKL_INT [], double [], MKL_INT []));\n_VSL_API(int,VSLDCONVEXECX,(VSLConvTaskPtr , double [], MKL_INT [], double [], MKL_INT []));\n\n_Vsl_Api(int,vslsConvExecX,(VSLConvTaskPtr , float [], MKL_INT [], float [], MKL_INT []));\n_vsl_api(int,vslsconvexecx,(VSLConvTaskPtr , float [], MKL_INT [], float [], MKL_INT []));\n_VSL_API(int,VSLSCONVEXECX,(VSLConvTaskPtr , float [], MKL_INT [], float [], MKL_INT []));\n\n_Vsl_Api(int,vsldCorrExecX,(VSLCorrTaskPtr , double [], MKL_INT [], double [], MKL_INT []));\n_vsl_api(int,vsldcorrexecx,(VSLCorrTaskPtr , double [], MKL_INT [], double [], MKL_INT []));\n_VSL_API(int,VSLDCORREXECX,(VSLCorrTaskPtr , double [], MKL_INT [], double [], MKL_INT []));\n\n_Vsl_Api(int,vslsCorrExecX,(VSLCorrTaskPtr , float [], MKL_INT [], float [], MKL_INT []));\n_vsl_api(int,vslscorrexecx,(VSLCorrTaskPtr , float [], MKL_INT [], float [], MKL_INT 
[]));\n_VSL_API(int,VSLSCORREXECX,(VSLCorrTaskPtr , float [], MKL_INT [], float [], MKL_INT []));\n\n\n_Vsl_Api(int,vsldConvExecX1D,(VSLConvTaskPtr , double [], MKL_INT , double [], MKL_INT ));\n_vsl_api(int,vsldconvexecx1d,(VSLConvTaskPtr , double [], MKL_INT* , double [], MKL_INT* ));\n_VSL_API(int,VSLDCONVEXECX1D,(VSLConvTaskPtr , double [], MKL_INT* , double [], MKL_INT* ));\n\n_Vsl_Api(int,vslsConvExecX1D,(VSLConvTaskPtr , float [], MKL_INT , float [], MKL_INT ));\n_vsl_api(int,vslsconvexecx1d,(VSLConvTaskPtr , float [], MKL_INT* , float [], MKL_INT* ));\n_VSL_API(int,VSLSCONVEXECX1D,(VSLConvTaskPtr , float [], MKL_INT* , float [], MKL_INT* ));\n\n_Vsl_Api(int,vsldCorrExecX1D,(VSLCorrTaskPtr , double [], MKL_INT , double [], MKL_INT ));\n_vsl_api(int,vsldcorrexecx1d,(VSLCorrTaskPtr , double [], MKL_INT* , double [], MKL_INT* ));\n_VSL_API(int,VSLDCORREXECX1D,(VSLCorrTaskPtr , double [], MKL_INT* , double [], MKL_INT* ));\n\n_Vsl_Api(int,vslsCorrExecX1D,(VSLCorrTaskPtr , float [], MKL_INT , float [], MKL_INT ));\n_vsl_api(int,vslscorrexecx1d,(VSLCorrTaskPtr , float [], MKL_INT* , float [], MKL_INT* ));\n_VSL_API(int,VSLSCORREXECX1D,(VSLCorrTaskPtr , float [], MKL_INT* , float [], MKL_INT* ));\n\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* __MKL_VSL_FUNCTIONS_H__ */\n" }, { "alpha_fraction": 0.6896744966506958, "alphanum_fraction": 0.6997755169868469, "avg_line_length": 33.269229888916016, "blob_id": "a74c46422f736a9b35851aea48ad7f930577765d", "content_id": "914d121d00db3785de767619f25bb8fcdee93d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 108, "num_lines": 52, "path": "/src/cuda/level2/neuralnet/include/layer_full.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can 
redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"layer_full.h\"\n\nLayerFull::LayerFull() {\n dims_[2] = 1;\n dims_[3] = 1;\n filters_.dims(2) = 1;\n filters_.dims(3) = 1;\n}\n\nvoid LayerFull::Init(const mxArray *mx_layer, const Layer *prev_layer) {\n mexAssertMsg(mexIsField(mx_layer, \"channels\"), \"The 'full' type layer must contain the 'channels' field\");\n filters_.dims(1) = prev_layer->length();\n}\n\nvoid LayerFull::TransformForward(Layer *prev_layer, PassNum passnum) {\n Prod(prev_layer->activ_mat_, false, filters_.get(), true, activ_mat_);\n activ_mat_.Validate();\n}\n\nvoid LayerFull::TransformBackward(Layer *prev_layer) {\n Prod(deriv_mat_, false, filters_.get(), false, prev_layer->deriv_mat_);\n prev_layer->deriv_mat_.Validate();\n}\n\nvoid LayerFull::WeightGrads(Layer *prev_layer, GradInd gradind) {\n if (gradind == GradInd::First) {\n Prod(deriv_mat_, true, prev_layer->activ_mat_, false, filters_.der());\n filters_.der() *= (lr_coef_ / dims_[0]);\n } else if (gradind == GradInd::Second) {\n Prod(deriv_mat_, true, prev_layer->activ_mat_, false, filters_.der2());\n filters_.der2() *= (lr_coef_ / dims_[0]);\n }\n}\n" }, { "alpha_fraction": 0.6529850959777832, "alphanum_fraction": 0.6828358173370361, "avg_line_length": 17.14285659790039, "blob_id": "f79880c79d4fe6db8ce3f6cbf850b00c8a622c04", "content_id": "b9ecb0d82eb551c2089276b23238d60d675719d7", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C", "length_bytes": 268, "license_type": "no_license", "max_line_length": 33, "num_lines": 14, "path": "/src/cuda/level2/nw/needle.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#ifndef _NEEDLE_H_\r\n#define _NEEDLE_H_\r\n\r\n#ifdef RD_WG_SIZE_0_0\r\n#define BLOCK_SIZE RD_WG_SIZE_0_0\r\n#elif defined(RD_WG_SIZE_0)\r\n#define BLOCK_SIZE RD_WG_SIZE_0\r\n#elif defined(RD_WG_SIZE)\r\n#define BLOCK_SIZE RD_WG_SIZE\r\n#else\r\n#define BLOCK_SIZE 16\r\n#endif\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.7161458134651184, "alphanum_fraction": 0.7204861044883728, "avg_line_length": 20.735849380493164, "blob_id": "c744c19d6e8503fd68398172924d6685cbb41a06", "content_id": "2eb1dcd9072601c29a43196702d1776de77c92b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 69, "num_lines": 53, "path": "/src/cuda/level2/neuralnet/include/params.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _PARAMS_H_\n#define _PARAMS_H_\n\n#include \"mex_util.h\"\n\nclass Params {\n\npublic:\n size_t batchsize_;\n size_t epochs_;\n size_t test_epochs_;\n ftype alpha_;\n ftype shift_;\n ftype testshift_;\n ftype beta_;\n ftype decay_;\n ftype momentum_;\n bool shuffle_;\n std::string lossfun_;\n int normfun_;\n int verbose_;\n int seed_;\n bool fast_;\n size_t memory_;\n int gpu_;\n MatCPU classcoefs_;\n\n Params();\n\n void Init(const mxArray *params);\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.5180073976516724, "alphanum_fraction": 0.5348367691040039, "avg_line_length": 39.14864730834961, "blob_id": "19456772a925dc2874dc58af92a0dfab75135741", "content_id": "8e02cf3afee72cf15d11c9ed6dbac9a30d60076b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2971, "license_type": "no_license", "max_line_length": 544, "num_lines": 74, "path": "/analysis/zemaitis_1/process_metrics.py", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nbenchmarks = ['devicememory', 'maxflops', 'bfs', 'gemm', 'sort', 'pathfinder', 'cfd', 'dwt2d', 'kmeans', 'lavamd', 'mandelbrot', 'nw', 'particlefilter_float', 'particlefilter_naive', 'srad', 'where']\nmetrics = ['flop_count_dp','flop_count_sp','inst_fp_32','inst_fp_64','inst_integer','inst_bit_convert','inst_control','inst_compute_ld_st','inst_misc','inst_inter_thread_communication', 'sm_efficiency','achieved_occupancy','ipc','branch_efficiency','warp_execution_efficiency','shared_store_transactions','shared_load_transactions','local_load_transactions','local_store_transactions','gld_transactions','gst_transactions','dram_read_transactions','dram_write_transactions','flop_count_sp_special','inst_executed','cf_executed','ldst_executed']\n\nkernel_delim = 'Kernel: '\ncol_1 = (0,0+len('Invocations'))\ncol_2 = (col_1[1], col_1[1]+len(' Metric Name'))\ncol_3 = (col_2[1], col_2[1]+len(' Metric 
Description'))\ncol_4 = (col_3[1], col_3[1]+len(' Min'))\ncol_5 = (col_4[1], col_4[1]+len(' Max'))\ncol_6 = (col_5[1], col_5[1]+len(' Avg'))\n\ndef is_kernel(line):\n return kernel_delim in line\n\ndef parse_kernel(line):\n return line.split(kernel_delim)[1].split('(')[0].strip()\n\ndef parse_val(val):\n try:\n return float(val[:-1])\n except:\n return int(val[:-1])\n\nres = pd.DataFrame()\nres['metric'] = metrics\nres.set_index('metric', inplace=True)\n\nfor benchmark in benchmarks:\n for size in range(1,5):\n name = '%s_%s' % (benchmark, size)\n print(name)\n res[name] = 'n/a'\n # Open file\n try:\n if 'particlefilter' in name:\n f = open('particlefilter/%s/%d' % (benchmark.split('_')[1], size))\n else:\n f = open('%s/%d' % (benchmark,size))\n except:\n print('cant open %s %d' % (benchmark,size))\n continue\n # Read intro lines\n [f.readline() for i in range(0,7)]\n # Start parsing\n kernel = ''\n for line in f:\n # Parse kernel\n if is_kernel(line):\n kernel = parse_kernel(line)\n continue\n if not any([metric in line for metric in metrics]):\n print('\\t' + line)\n continue\n # Parse metric\n metric = line[col_2[0]:col_2[1]].strip()\n val = line[col_6[0]:col_6[1]].strip()\n # Parse metric value\n if not val.isdigit():\n val = parse_val(val)\n else:\n val = int(val)\n #print('\\t' + name)\n #print('%s, %s, %s'%(kernel,metric,str(type(val))), end='\\t')\n #print(val)\n if res.at[metric, name] == 'n/a':\n res.at[metric, name] = val\n elif res.at[metric, name] < val:\n res.at[metric, name] = val\n\nres.to_csv(open('analysis.csv', 'w'))\n\nprint('Done.')\n" }, { "alpha_fraction": 0.6390255093574524, "alphanum_fraction": 0.6515422463417053, "avg_line_length": 35.6721305847168, "blob_id": "03cb1129bf7459082f661879da6e74e71f55bb95", "content_id": "a5eb9826ef5f1ef24ed2d45aff410a86b3043ae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4474, "license_type": "no_license", "max_line_length": 86, "num_lines": 122, 
"path": "/src/cuda/level2/neuralnet/include/params.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"params.h\"\n\nParams::Params() {\n batchsize_ = 32;\n epochs_ = 1;\n test_epochs_ = 1;\n alpha_ = 1.0;\n shift_ = 0;\n testshift_ = 0;\n beta_ = 0;\n momentum_ = 0;\n decay_ = 0;\n shuffle_ = false;\n lossfun_ = \"logreg\";\n normfun_ = 1;\n verbose_ = 0;\n seed_ = 0;\n fast_ = true;\n memory_ = 512; // Megabytes of additional memory for CUDNN operations\n gpu_ = 0;\n}\n\nvoid Params::Init(const mxArray *mx_params) {\n mexAssertMsg(mexIsStruct(mx_params), \"In 'Params::Init' the array in not a struct\");\n\n if (mexIsField(mx_params, \"batchsize\")) {\n batchsize_ = (size_t) mexGetScalar(mexGetField(mx_params, \"batchsize\"));\n mexAssertMsg(batchsize_ > 0, \"Batchsize must be positive\");\n }\n if (mexIsField(mx_params, \"epochs\")) {\n epochs_ = (size_t) mexGetScalar(mexGetField(mx_params, \"epochs\"));\n mexAssertMsg(epochs_ > 0, \"Epochs number must be positive\");\n }\n if (mexIsField(mx_params, \"testepochs\")) {\n test_epochs_ = (size_t) mexGetScalar(mexGetField(mx_params, \"testepochs\"));\n mexAssertMsg(test_epochs_ > 0, \"Epochs-test number must be positive\");\n }\n if (mexIsField(mx_params, \"alpha\")) {\n 
alpha_ = mexGetScalar(mexGetField(mx_params, \"alpha\"));\n mexAssertMsg(alpha_ >= 0, \"alpha must be nonnegative\");\n }\n if (mexIsField(mx_params, \"shift\")) {\n shift_ = mexGetScalar(mexGetField(mx_params, \"shift\"));\n mexAssertMsg(shift_ >= 0, \"Shift must be nonnegative\");\n }\n if (mexIsField(mx_params, \"testshift\")) {\n testshift_ = (mexGetScalar(mexGetField(mx_params, \"testshift\")));\n mexAssertMsg(testshift_ >= 0, \"Testshift must be nonnegative\");\n }\n if (mexIsField(mx_params, \"beta\")) {\n beta_ = mexGetScalar(mexGetField(mx_params, \"beta\"));\n mexAssertMsg(beta_ >= 0, \"beta must be nonnegative\");\n mexAssertMsg(shift_ * beta_ == 0, \"Both Shift and Beta cannot be positive\");\n }\n if (mexIsField(mx_params, \"momentum\")) {\n momentum_ = mexGetScalar(mexGetField(mx_params, \"momentum\"));\n mexAssertMsg(0 <= momentum_ && momentum_ < 1, \"Momentum is out of range [0, 1)\");\n }\n if (mexIsField(mx_params, \"decay\")) {\n decay_ = mexGetScalar(mexGetField(mx_params, \"decay\"));\n mexAssertMsg(0 <= decay_ && decay_ < 1, \"Decay is out of range [0, 1)\");\n }\n if (mexIsField(mx_params, \"shuffle\")) {\n shuffle_ = (mexGetScalar(mexGetField(mx_params, \"shuffle\")) > 0);\n }\n if (mexIsField(mx_params, \"lossfun\")) {\n lossfun_ = mexGetString(mexGetField(mx_params, \"lossfun\"));\n mexAssertMsg(lossfun_ == \"logreg\" || lossfun_ == \"L-norm\",\n \"Unknown loss function in params\");\n }\n if (mexIsField(mx_params, \"normfun\")) {\n normfun_ = (int) mexGetScalar(mexGetField(mx_params, \"normfun\"));\n mexAssertMsg(normfun_ == 1 || normfun_ == 2,\n \"Normfun might be equal to 1 or 2\");\n }\n if (mexIsField(mx_params, \"verbose\")) {\n verbose_ = (int) mexGetScalar(mexGetField(mx_params, \"verbose\"));\n mexAssertMsg(0 <= verbose_ && verbose_ <= 5,\n \"Verbose must be from 0 to 4\");\n }\n if (mexIsField(mx_params, \"seed\")) {\n seed_ = (int) mexGetScalar(mexGetField(mx_params, \"seed\"));\n }\n if (mexIsField(mx_params, \"fast\")) {\n 
fast_ = (mexGetScalar(mexGetField(mx_params, \"fast\")) > 0);\n // I don't know why the below is needed\n /* if (!fast_ && shift_ > 0) { // beta = 0\n beta_ = 1;\n } */\n }\n if (mexIsField(mx_params, \"memory\")) {\n memory_ = (size_t) mexGetScalar(mexGetField(mx_params, \"memory\"));\n }\n if (mexIsField(mx_params, \"gpu\")) {\n gpu_ = (int) mexGetScalar(mexGetField(mx_params, \"gpu\"));\n mexAssertMsg(0 <= gpu_ , \"GPU index should be non-negative\");\n }\n if (mexIsField(mx_params, \"classcoefs\")) {\n mexGetMatrix(mexGetField(mx_params, \"classcoefs\"), classcoefs_);\n mexAssertMsg(classcoefs_.size2() == 1,\n \"Classcoefs should be an 1xN vector\");\n }\n}\n" }, { "alpha_fraction": 0.48863810300827026, "alphanum_fraction": 0.5086167454719543, "avg_line_length": 36.255680084228516, "blob_id": "659b97e17831a3c38e4d1321faeb4d0479ef8839", "content_id": "8130db56ba9a1b79691eb8358741b834404523df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6557, "license_type": "no_license", "max_line_length": 173, "num_lines": 176, "path": "/src/cuda/level2/lavamd/lavaMD.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "//\t14 APR 2011 Lukasz G. 
Szafaryn\n\n#include <stdio.h>\t\t\t\t\t// (in path known to compiler)\t\t\tneeded by printf\n#include <stdlib.h>\t\t\t\t\t// (in path known to compiler)\t\t\tneeded by malloc\n#include <stdbool.h>\t\t\t\t// (in path known to compiler)\t\t\tneeded by true/false\n#include \"./util/num/num.h\"\t\t\t\t// (in path specified here)\n#include \"./lavaMD.h\"\t\t\t\t\t\t// (in the current directory)\n#include \"./kernel/kernel_gpu_cuda_wrapper.h\"\t// (in library path specified here)\n\n\nvoid addBenchmarkSpecOptions(OptionParser &op) {\n op.addOption(\"boxes1d\", OPT_INT, \"0\",\n \"specify number of boxes in single dimension, total box number is that^3\");\n}\n\nvoid RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {\n printf(\"Running LavaMD\\n\");\n\n bool quiet = op.getOptionBool(\"quiet\");\n\n // get boxes1d arg value\n int boxes1d = op.getOptionInt(\"boxes1d\");\n if(boxes1d == 0) {\n int probSizes[4] = {1, 8, 64, 64};\n boxes1d = probSizes[op.getOptionInt(\"size\") - 1];\n }\n\n if(!quiet) {\n printf(\"Thread block size of kernel = %d \\n\", NUMBER_THREADS);\n printf(\"Configuration used: boxes1d = %d\\n\", boxes1d);\n }\n\n int passes = op.getOptionInt(\"passes\");\n for(int i = 0; i < passes; i++) {\n if(!quiet) { printf(\"Pass %d: \", i); }\n runTest(resultDB, op, boxes1d);\n if(!quiet) { printf(\"Done.\\n\"); }\n }\n}\n\nvoid runTest(ResultDatabase &resultDB, OptionParser &op, int boxes1d) {\n // random generator seed set to random value - time in this case\n srand(SEED);\n\n // counters\n int i, j, k, l, m, n;\n\n // system memory\n par_str par_cpu;\n dim_str dim_cpu;\n box_str* box_cpu;\n FOUR_VECTOR* rv_cpu;\n fp* qv_cpu;\n FOUR_VECTOR* fv_cpu;\n int nh;\n\n dim_cpu.boxes1d_arg = boxes1d;\n par_cpu.alpha = 0.5;\n\n // total number of boxes\n dim_cpu.number_boxes = dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg;\n\n // how many particles space has in each direction\n dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX;\n 
dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR);\n dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(fp);\n\n // box array\n dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);\n\n // allocate boxes\n box_cpu = (box_str*)malloc(dim_cpu.box_mem);\n\n // initialize number of home boxes\n nh = 0;\n\n // home boxes in z direction\n for(i=0; i<dim_cpu.boxes1d_arg; i++){\n // home boxes in y direction\n for(j=0; j<dim_cpu.boxes1d_arg; j++){\n // home boxes in x direction\n for(k=0; k<dim_cpu.boxes1d_arg; k++){\n\n // current home box\n box_cpu[nh].x = k;\n box_cpu[nh].y = j;\n box_cpu[nh].z = i;\n box_cpu[nh].number = nh;\n box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX;\n\n // initialize number of neighbor boxes\n box_cpu[nh].nn = 0;\n\n // neighbor boxes in z direction\n for(l=-1; l<2; l++){\n // neighbor boxes in y direction\n for(m=-1; m<2; m++){\n // neighbor boxes in x direction\n for(n=-1; n<2; n++){\n\n // check if (this neighbor exists) and (it is not the same as home box)\n if(\t\t(((i+l)>=0 && (j+m)>=0 && (k+n)>=0)==true && ((i+l)<dim_cpu.boxes1d_arg && (j+m)<dim_cpu.boxes1d_arg && (k+n)<dim_cpu.boxes1d_arg)==true)\t&&\n (l==0 && m==0 && n==0)==false\t){\n\n // current neighbor box\n box_cpu[nh].nei[box_cpu[nh].nn].x = (k+n);\n box_cpu[nh].nei[box_cpu[nh].nn].y = (j+m);\n box_cpu[nh].nei[box_cpu[nh].nn].z = (i+l);\n box_cpu[nh].nei[box_cpu[nh].nn].number =\t(box_cpu[nh].nei[box_cpu[nh].nn].z * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg) + \n (box_cpu[nh].nei[box_cpu[nh].nn].y * dim_cpu.boxes1d_arg) + \n box_cpu[nh].nei[box_cpu[nh].nn].x;\n box_cpu[nh].nei[box_cpu[nh].nn].offset = box_cpu[nh].nei[box_cpu[nh].nn].number * NUMBER_PAR_PER_BOX;\n\n // increment neighbor box\n box_cpu[nh].nn = box_cpu[nh].nn + 1;\n\n }\n\n } // neighbor boxes in x direction\n } // neighbor boxes in y direction\n } // neighbor boxes in z direction\n\n // increment home box\n nh = nh + 1;\n\n } // home boxes in x direction\n } // home boxes in y direction\n } // home 
boxes in z direction\n\n // input (distances)\n rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);\n for(i=0; i<dim_cpu.space_elem; i=i+1){\n rv_cpu[i].v = (rand()%10 + 1) / 10.0;\t\t\t// get a number in the range 0.1 - 1.0\n rv_cpu[i].x = (rand()%10 + 1) / 10.0;\t\t\t// get a number in the range 0.1 - 1.0\n rv_cpu[i].y = (rand()%10 + 1) / 10.0;\t\t\t// get a number in the range 0.1 - 1.0\n rv_cpu[i].z = (rand()%10 + 1) / 10.0;\t\t\t// get a number in the range 0.1 - 1.0\n }\n\n // input (charge)\n qv_cpu = (fp*)malloc(dim_cpu.space_mem2);\n for(i=0; i<dim_cpu.space_elem; i=i+1){\n qv_cpu[i] = (rand()%10 + 1) / 10.0;\t\t\t// get a number in the range 0.1 - 1.0\n }\n\n // output (forces)\n fv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem);\n for(i=0; i<dim_cpu.space_elem; i=i+1){\n fv_cpu[i].v = 0;\t\t\t\t\t\t\t\t// set to 0, because kernels keeps adding to initial value\n fv_cpu[i].x = 0;\t\t\t\t\t\t\t\t// set to 0, because kernels keeps adding to initial value\n fv_cpu[i].y = 0;\t\t\t\t\t\t\t\t// set to 0, because kernels keeps adding to initial value\n fv_cpu[i].z = 0;\t\t\t\t\t\t\t\t// set to 0, because kernels keeps adding to initial value\n }\n\n kernel_gpu_cuda_wrapper(par_cpu,\n dim_cpu,\n box_cpu,\n rv_cpu,\n qv_cpu,\n fv_cpu,\n resultDB);\n\n string outfile = op.getOptionString(\"outputFile\");\n if (outfile != \"\") {\n FILE *fptr;\n fptr = fopen(\"result.txt\", \"w\");\t\n for(i=0; i<dim_cpu.space_elem; i=i+1){\n fprintf(fptr, \"%f, %f, %f, %f\\n\", fv_cpu[i].v, fv_cpu[i].x, fv_cpu[i].y, fv_cpu[i].z);\n }\n fclose(fptr);\n }\n\n free(rv_cpu);\n free(qv_cpu);\n free(fv_cpu);\n free(box_cpu);\n}\n" }, { "alpha_fraction": 0.5734801292419434, "alphanum_fraction": 0.5849688649177551, "avg_line_length": 31.640625, "blob_id": "b77a785bd99df30f7408ac54f1d5cfcc79086c7f", "content_id": "3abc3c565aaeae5a9594b97f389a054e82393446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2089, "license_type": 
"no_license", "max_line_length": 103, "num_lines": 64, "path": "/data/bfs/datagen.py", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom optparse import OptionParser\nimport numpy as np\nimport random\nimport sys\n\nMIN_NODES = 20\nMAX_NODES = 2**31\nMIN_EDGES = 2\nMAX_EDGES = 8\nMIN_WEIGHT = 1\nMAX_WEIGHT = 10\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-n', '--nodes', type=\"int\", default=100000, help='Number of nodes in the graph')\n (options, args) = parser.parse_args()\n\n # check options\n if options.nodes < MIN_NODES:\n print(\"Error: Number of nodes must be greater than 20.\")\n sys.exit()\n if options.nodes > MAX_NODES:\n print(\"Error: Number of nodes must be less than 2^31.\")\n sys.exit()\n\n print(\"Generating graph with %d nodes\" % options.nodes)\n\n # seed random number generator\n random.seed(datetime.now())\n edge_map = {}\n cost_map = {}\n\n # for each node, generate number of edges\n for i in range(options.nodes):\n edge_map[i] = []\n cost_map[i] = []\n num_edges = random.randint(MIN_EDGES, MAX_EDGES)\n # for each edge, generate destination and cost\n for j in range(num_edges):\n dest = np.random.uniform(0, options.nodes)\n cost = random.randint(MIN_WEIGHT, MAX_WEIGHT)\n edge_map[i].append(dest)\n cost_map[i].append(cost)\n\n total_edges = 0\n # write graph to file\n with open(\"bfs_%d\" % options.nodes, 'w') as f:\n # write total number of nodes\n f.write(\"%d\\n\" % options.nodes)\n # for each node, write number of edges\n for i in range(options.nodes):\n curr_edges = len(edge_map[i])\n f.write(\"%d %d\\n\" % (total_edges, curr_edges))\n total_edges += curr_edges\n source = np.random.uniform(0, options.nodes)\n # write source node\n f.write(\"%d\\n\" % source)\n # write total number of edges\n f.write(\"%d\\n\" % total_edges)\n # for each edge, write destination and cost\n for i in range(options.nodes):\n for j in range(len(edge_map[i])):\n f.write(\"%d 
%d\\n\" % (edge_map[i][j], cost_map[i][j]))\n" }, { "alpha_fraction": 0.4588833451271057, "alphanum_fraction": 0.4653368592262268, "avg_line_length": 43.0818977355957, "blob_id": "dccf6fd20ba750dcef7649644b59e19da132875b", "content_id": "8bea6baa14139e9fb4917e16fed47f609b42cd8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10227, "license_type": "no_license", "max_line_length": 119, "num_lines": 232, "path": "/src/cuda/level2/kmeans/kmeans.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*****************************************************************************/\n/*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */\n/*By downloading, copying, installing or using the software you agree */\n/*to this license. If you do not agree to this license, do not download, */\n/*install, copy or use the software. */\n/* */\n/* */\n/*Copyright (c) 2005 Northwestern University */\n/*All rights reserved. */\n\n/*Redistribution of the software in source and binary forms, */\n/*with or without modification, is permitted provided that the */\n/*following conditions are met: */\n/* */\n/*1 Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* */\n/*2 Redistributions in binary form must reproduce the above copyright */\n/* notice, this list of conditions and the following disclaimer in the */\n/* documentation and/or other materials provided with the distribution.*/\n/* */\n/*3 Neither the name of Northwestern University nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. 
*/\n/* */\n/*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */\n/*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */\n/*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */\n/*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL */\n/*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */\n/*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */\n/*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */\n/*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */\n/*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */\n/*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */\n/*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/*POSSIBILITY OF SUCH DAMAGE. */\n/******************************************************************************/\n\n/*************************************************************************/\n/** File: example.c **/\n/** Description: Takes as input a file: **/\n/** ascii file: containing 1 data point per line **/\n/** binary file: first int is the number of objects **/\n/** 2nd int is the no. of features of each **/\n/** object **/\n/** This example performs a fuzzy c-means clustering **/\n/** on the data. Fuzzy clustering is performed using **/\n/** min to max clusters and the clustering that gets **/\n/** the best score according to a compactness and **/\n/** separation criterion are returned. **/\n/** Author: Wei-keng Liao **/\n/** ECE Department Northwestern University **/\n/** email: [email protected] **/\n/** **/\n/** Edited by: Jay Pisharath **/\n/** Northwestern University. 
**/\n/** **/\n/** ================================================================ **/\n/** **/\n/** Edited by: Shuai Che, David Tarjan, Sang-Ha Lee **/\n/**\t\t\t\t University of Virginia **/\n/** **/\n/** Description:\tNo longer supports fuzzy c-means clustering; **/\n/**\t\t\t\t\tonly regular k-means clustering. **/\n/**\t\t\t\t\tNo longer performs \"validity\" function to analyze\t**/\n/**\t\t\t\t\tcompactness and separation crietria; instead **/\n/**\t\t\t\t\tcalculate root mean squared error. **/\n/** **/\n/*************************************************************************/\n#define _CRT_SECURE_NO_DEPRECATE 1\n#define SEED 7\n\n#include <fcntl.h>\n#include <limits.h>\n#include <math.h>\n#include <omp.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include \"OptionParser.h\"\n#include \"ResultDatabase.h\"\n#include \"kmeans.h\"\n\nextern double wtime(void);\n\n/*---< main() >-------------------------------------------------------------*/\nint setup(ResultDatabase &resultDB, OptionParser &op) {\n srand(SEED); /* seed for future random number generator */\n\n bool verbose = op.getOptionBool(\"verbose\");\n bool quiet = op.getOptionBool(\"quiet\");\n int nloops = op.getOptionInt(\"passes\");\n bool isRMSE = op.getOptionBool(\"rmse\");\n bool isOutput = op.getOptionBool(\"outputCenters\");\n\n float *buf;\n char line[1024];\n\n float threshold = op.getOptionFloat(\"threshold\");\n int max_nclusters = op.getOptionInt(\"maxClusters\");\n int min_nclusters = op.getOptionInt(\"minClusters\");\n int best_nclusters = 0;\n int nfeatures = 0;\n int npoints = 0;\n float len;\n float **features;\n float **cluster_centres = NULL;\n int i, j, index;\n float rmse;\n\n /* ============== I/O begin ==============*/\n // open file if filename is given\n FILE *fp = NULL;\n string infile = op.getOptionString(\"inputFile\");\n if(infile.size() > 0) {\n fp = fopen(infile.c_str(),\"r\");\n if(!fp)\n {\n printf(\"Error: Unable to read 
graph file %s.\\n\", infile.c_str());\n exit(0);\n }\n }\n\n // set npoints and nfeatures\n if(fp) {\n if(!quiet) {\n printf(\"Reading input file...\");\n }\n int n = fscanf(fp, \"%d %d\", &npoints, &nfeatures);\n } else {\n if(!quiet) {\n printf(\"Generating graph with a preset problem size %d\", (int)op.getOptionInt(\"size\"));\n }\n int npointsPresets[4] = {1, 10, 200, 200};\n npoints = npointsPresets[op.getOptionInt(\"size\") - 1] * 10000;\n int nfeaturesPresets[4] = {10, 20, 35, 50};\n nfeatures = nfeaturesPresets[op.getOptionInt(\"size\") - 1];\n }\n\n // allocate space for features[] and read attributes of all objects\n buf = (float *)malloc(npoints * nfeatures * sizeof(float));\n features = (float **)malloc(npoints * sizeof(float *));\n features[0] = (float *)malloc(npoints * nfeatures * sizeof(float));\n // starting index for each point\n for (i = 1; i < npoints; i++) {\n features[i] = features[i - 1] + nfeatures;\n }\n i = 0;\n int id;\n // read/generate features for each point\n for (int point = 0; point < npoints; point++) {\n if(fp) {\n int n = fscanf(fp, \"%d\", &id);\n }\n for (j = 0; j < nfeatures; j++) {\n if(fp) {\n int n = fscanf(fp, \"%f\", &buf[i++]);\n } else {\n buf[i++] = rand() % 256;\n }\n }\n }\n\n // close file\n if(fp) {\n fclose(fp);\n }\n\n if(!quiet) {\n printf(\"\\nNumber of objects: %d\\n\", npoints);\n printf(\"Number of features: %d\\n\", nfeatures);\n }\n /* ============== I/O end ==============*/\n\n // error check for clusters\n if (npoints < min_nclusters) {\n printf(\"Error: min_nclusters(%d) > npoints(%d) -- cannot proceed\\n\",\n min_nclusters, npoints);\n exit(0);\n }\n\n memcpy(features[0], buf,npoints * nfeatures *sizeof(float)); /* now features holds 2-dimensional array of features */\n free(buf);\n\n /* ======================= core of the clustering ===================*/\n\n cluster_centres = NULL;\n index = cluster(npoints, /* number of data points */\n nfeatures, /* number of features for each point */\n features, 
/* array: [npoints][nfeatures] */\n min_nclusters, /* range of min to max number of clusters */\n max_nclusters, threshold, /* loop termination factor */\n &best_nclusters, /* return: number between min and max */\n &cluster_centres, /* return: [best_nclusters][nfeatures] */\n &rmse, /* Root Mean Squared Error */\n isRMSE, /* calculate RMSE */\n nloops,\n resultDB,\n quiet); /* number of iteration for each number of clusters */\n\n /* =============== Command Line Output =============== */\n\n /* cluster center coordinates\n :displayed only for when k=1*/\n if(!quiet) {\n if ((min_nclusters == max_nclusters) && (isOutput == 1)) {\n printf(\"\\n================= Centroid Coordinates =================\\n\");\n for (i = 0; i < max_nclusters; i++) {\n printf(\"%d:\", i);\n for (j = 0; j < nfeatures; j++) {\n printf(\" %.2f\", cluster_centres[i][j]);\n }\n printf(\"\\n\\n\");\n }\n }\n\n if (min_nclusters != max_nclusters) {\n printf(\"Best number of clusters is %d\\n\", best_nclusters);\n }\n if (isRMSE) { // if calculated RMSE\n printf(\"Best Root Mean Squared Error: %.3f\\n\", rmse);\n }\n }\n\n /* free up memory */\n free(features[0]);\n free(features);\n return (0);\n}\n" }, { "alpha_fraction": 0.7580782175064087, "alphanum_fraction": 0.7627550959587097, "avg_line_length": 31.21917724609375, "blob_id": "68f4859c91afc769137ad5c5f13d7b2bbaa5bfc6", "content_id": "5af87771de07597b9517e08f401f0190043cdc26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2352, "license_type": "no_license", "max_line_length": 75, "num_lines": 73, "path": "/src/cuda/level2/neuralnet/include/mex_util.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, 
either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _MEX_UTIL_H_\n#define _MEX_UTIL_H_\n\n#include \"mat_cpu.h\"\n#include <ctime>\n\n#if defined (_WIN32)\n #define NOMINMAX\n #include <windows.h>\n#elif defined (__linux__)\n #include <unistd.h>\n#endif\n\n#ifdef __cplusplus\n extern \"C\" bool utIsInterruptPending();\n#else\n extern bool utIsInterruptPending();\n#endif\n\nvoid StartTimer();\nvoid MeasureTime(std::string msg);\n\nbool mexIsStruct(const mxArray *mx_array);\nbool mexIsCell(const mxArray *mx_array);\nbool mexIsField(const mxArray *mx_array, const char *fieldname);\nbool mexIsString(const mxArray *mx_array);\n\nconst mxArray* mexGetCell(const mxArray *mx_array, size_t ind);\nconst mxArray* mexGetField(const mxArray *mx_array, const char *fieldname);\n\nsize_t mexGetDimensionsNum(const mxArray *mx_array);\nstd::vector<size_t> mexGetDimensions(const mxArray *mx_array);\nsize_t mexGetNumel(const mxArray *mx_array);\n\nstd::string mexGetString(const mxArray *mx_array);\n\nftype* mexGetPointer(const mxArray *mx_array);\nftype mexGetScalar(const mxArray *mx_array);\nstd::vector<ftype> mexGetVector(const mxArray *mx_array);\nvoid mexGetMatrix(const mxArray *mx_array, MatCPU &mat);\nDim mexGetTensor(const mxArray *mx_array, MatCPU &mat);\n\nmxArray* mexNewArray(const std::vector<size_t> &dimvect);\nmxArray* mexNewMatrix(size_t size1, size_t size2);\nmxArray* mexSetScalar(ftype scalar);\nmxArray* mexSetVector(const std::vector<ftype> &vect);\nmxArray* mexSetMatrix(const MatCPU &mat);\nmxArray* mexSetTensor(const MatCPU &mat, const Dim& dims);\nmxArray* 
mexSetCellMat(size_t size1, size_t size2);\nvoid mexSetCell(mxArray* mx_array, size_t ind, mxArray* mx_value);\n\nmxArray* mexDuplicateArray(const mxArray* mx_array);\n\n#endif\n" }, { "alpha_fraction": 0.6875957250595093, "alphanum_fraction": 0.6875957250595093, "avg_line_length": 23.148147583007812, "blob_id": "3597f53b9c645902d4c7e7752ff24ed523bf581a", "content_id": "8e44c44f6f989f18b0b47f1eb78ae13bf2b421da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 653, "license_type": "no_license", "max_line_length": 76, "num_lines": 27, "path": "/src/cuda/level2/srad/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "include $(top_builddir)/config/config.mk\ninclude $(top_builddir)/config/targets.mk\n\nEXTRAFLAGS = -rdc=true -dc\n\n# How to find source files\nVPATH = $(srcdir):$(srcdir)/../../common:$(srcdir)/../../../common\n\nAM_LDFLAGS = $(CUDA_LDFLAGS)\nAM_CPPFLAGS = $(CUDA_INC)\n\n# What is the destination for programs built from this directory?\ncudadir = $(bindir)/CUDA\n\n# What programs should be installed to that destination?\ncuda_PROGRAMS = srad\n\nsrad_link.o: srad.o\n\t$(NVCC) ${CUDA_CPPFLAGS} $(CPPFLAGS) $(NVCXXFLAGS) -dlink $< -o srad_link.o\n\n# How to build those programs?\nsrad_SOURCES = \\\nmain.cpp\nsrad_LDADD = \\\nsrad.o \\\nsrad_link.o \\\n$(CUDA_LIBS) $(LIBS) -lm\n\n" }, { "alpha_fraction": 0.6031017303466797, "alphanum_fraction": 0.6136476397514343, "avg_line_length": 34.043479919433594, "blob_id": "87544e90dc1213703501fa3e0fc6c57685c49006", "content_id": "56787c387fd12f10f89d0ae6c504a56d108bbd27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8060, "license_type": "no_license", "max_line_length": 99, "num_lines": 230, "path": "/src/cuda/level2/neuralnet/include/layer.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email 
protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"layer.h\"\n\nLayer::Layer() {\n dims_ = {0, 0, 0, 0};\n function_ = \"relu\";\n add_bias_ = true;\n padding_ = {0, 0};\n stride_ = {1, 1};\n init_std_ = 0;\n lr_coef_ = 1.0;\n bias_coef_ = 1.0;\n dropout_ = 0;\n}\n\nvoid Layer::InitGeneral(const mxArray *mx_layer) {\n type_ = mexGetString(mexGetField(mx_layer, \"type\"));\n if (mexIsField(mx_layer, \"function\")) {\n function_ = mexGetString(mexGetField(mx_layer, \"function\"));\n mexAssertMsg(function_ == \"relu\" ||\n function_ == \"sigm\" ||\n function_ == \"soft\" ||\n function_ == \"none\",\n \"Unknown function code\");\n }\n if (mexIsField(mx_layer, \"add_bias\")) {\n // actual value if defined\n add_bias_ = (mexGetScalar(mexGetField(mx_layer, \"add_bias\")) > 0);\n }\n if (mexIsField(mx_layer, \"mapsize\")) {\n std::vector<ftype> mapsize = mexGetVector(mexGetField(mx_layer, \"mapsize\"));\n mexAssertMsg(mapsize.size() == 2, \"Input mapsize length must be 2\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(1 <= mapsize[i] && mapsize[i] < INT_MAX, \"Mapsize must be >= 1\");\n dims_[i+2] = (int) mapsize[i];\n }\n }\n if (mexIsField(mx_layer, \"channels\")) {\n ftype channels = mexGetScalar(mexGetField(mx_layer, \"channels\"));\n mexAssertMsg(1 <= channels && channels < INT_MAX, \"Channels num must be >= 1\");\n dims_[1] = (int) channels;\n 
filters_.dims(0) = dims_[1];\n if (add_bias_) { // using actual value here\n biases_.dims() = {1, dims_[1], 1, 1};\n }\n }\n if (mexIsField(mx_layer, \"filtersize\")) {\n std::vector<ftype> filtersize = mexGetVector(mexGetField(mx_layer, \"filtersize\"));\n mexAssertMsg(filtersize.size() == 2, \"Filtersize must contain 2 values\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(1 <= filtersize[i] && filtersize[i] < INT_MAX, \"Filtersize must be >= 1\");\n filters_.dims(i+2) = (int) filtersize[i];\n }\n }\n if (mexIsField(mx_layer, \"padding\")) {\n std::vector<ftype> padding = mexGetVector(mexGetField(mx_layer, \"padding\"));\n mexAssertMsg(padding.size() == 2, \"Padding vector must have 2 values\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(0 <= padding[i] && padding[i] < INT_MAX, \"Padding must be non-negative\");\n padding_[i] = (int) padding[i];\n }\n }\n if (mexIsField(mx_layer, \"stride\")) {\n std::vector<ftype> stride = mexGetVector(mexGetField(mx_layer, \"stride\"));\n mexAssertMsg(stride.size() == 2, \"Stride vector has the wrong length\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(1 <= stride[i] && stride[i] < INT_MAX, \"Stride must be >= 1\");\n stride_[i] = (int) stride[i];\n }\n }\n if (mexIsField(mx_layer, \"init_std\")) {\n init_std_ = mexGetScalar(mexGetField(mx_layer, \"init_std\"));\n mexAssertMsg(0 <= init_std_, \"init_std must be non-negative\");\n }\n if (mexIsField(mx_layer, \"bias_coef\")) {\n bias_coef_ = mexGetScalar(mexGetField(mx_layer, \"bias_coef\"));\n mexAssertMsg(0 <= bias_coef_, \"bias_coef must be non-negative\");\n }\n if (mexIsField(mx_layer, \"lr_coef\")) {\n lr_coef_ = mexGetScalar(mexGetField(mx_layer, \"lr_coef\"));\n mexAssertMsg(0 <= lr_coef_, \"lr_coef must be non-negative\");\n }\n if (mexIsField(mx_layer, \"dropout\")) {\n dropout_ = mexGetScalar(mexGetField(mx_layer, \"dropout\"));\n mexAssertMsg(0 <= dropout_ && dropout_ < 1, \"dropout must be in the range [0, 1)\");\n }\n}\n\nvoid 
Layer::ResizeActivMat(size_t batchsize, PassNum passnum) {\n dims_[0] = batchsize;\n if (passnum == PassNum::ForwardLinear) {\n // save activ_mat_ from the first pass for Nonlinear function\n Swap(activ_mat_, first_mat_);\n }\n activ_mat_.resize_tensor(dims_);\n}\n\nvoid Layer::ResizeDerivMat() {\n deriv_mat_.resize_tensor(dims_);\n}\n\nvoid Layer::Nonlinear(PassNum passnum) {\n if (function_ == \"relu\") {\n if (passnum == PassNum::ForwardTest || passnum == PassNum::Forward) { // test and train forward\n activ_mat_.CondAssign(activ_mat_, false, kEps, 0);\n } else if (passnum == PassNum::Backward) {\n deriv_mat_.CondAssign(activ_mat_, false, kEps, 0);\n } else if (passnum == PassNum::ForwardLinear) {\n activ_mat_.CondAssign(first_mat_, false, kEps, 0);\n } else if (passnum == PassNum::BackwardLinear) {\n deriv_mat_.CondAssign(first_mat_, false, kEps, 0);\n }\n } else if (function_ == \"soft\") {\n if (passnum == PassNum::ForwardTest || passnum == PassNum::Forward) { // test and train forward\n activ_mat_.SoftMax();\n } else if (passnum == PassNum::Backward) {\n deriv_mat_.SoftDer(activ_mat_);\n } else if (passnum == PassNum::ForwardLinear) { // third pass\n activ_mat_.SoftDer(first_mat_);\n } else if (passnum == PassNum::BackwardLinear) {\n deriv_mat_.SoftDer(first_mat_);\n }\n } else if (function_ == \"sigm\") {\n if (passnum == PassNum::ForwardTest || passnum == PassNum::Forward) { // test and train forward\n (activ_mat_.Sigmoid()).Validate();\n } else if (passnum == PassNum::Backward) {\n (deriv_mat_.SigmDer(first_mat_)).Validate();\n } else if (passnum == PassNum::ForwardLinear) { // third pass\n (activ_mat_.SigmDer(first_mat_)).Validate();\n } else if (passnum == PassNum::BackwardLinear) {\n (deriv_mat_.SigmDer(first_mat_)).Validate();\n }\n } else if (function_ == \"none\") {\n return;\n } else {\n mexAssertMsg(false, \"Unknown function name in Nonlinear\");\n }\n}\n\nvoid Layer::AddBias(PassNum passnum) {\n if (add_bias_ == false) return;\n if (passnum == 
PassNum::ForwardTest || passnum == PassNum::Forward) {\n activ_mat_.AddTensor(biases_.get());\n }\n}\n\nvoid Layer::BiasGrads(PassNum passnum, GradInd gradind) {\n if (add_bias_ == false) return;\n if (passnum == PassNum::Backward) {\n if (gradind == GradInd::First) {\n ConvolutionBackwardBias(deriv_mat_, biases_.der());\n biases_.der() *= (lr_coef_ * bias_coef_ / dims_[0]);\n } else if (gradind == GradInd::Second) {\n ConvolutionBackwardBias(deriv_mat_, biases_.der2());\n biases_.der2() *= (lr_coef_ * bias_coef_ / dims_[0]);\n }\n }\n}\n\nvoid Layer::DropoutForward(PassNum passnum) {\n if (dropout_ > 0) { // dropout\n if (passnum == PassNum::Forward) {\n dropmat_.resize(dims_[0], length());\n dropmat_.rand();\n dropmat_.CondAssign(dropmat_, false, dropout_, 0);\n dropmat_.CondAssign(dropmat_, true, 0, 1);\n activ_mat_ *= dropmat_;\n } else if (passnum == PassNum::ForwardLinear) {\n activ_mat_ *= dropmat_;\n } else if (passnum == PassNum::ForwardTest) {\n activ_mat_ *= (1 - dropout_);\n }\n }\n}\n\nvoid Layer::DropoutBackward() {\n if (dropout_ > 0) {\n deriv_mat_ *= dropmat_;\n }\n}\n\nvoid Layer::InitWeights(Weights &weights, size_t &offset, bool isgen) {\n filters_.AttachFilters(weights, offset);\n offset += filters_.Num();\n if (filters_.Num() > 0 && isgen) {\n if (init_std_ == 0) {\n size_t n_in = filters_.dims(1) * filters_.dims(2) * filters_.dims(3);\n init_std_ = std::sqrt((ftype) 2 / (ftype) n_in);\n }\n (filters_.get().rand() -= 0.5) *= init_std_;\n //filters_.get().randnorm() *= init_std_;\n }\n if (add_bias_) {\n biases_.AttachBiases(weights, offset);\n offset += biases_.Num();\n if (isgen) {\n biases_.get().assign(0);\n }\n }\n}\n\nvoid Layer::RestoreOrder() {\n filters_.RestoreOrder();\n}\n\nsize_t Layer::NumWeights() const {\n size_t num = filters_.Num();\n if (add_bias_) {\n num += biases_.Num();\n }\n return num;\n}\n" }, { "alpha_fraction": 0.49391305446624756, "alphanum_fraction": 0.7321739196777344, "avg_line_length": 22.4489803314209, 
"blob_id": "2939004d03f26695df20958dae68ee2a0eb5bc31", "content_id": "76f534c4a32c7242146c94464a5fdab71d2428d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1150, "license_type": "no_license", "max_line_length": 32, "num_lines": 49, "path": "/data/kmeans/gen_dataset.sh", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#./datagen 100\n#./datagen 300\n#./datagen 1000\n#./datagen 3000\n#./datagen 10000\n#./datagen 30000\n#./datagen 100000\n#./datagen 300000\n#./datagen 1000000\n#./datagen 3000000\n#./datagen 10000000\n#./datagen 100 -f\n#./datagen 300 -f\n#./datagen 1000 -f\n#./datagen 3000 -f\n#./datagen 10000 -f\n#./datagen 30000 -f\n#./datagen 100000 -f\n#./datagen 300000 -f\n#./datagen 1000000 -f\n#./datagen 3000000 -f\n#./datagen 10000000 -f\n\npython3 datagen.py -n 1024\npython3 datagen.py -n 2048\npython3 datagen.py -n 4096\npython3 datagen.py -n 8192\npython3 datagen.py -n 65536\npython3 datagen.py -n 131072\npython3 datagen.py -n 261444\npython3 datagen.py -n 524288\npython3 datagen.py -n 1048576\npython3 datagen.py -n 2097152\npython3 datagen.py -n 4194304\npython3 datagen.py -n 8388608\npython3 datagen.py -n 1024 -f\npython3 datagen.py -n 2048 -f\npython3 datagen.py -n 4096 -f\npython3 datagen.py -n 8192 -f\npython3 datagen.py -n 65536 -f\npython3 datagen.py -n 131072 -f\npython3 datagen.py -n 261444 -f\npython3 datagen.py -n 524288 -f\npython3 datagen.py -n 1048576 -f\npython3 datagen.py -n 2097152 -f\npython3 datagen.py -n 4194304 -f\npython3 datagen.py -n 8388608 -f\n\n" }, { "alpha_fraction": 0.7008599638938904, "alphanum_fraction": 0.7057739496231079, "avg_line_length": 26.593219757080078, "blob_id": "eb6ebf3143ba9bfe1d45303f2e2961851e18d00f", "content_id": "6143eafd9263a878266a0fd48116894df4ec607f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1628, "license_type": "no_license", "max_line_length": 
69, "num_lines": 59, "path": "/src/cuda/level2/neuralnet/include/weights.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _WEIGHTS_H_\n#define _WEIGHTS_H_\n\n#include \"mat_gpu.h\"\n#include \"params.h\"\n\nclass Weights {\n\nprivate:\n MatGPU weights_;\n MatGPU weights_der_;\n MatGPU weights_der2_;\n MatGPU weights_der_prev_;\n Dim dims_;\n\n void attach(Weights &w, size_t offset);\n\npublic:\n Weights();\n ~Weights() { Clear(); };\n void Init(const MatCPU &w);\n void AttachFilters(Weights &w, size_t offset);\n void AttachBiases(Weights &w, size_t offset);\n void RestoreOrder();\n size_t Num() const;\n void Update(const Params &params);\n void Clear();\n\n inline MatGPU& get() { return weights_; }\n inline const MatGPU& get() const { return weights_; }\n inline MatGPU& der() { return weights_der_; }\n inline MatGPU& der2() { return weights_der2_; }\n inline Dim& dims() { return dims_; }\n inline const Dim& dims() const { return dims_; }\n inline int& dims(int i) { return dims_[i]; }\n inline const int& dims(int i) const { return dims_[i]; }\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.921875, "alphanum_fraction": 0.921875, "avg_line_length": 63, "blob_id": "9db2bfdafa083f48c6fe55640efa5b0b46cec065", "content_id": 
"637b36df5dda8267b8d212b69f232e19f89c38b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 64, "license_type": "no_license", "max_line_length": 63, "num_lines": 1, "path": "/src/cuda/level0/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "SUBDIRS=busspeeddownload busspeedreadback devicememory maxflops\n" }, { "alpha_fraction": 0.7123397588729858, "alphanum_fraction": 0.723557710647583, "avg_line_length": 28.023256301879883, "blob_id": "afc4e04e6d4d6026bcc7b91fd3ce66eb315056a6", "content_id": "190f23d74284e41d39fca09e2d5befcc4f727b1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1248, "license_type": "no_license", "max_line_length": 78, "num_lines": 43, "path": "/src/cuda/level2/neuralnet/classify_mex.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"net.h\"\n\n#define NARGIN 5\n#define IN_L pRhs[0] // layers\n#define IN_W pRhs[1] // weights\n#define IN_P pRhs[2] // params\n#define IN_X pRhs[3] // data\n#define IN_Y pRhs[4] // labels\n\n#define NARGOUT 1\n#define OUT_Y pLhs[0] // predictions\n\nint print = 0;\n\nvoid mexFunction(int nLhs, mxArray* pLhs[], int nRhs, const mxArray* pRhs[]) {\n\n mexAssertMsg(nRhs == NARGIN && nLhs == NARGOUT,\n \"Number of input and/or output arguments is not correct!\");\n\n Net net(IN_P);\n net.InitLayers(IN_L);\n net.InitWeights(IN_W);\n net.Classify(IN_X, IN_Y, OUT_Y);\n}\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.7572463750839233, "avg_line_length": 22.600000381469727, "blob_id": "562e7eca98774c17800a6d97fb70fc523003b888", "content_id": "966adc58b3826a9a655e5dbb300094d91db7bff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 828, "license_type": "no_license", "max_line_length": 29, "num_lines": 35, "path": "/data/bfs/gen_dataset.sh", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#./graphgen 1024 1k\n#./graphgen 2048 2k\n#./graphgen 4096 4k\n#./graphgen 8192 8k\n#./graphgen 16384 16k\n#./graphgen 32768 32k\n#./graphgen 65536 64k\n#./graphgen 131072 128k\n#./graphgen 261444 256k\n#./graphgen 524288 512k\n#./graphgen 1048576 1M\n#./graphgen 2097152 2M\n#./graphgen 4194304 4M\n#./graphgen 8388608 8M\n#./graphgen 16777216 16M\n\npython3 datagen.py -n 128\npython3 datagen.py -n 256\npython3 datagen.py -n 512\npython3 datagen.py -n 1024\npython3 datagen.py -n 2048\npython3 datagen.py -n 4096\npython3 datagen.py -n 8192\npython3 datagen.py -n 16384\npython3 datagen.py -n 32768\npython3 datagen.py -n 65536\npython3 datagen.py -n 131072\npython3 datagen.py -n 261444\npython3 datagen.py -n 524288\npython3 datagen.py -n 1048576\npython3 datagen.py -n 2097152\npython3 datagen.py -n 4194304\npython3 datagen.py -n 
8388608\n\n\n" }, { "alpha_fraction": 0.5997474789619446, "alphanum_fraction": 0.6148989796638489, "avg_line_length": 28.296297073364258, "blob_id": "1f1a89e78296950b8280a87d3e96b734c9816ad0", "content_id": "3d039ec352ef6bfacb793f9aab69c06cb7da4945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 103, "num_lines": 27, "path": "/data/sort/datagen.py", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom optparse import OptionParser\nimport random\nimport sys\n\nMIN_VAL = 0\nMAX_VAL = 1024\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-s', '--size', type=\"int\", default=1024, help='Number of elements in the array')\n (options, args) = parser.parse_args()\n\n # check options\n if options.size <= 0:\n print(\"Error: Number of elements must be positive.\")\n sys.exit()\n\n print(\"Generating array with %d elements\" % options.size)\n\n random.seed(datetime.now())\n with open('sort_%d' % options.size, 'w') as f:\n # write header line\n f.write('%d\\n' % options.size)\n # number of floats\n for i in range(options.size):\n f.write('%0.4f\\n' % random.uniform(MIN_VAL, MAX_VAL))\n\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.703278660774231, "avg_line_length": 23.31999969482422, "blob_id": "452e41e9cccc60d4e341d69fde6b11c7395b5a14", "content_id": "dfad456ac0f7333dbd30cabaecf467de1bc6421a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 610, "license_type": "no_license", "max_line_length": 84, "num_lines": 25, "path": "/src/cuda/level2/neuralnet/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "include $(top_builddir)/config/config.mk\ninclude $(top_builddir)/config/targets.mk\n\n# How to find source files\nVPATH = 
$(srcdir):$(srcdir)/../../common:$(srcdir)/../../../common:$(srcdir)/include\n\nAM_LDFLAGS = $(CUDA_LDFLAGS)\nAM_CPPFLAGS = $(CUDA_INC)\nEXTRAFLAGS = -std=c++11\n\n# What is the destination for programs built from this directory?\ncudadir = $(bindir)/CUDA\n\n# What programs should be installed to that destination?\ncuda_PROGRAMS = convolution\n\n# How to build those programs?\nconvolution_SOURCES = \\\nmain.cpp\n\nconvolution_LDADD = \\\nmat_gpu.o \\\ncuda_util.o \\\ncuda_print.o \\\n$(CUDA_LIBS) $(LIBS) -lm\n\n\n" }, { "alpha_fraction": 0.6677132248878479, "alphanum_fraction": 0.6703296899795532, "avg_line_length": 27.522388458251953, "blob_id": "3d58139ace46ea1498f39721ed7c90415f320df6", "content_id": "65db434be38594733b423563bb38372bf34068f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1911, "license_type": "no_license", "max_line_length": 87, "num_lines": 67, "path": "/src/cuda/level2/neuralnet/include/mex_print.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _MEX_PRINT_H_\n#define _MEX_PRINT_H_\n\n#include <mex.h>\n#include <string>\n\nextern int print;\n\ninline void mexPrintMsg(std::string msg) {\n mexPrintf((msg + \"\\n\").c_str());\n mexEvalString(\"drawnow;\");\n}\n\ninline void mexPrintMsg(std::string msg, double x) {\n mexPrintf((msg + \": \" + std::to_string((long double) x) + \"\\n\").c_str());\n mexEvalString(\"drawnow;\");\n}\n\ninline void mexPrintMsg(std::string msg, std::string s) {\n mexPrintf((msg + \": \" + s + \"\\n\").c_str());\n mexEvalString(\"drawnow;\");\n}\n\ninline void mexPrintInt(std::string msg, size_t x) {\n mexPrintf((msg + \": \" + std::to_string((long long) x) + \"\\n\").c_str());\n mexEvalString(\"drawnow;\");\n}\n\ninline void _assertFunction(bool cond, std::string msg, const char *file, int line) {\n if (!(cond)) {\n if (!msg.empty()) {\n mexPrintf((msg + \"\\n\").c_str());\n }\n mexPrintf((std::string(file) + \": \" + std::to_string(line) + \"\\n\").c_str());\n mexEvalString(\"drawnow;\");\n mexErrMsgTxt(\"Assertion Failed!\");\n }\n}\n\n#ifndef mexAssert\n#define mexAssert(cond) { _assertFunction((cond), \"\", __FILE__, __LINE__); }\n#endif\n\n#ifndef mexAssertMsg\n#define mexAssertMsg(cond, msg) { _assertFunction((cond), (msg), __FILE__, __LINE__); }\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.5778523683547974, "alphanum_fraction": 0.5929530262947083, "avg_line_length": 28.50494956970215, "blob_id": "3ff6e0c1dfb58e77c333b844560aa944ad6e77b6", "content_id": "d4bff7c7dcedd1df54c2458c9084090212fb7db6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2980, "license_type": "no_license", "max_line_length": 80, "num_lines": 101, "path": "/src/common/mkl_types.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*******************************************************************************\n! INTEL CONFIDENTIAL\n! Copyright(C) 1999-2008 Intel Corporation. 
All Rights Reserved.\n! The source code contained or described herein and all documents related to\n! the source code (\"Material\") are owned by Intel Corporation or its suppliers\n! or licensors. Title to the Material remains with Intel Corporation or its\n! suppliers and licensors. The Material contains trade secrets and proprietary\n! and confidential information of Intel or its suppliers and licensors. The\n! Material is protected by worldwide copyright and trade secret laws and\n! treaty provisions. No part of the Material may be used, copied, reproduced,\n! modified, published, uploaded, posted, transmitted, distributed or disclosed\n! in any way without Intel's prior express written permission.\n! No license under any patent, copyright, trade secret or other intellectual\n! property right is granted to or conferred upon you by disclosure or delivery\n! of the Materials, either expressly, by implication, inducement, estoppel or\n! otherwise. Any license under such intellectual property rights must be\n! express and approved by Intel in writing.\n!\n!*******************************************************************************\n! Content:\n! Intel(R) Math Kernel Library (MKL) types definition\n!******************************************************************************/\n\n#ifndef _MKL_TYPES_H_\n#define _MKL_TYPES_H_\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n/* Complex type (single precision). */\ntypedef\nstruct _MKL_Complex8 {\n float real;\n float imag;\n} MKL_Complex8;\n\n/* Complex type (double precision). 
*/\ntypedef\nstruct _MKL_Complex16 {\n double real;\n double imag;\n} MKL_Complex16;\n\ntypedef\nstruct {\n int MajorVersion;\n int MinorVersion;\n int BuildNumber;\n char * ProductStatus;\n char * Build;\n char * Processor;\n char * Platform;\n} MKLVersion;\n\n#if (!defined(__INTEL_COMPILER)) & defined(_MSC_VER)\n #define MKL_INT64 __int64\n#else\n #define MKL_INT64 long long int\n#endif\n\n#ifdef MKL_ILP64\n #define MKL_INT MKL_INT64\n #define MKL_LONG MKL_INT64\n#else\n #define MKL_INT int\n #define MKL_LONG long int\n#endif\n\n/** CPU codes for int MKL_CPU_DETECT(void) **/\n#ifdef _IPF\n #define ITP 0\n#else\n\t#ifdef _EM64T\n\t #define NI 0\n\t #define CT 1\n\t #define MNI 2\n \t #define PNR 3\n\t#else\n \t#define DEF 0\n\t #define PIII 1\n\t #define P4 2\n\t #define P4P 3\n\t #define P4M 4\n\t#endif\n#endif\n/********************************************/\n\n/** MKL threading stuff **/\n/** MKL Domains codes **/\n#define MKL_ALL 0\n#define MKL_BLAS 1\n#define MKL_FFT 2\n#define MKL_VML 3\n/**************************/\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _MKL_TYPES_H_ */\n" }, { "alpha_fraction": 0.426718533039093, "alphanum_fraction": 0.722438395023346, "avg_line_length": 31.08333396911621, "blob_id": "3b3bebd151699038299cb0cbc3b8e618bb510b0a", "content_id": "67b08bfafd1800fffcc817641d391f0d667c2124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 771, "license_type": "no_license", "max_line_length": 112, "num_lines": 24, "path": "/features.sh", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#bfs\n#for i in 128 256 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608\n#do\n #./src/cuda/level1/bfs/bfs -i data/bfs/input/bfs_$i -m features/bfs/$i\n#done\n\n#pathfinder\n#for i in 2 4 8 16 32 64 128 256 512 1024 2048\nfor i in 4096 8192 16384 32768 65536\ndo\n ./src/cuda/level1/pathfinder/pathfinder -s 1 -m 
features/pathfinder/$i --instances $i\ndone\n\n#srad\n#for i in 16 32 48 64 80 96 112 128 144 160 176 192 208 224 240 256\n#do\n #./src/cuda/level2/srad/srad --imageSize $i --speckleSize 8 --iterations 50 -m features/srad/$i\n#done\n\n#mandelbrot\n#for i in 32 64 128 256 512 1024 2048 4096 8192 16384 32768\n#do\n #./src/cuda/level2/mandelbrot/mandelbrot --imageSize $i --iterations 2048 -m features/mandelbrot/$i\n#done\n\n" }, { "alpha_fraction": 0.7345505356788635, "alphanum_fraction": 0.738061785697937, "avg_line_length": 26.921567916870117, "blob_id": "af9a1bee3b6944214acd106a491fc6f1d631faf4", "content_id": "04d11b89f02b6600b77eab650f6b602902924db8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1424, "license_type": "no_license", "max_line_length": 73, "num_lines": 51, "path": "/src/cuda/level2/neuralnet/include/layer_jitt.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _LAYER_JITT_H_\n#define _LAYER_JITT_H_\n\n#include \"layer.h\"\n\nclass LayerJitt : public Layer {\n\npublic:\n LayerJitt();\n ~LayerJitt() {};\n void Init(const mxArray *mx_layer, const Layer *prev_layer);\n void TransformForward(Layer *prev_layer, PassNum passnum);\n void TransformBackward(Layer *prev_layer);\n void WeightGrads(Layer *prev_layer, GradInd gradind) {};\n\nprivate:\n // maximal values of distortion\n MatGPU shift_, scale_, mirror_;\n // default value to fill in if the new image is out of the original one\n ftype angle_, defval_;\n // actual values of distorion for all batch images\n MatGPU shift_mat_, scale_mat_, mirror_mat_, angle_mat_;\n\n MatGPU eigenvectors_;\n ftype noise_std_;\n bool randtest_;\n\n\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.7372793555259705, "alphanum_fraction": 0.7445482611656189, "avg_line_length": 31.100000381469727, "blob_id": "baba6f3c9550af55769e6d9f897db4861782bcf0", "content_id": "da0609cc06c9bee9d3f675dc8ee91ae2edce23b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 963, "license_type": "no_license", "max_line_length": 83, "num_lines": 30, "path": "/src/cuda/level2/neuralnet/include/layer_input.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"layer_input.h\"\n\nLayerInput::LayerInput() {\n function_ = \"none\";\n add_bias_ = false;\n dims_[1] = 1;\n}\n\nvoid LayerInput::Init(const mxArray *mx_layer, const Layer *prev_layer) {\n mexAssertMsg(prev_layer == NULL, \"The 'input' type layer must be the first one\");\n}\n" }, { "alpha_fraction": 0.6629819273948669, "alphanum_fraction": 0.6708007454872131, "avg_line_length": 29.15447235107422, "blob_id": "017d2e6107649d69f36b9b5462880747ececbac7", "content_id": "1bb2f539476432de3b481b0c2b66784cf558b79d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3709, "license_type": "no_license", "max_line_length": 91, "num_lines": 123, "path": "/src/cuda/level2/neuralnet/include/mat_cpu.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _MAT_CPU_H_\n#define _MAT_CPU_H_\n\n#include \"settings.h\"\n#include \"mex_print.h\"\n#include <array>\n#include <vector>\n#include <algorithm>\n#include <random>\n//#include <climits>\n\ntypedef std::array<int, 2> Pair;\ntypedef std::array<int, 4> Dim;\n\nclass MatCPU {\n\nprotected:\n//public:\n /* pointer to the first matrix element */\n ftype *data_;\n\n /* define the maximum values for first and second indices accordingly */\n size_t size1_, size2_;\n\n /* indicates the major dimension in this matrix */\n bool order_;\n\n /* owner_ = true means that memory was allocated for this matrix */\n bool owner_;\n\nprivate:\n inline size_t index(size_t i, size_t j) const {\n if (order_ == false) {\n return j * size1_ + i;\n } else {\n return i * size2_ + j;\n }\n }\n inline ftype& data(size_t ind) { return data_[ind]; }\n inline const ftype& data(size_t ind) const { return data_[ind]; }\n\npublic:\n\n // data access\n inline ftype& operator () (size_t i, size_t j) { return data_[index(i, j)]; }\n inline const ftype& operator () (size_t i, size_t j) const { return data_[index(i, j)]; }\n inline ftype& data(size_t i, size_t j) { return data_[index(i, j)]; }\n inline const ftype& data(size_t i, size_t j) const { return data_[index(i, j)]; }\n inline bool empty() const { return (data_ == NULL); }\n\n // public\n inline size_t size1() const { return size1_; }\n inline size_t size2() const { return size2_; }\n inline size_t size() const { return size1_ * size2_; }\n inline bool order() const { return order_; }\n\n //inline ftype operator () (size_t ind) const { return data_[ind]; }\n //MatCPU operator () (size_t ind);\n\n\n // memory functions\n MatCPU();\n MatCPU(size_t size1, size_t size2);\n MatCPU(const MatCPU &b);\n MatCPU(MatCPU &&b);\n ~MatCPU();\n MatCPU& init();\n MatCPU& operator = (const MatCPU &b);\n MatCPU& resize(size_t size1, size_t size2);\n MatCPU& reshape(size_t size1, size_t size2);\n MatCPU& 
set_order(bool order);\n MatCPU& attach(const MatCPU &b);\n MatCPU& attach(const MatCPU &b, size_t offset, size_t size1, size_t size2, bool order);\n MatCPU& attach(ftype *ptr, size_t size1, size_t size2);\n MatCPU& attach(ftype *ptr, size_t size1, size_t size2, bool order);\n MatCPU& clear();\n friend void Swap(MatCPU &a, MatCPU &b);\n\n // data functions\n MatCPU& ident();\n MatCPU& assign(ftype c);\n MatCPU& assign(const std::vector<ftype> &vect);\n MatCPU& operator += (const MatCPU &b);\n MatCPU& operator -= (const MatCPU &b);\n MatCPU& operator *= (const MatCPU &b);\n MatCPU& operator /= (const MatCPU &b);\n MatCPU& operator += (ftype c);\n MatCPU& operator -= (ftype c);\n MatCPU& operator *= (ftype c);\n MatCPU& operator /= (ftype c);\n MatCPU& Reorder(bool order);\n\n // friend functions\n friend void Sum(const MatCPU &a, MatCPU &vect, int dim);\n friend void Mean(const MatCPU &a, MatCPU &vect, int dim);\n friend void Trans(const MatCPU &a, MatCPU &b);\n friend void Shuffle(MatCPU &a, MatCPU &b);\n\n // const functions\n ftype sum() const;\n bool hasZeros() const;\n};\n\n#endif\n" }, { "alpha_fraction": 0.5526992082595825, "alphanum_fraction": 0.7789202928543091, "avg_line_length": 24.799999237060547, "blob_id": "1517ff370e990507557383e364ed6443b04f12fc", "content_id": "8080fe3cb5cf6af2b897a281318221b5d71f8529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 389, "license_type": "no_license", "max_line_length": 30, "num_lines": 15, "path": "/data/sort/gen_dataset.sh", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 datagen.py -n 1024\npython3 datagen.py -n 2048\npython3 datagen.py -n 4096\npython3 datagen.py -n 8192\npython3 datagen.py -n 65536\npython3 datagen.py -n 131072\npython3 datagen.py -n 261444\npython3 datagen.py -n 524288\npython3 datagen.py -n 1048576\npython3 datagen.py -n 2097152\npython3 datagen.py -n 4194304\npython3 datagen.py -n 
8388608\npython3 datagen.py -n 16777216\n\n\n" }, { "alpha_fraction": 0.6647727489471436, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 27.799999237060547, "blob_id": "b20cd4f2a7ecbfd273cfa26794a51d5d3524c146", "content_id": "33de3346bb465d9e1869ccc526f686b8007bd92c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3168, "license_type": "no_license", "max_line_length": 86, "num_lines": 110, "path": "/src/cuda/level2/neuralnet/include/weights.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"weights.h\"\n\nWeights::Weights() {\n dims_ = {0, 0, 0, 0};\n}\n\nvoid Weights::Init(const MatCPU &w) {\n mexAssert(w.size() < INT_MAX);\n dims_ = {(int) w.size1(), (int) w.size2(), 1, 1};\n if (Num() == 0) return;\n\n weights_.resize_filter(dims_);\n weights_der_.resize_filter(dims_);\n weights_der2_.resize_filter(dims_);\n weights_der_prev_.resize_filter(dims_);\n\n weights_ = w;\n weights_der_.assign(0);\n weights_der2_.assign(0);\n weights_der_prev_.assign(0);\n}\n\nvoid Weights::attach(Weights &w, size_t offset) {\n size_t size1 = dims_[0], size2 = dims_[1] * dims_[2] * dims_[3];\n weights_.attach(w.weights_, offset, size1, size2, kExternalOrder);\n weights_der_.attach(w.weights_der_, offset, size1, size2, kInternalOrder);\n weights_der2_.attach(w.weights_der2_, offset, size1, size2, kInternalOrder);\n weights_der_prev_.attach(w.weights_der_prev_, offset, size1, size2, kInternalOrder);\n}\n\nvoid Weights::AttachFilters(Weights &w, size_t offset) {\n\n if (Num() == 0) return;\n attach(w, offset);\n weights_.reshape_filter(dims_);\n weights_der_.reshape_filter(dims_);\n weights_der2_.reshape_filter(dims_);\n weights_der_prev_.reshape_filter(dims_);\n\n weights_.Reorder(kInternalOrder);\n weights_.ReorderMaps(kExternalOrder, kInternalOrder);\n}\n\nvoid Weights::AttachBiases(Weights &w, size_t offset) {\n\n if (Num() == 0) return;\n attach(w, offset);\n weights_.set_order(kInternalOrder);\n\n weights_.reshape_tensor(dims_);\n weights_der_.reshape_tensor(dims_);\n weights_der2_.reshape_tensor(dims_);\n weights_der_prev_.reshape_tensor(dims_);\n}\n\nvoid Weights::RestoreOrder() {\n weights_.ReorderMaps(kInternalOrder, kExternalOrder);\n weights_.Reorder(kExternalOrder);\n}\n\nsize_t Weights::Num() const {\n return dims_[0] * dims_[1] * dims_[2] * dims_[3];\n}\n\nvoid Weights::Update(const Params &params) {\n if (params.beta_ > 0) {\n weights_der2_ *= params.beta_;\n weights_der_ += weights_der2_;\n }\n if 
(params.momentum_ > 0) {\n weights_der_prev_ *= params.momentum_;\n weights_der_ *= (1 - params.momentum_);\n weights_der_ += weights_der_prev_;\n weights_der_prev_ = weights_der_;\n }\n weights_der_ *= params.alpha_;\n if (params.decay_ > 0) {\n weights_ *= (1 - params.decay_ * params.alpha_);\n }\n // direction that decreases the error\n weights_ -= weights_der_;\n weights_der_.assign(0);\n weights_der2_.assign(0);\n}\n\nvoid Weights::Clear() {\n weights_.clear();\n weights_der_.clear();\n weights_der2_.clear();\n weights_der_prev_.clear();\n}\n" }, { "alpha_fraction": 0.5640243887901306, "alphanum_fraction": 0.5731707215309143, "avg_line_length": 33.87234115600586, "blob_id": "c6b4e7b874fe84a385ec7921247f86b341c3141f", "content_id": "6cbd16455566cc16a1fd5dc53927c9181bf4df4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1640, "license_type": "no_license", "max_line_length": 124, "num_lines": 47, "path": "/data/kmeans/datagen.py", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom optparse import OptionParser\nimport numpy as np\nimport random\nimport sys\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-n', '--numObjects', type=\"int\", default=10000, help='Number of objects')\n parser.add_option('-a', '--numAttributes', type=\"int\", default=30, help='Number of attributes of each object')\n parser.add_option('-f', '--float', action=\"store_true\", default=False, help=\"Features are floats (integers by default)\")\n (options, args) = parser.parse_args()\n\n # check options\n if options.numObjects <= 0:\n print(\"Error: Number of objects must be positive.\")\n sys.exit()\n if options.numAttributes <= 0:\n print(\"Error: Number of attributes must be positive.\")\n sys.exit()\n\n n = options.numObjects\n a = options.numAttributes\n f = options.float\n print(\"Generating %d objects with %d %s attributes each\" % (n,a, 'float' 
if f else 'integer'))\n\n # seed random number generator\n random.seed(datetime.now())\n\n if(f):\n name = 'kmeans_f_%d_%d' % (n,a)\n else:\n name = 'kmeans_%d_%d' % (n,a)\n\n with open(name, 'w') as fn:\n # write number of objects and number of features\n fn.write('%d %d\\n' % (n,a))\n for i in range(n):\n # write ID of object (ignored in benchmark)\n fn.write('%d' % i)\n for j in range(a):\n # write value of attribute\n if(f):\n fn.write(' %0.4f' % random.random())\n else:\n fn.write(' %d' % random.randint(0, 255))\n fn.write('\\n')\n\n" }, { "alpha_fraction": 0.5295950174331665, "alphanum_fraction": 0.5534787178039551, "avg_line_length": 32.17241287231445, "blob_id": "a61eb81759485f15a13080e3f156dd5bede8454f", "content_id": "75088458efc9d8c7a57144dd0b04ac132f385e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "no_license", "max_line_length": 92, "num_lines": 29, "path": "/data/cfd/datagen.py", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom optparse import OptionParser\nimport random\nimport sys\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-s', '--size', type=\"int\", default=100000, help='Number of elements')\n (options, args) = parser.parse_args()\n\n # check options\n if options.size <= 0:\n print(\"Error: Number of elements must be positive.\")\n sys.exit()\n\n print(\"Generating input with %d elements\" % options.size)\n\n random.seed(datetime.now())\n with open('cfd_%d' % options.size, 'w') as f:\n # write header line\n f.write('%d\\n' % options.size)\n # number of floats\n for i in range(options.size):\n f.write('%0.7f ' % random.uniform(0, 1))\n for j in range(4):\n f.write('%d ' % random.uniform(i - 10, i + 10))\n for k in range(3):\n f.write('%0.7f ' % random.uniform(-0.5, 0.5))\n f.write('\\n')\n\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7529069781303406, 
"avg_line_length": 34.1020393371582, "blob_id": "a2d133815948b056b80cac9a9e1cd7b4d003058b", "content_id": "746357c76b79cf49c3ef8bbfce57be2fb34d6b5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 98, "num_lines": 49, "path": "/src/cuda/level2/neuralnet/cuda_util.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _CUDA_UTIL_H_\n#define _CUDA_UTIL_H_\n\n#include \"mat_gpu.h\"\n\nvoid cuda_validate(MatGPU &mat);\nvoid cuda_sign(MatGPU &mat);\nvoid cuda_sqrt(MatGPU &mat);\nvoid cuda_log(MatGPU &mat);\nvoid cuda_exp(MatGPU &mat);\nvoid cuda_sigmoid(MatGPU &mat);\n\nvoid cuda_ident(MatGPU &mat);\nvoid cuda_assval(MatGPU &mat, float val);\nvoid cuda_addval(MatGPU &mat, float val);\nvoid cuda_subval(MatGPU &mat, float val);\nvoid cuda_multval(MatGPU &mat, float val);\nvoid cuda_divval(MatGPU &mat, float val);\n\nvoid cuda_addmat(MatGPU &mat, const MatGPU &b);\nvoid cuda_submat(MatGPU &mat, const MatGPU &b);\nvoid cuda_multmat(MatGPU &mat, const MatGPU &b);\nvoid cuda_divmat(MatGPU &mat, const MatGPU &b);\nvoid cuda_sigmder(MatGPU &mat, const MatGPU &b);\n\nvoid cuda_condassign(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val);\nvoid cuda_condadd(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val);\nvoid cuda_condmult(MatGPU& mat, const MatGPU& condmat, bool incase, float threshold, float val);\n\n#endif\n" }, { "alpha_fraction": 0.600185215473175, "alphanum_fraction": 0.6100648641586304, "avg_line_length": 33.21479034423828, "blob_id": "1fa6439c0403fca4b8821966149dd60df3271cd3", "content_id": "8b3443d58d90643078c83d823517b76bd0aa230a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 19434, "license_type": "no_license", "max_line_length": 92, "num_lines": 568, "path": "/src/cuda/level2/neuralnet/include/net.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later 
version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"net.h\"\n\nNet::Net(const mxArray *mx_params) {\n mexAssertMsg(PRECISION == 1, \"In the GPU version PRECISION should be 1\");\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"Start params initialization...\");\n }\n params_.Init(mx_params);\n MatGPU::InitCuda(params_.gpu_);\n MatGPU::SetMemoryLimit(params_.memory_);\n MatGPU::InitRand(params_.seed_);\n std::srand((unsigned int) params_.seed_);\n if (!params_.classcoefs_.empty()) {\n classcoefs_.resize(params_.classcoefs_.size1(), params_.classcoefs_.size2());\n classcoefs_ = params_.classcoefs_;\n }\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"Params initialization finished\");\n }\n}\n\nvoid Net::InitLayers(const mxArray *mx_layers) {\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"Start layers initialization...\");\n }\n size_t layers_num = mexGetNumel(mx_layers);\n layers_.resize(layers_num);\n first_trained_ = layers_num;\n Layer *prev_layer = NULL;\n for (size_t i = 0; i < layers_num; ++i) {\n const mxArray *mx_layer = mexGetCell(mx_layers, i);\n std::string layer_type = mexGetString(mexGetField(mx_layer, \"type\"));\n if (i == 0) {\n mexAssertMsg(layer_type == \"input\", \"The first layer must be the 'input' type\");\n }\n if (layer_type == \"input\") {\n layers_[i] = new LayerInput();\n } else if (layer_type == \"full\") {\n layers_[i] = new LayerFull();\n } else if (layer_type == \"jitt\") {\n layers_[i] = new LayerJitt();\n } else if (layer_type == \"conv\") {\n layers_[i] = new LayerConv();\n } else if (layer_type == \"deconv\") {\n layers_[i] = new LayerDeconv();\n } else if (layer_type == \"pool\") {\n 
layers_[i] = new LayerPool();\n } else {\n mexAssertMsg(false, layer_type + \" - unknown layer type\");\n }\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"\\nInitializing layer of type\", layer_type);\n }\n layers_[i]->InitGeneral(mx_layer);\n layers_[i]->Init(mx_layer, prev_layer);\n mexAssertMsg(layers_[i]->function_ != \"soft\" || i == layers_num - 1,\n \"Softmax function may be only on the last layer\");\n if (layers_[i]->NumWeights() > 0 && layers_[i]->lr_coef_ > 0) {\n if (first_trained_ > i) {\n first_trained_ = i;\n }\n }\n prev_layer = layers_[i];\n if (params_.verbose_ >= 1) {\n if (layers_[i]->NumWeights() > 0) {\n mexPrintMsg(\"Kernels:\");\n mexPrintInt(\"Output channels\", layers_[i]->filters_.dims(0));\n mexPrintInt(\"Input channels\", layers_[i]->filters_.dims(1));\n mexPrintInt(\"Height\", layers_[i]->filters_.dims(2));\n mexPrintInt(\"Width\", layers_[i]->filters_.dims(3));\n }\n if (layers_[i]->add_bias_) {\n mexPrintMsg(\"Bias added\");\n } else {\n mexPrintMsg(\"Bias not added\");\n }\n if (layers_[i]->NumWeights() > 0 && layers_[i]->lr_coef_ > 0) {\n mexPrintMsg(\"Trainable\");\n } else {\n mexPrintMsg(\"Fixed\");\n }\n mexPrintMsg(\"Mapsize:\");\n mexPrintInt(\"Channels\", layers_[i]->dims_[1]);\n mexPrintInt(\"Height\", layers_[i]->dims_[2]);\n mexPrintInt(\"Width\", layers_[i]->dims_[3]);\n }\n }\n if (params_.verbose_ >= 1) {\n mexPrintInt(\"First trained layer\", first_trained_);\n }\n if (params_.lossfun_ == \"logreg\") {\n Layer *lastlayer = layers_.back();\n if (lastlayer->function_ != \"soft\") {\n mexPrintMsg(\"WARNING: logreg loss is used with non-softmax last layer\");\n }\n }\n //mexAssertMsg(layer_type == \"full\", \"The last layer must be the type of 'f'\");\n //mexAssertMsg(layers_.back()->function_ == \"soft\" || layers_.back()->function_ == \"sigm\",\n // \"The last layer function must be either 'soft' or 'sigm'\");\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"Layers initialization finished\");\n }\n}\n\nvoid 
Net::Classify(const mxArray *mx_data, const mxArray *mx_labels, mxArray *&mx_pred) {\n\n if (params_.verbose_ >= 3) {\n mexPrintMsg(\"Start classification...\");\n }\n ReadData(mx_data);\n ReadLabels(mx_labels);\n size_t test_num = data_.size1();\n preds_.resize(test_num, layers_.back()->length());\n preds_.set_order(true);\n MatCPU curpreds;\n if (params_.test_epochs_ > 1) {\n curpreds.resize(test_num, layers_.back()->length());\n curpreds.set_order(true);\n preds_.assign(0);\n } else {\n curpreds.attach(preds_);\n }\n size_t numbatches = DIVUP(test_num, params_.batchsize_);\n MatGPU data_batch, labels_batch, pred_batch, coef_batch;\n for (size_t epoch = 0; epoch < params_.test_epochs_; ++epoch) {\n size_t offset = 0;\n for (size_t batch = 0; batch < numbatches; ++batch) {\n size_t batchsize = std::min(test_num - offset, params_.batchsize_);\n data_batch.resize(batchsize, data_.size2());\n SubSet(data_, data_batch, offset, true);\n InitActiv(data_batch);\n Forward(pred_batch, PassNum::ForwardTest, GradInd::Nowhere);\n if (params_.testshift_ > 0) {\n ftype loss1;\n labels_batch.resize(batchsize, labels_.size2());\n SubSet(labels_, labels_batch, offset, true);\n if (!classcoefs_.empty()) {\n coef_batch.resize(batchsize, 1);\n Prod(labels_batch, false, classcoefs_, false, coef_batch);\n }\n InitDeriv(labels_batch, coef_batch, loss1);\n // shift and beta cannot be positive together\n Backward(PassNum::Backward, GradInd::Nowhere);\n InitActivAT(params_.testshift_, 1); // L1-norm adversarial loss\n Forward(pred_batch, PassNum::ForwardTest, GradInd::Nowhere);\n }\n SubSet(curpreds, pred_batch, offset, false);\n offset += batchsize;\n if (params_.verbose_ >= 4) {\n mexPrintInt(\"Test epoch\", epoch + 1);\n mexPrintInt(\"Test batch\", batch + 1);\n }\n }\n if (params_.test_epochs_ > 1) {\n preds_ += curpreds;\n }\n if (params_.verbose_ >= 3) {\n mexPrintInt(\"Test epoch\", epoch + 1);\n }\n }\n if (params_.test_epochs_ > 1) {\n preds_ /= (ftype) params_.test_epochs_;\n }\n 
//preds_.ReorderMaps(kInternalOrder, kExternalOrder);\n preds_.Reorder(kExternalOrder);\n Dim pred_dims = layers_.back()->dims_;\n pred_dims[0] = test_num;\n mx_pred = mexSetTensor(preds_, pred_dims);\n if (params_.verbose_ >= 3) {\n mexPrintMsg(\"Classification finished\");\n }\n}\n\nvoid Net::Train(const mxArray *mx_data, const mxArray *mx_labels) {\n\n if (params_.verbose_ >= 3) {\n mexPrintMsg(\"Start training...\");\n }\n ReadData(mx_data);\n ReadLabels(mx_labels);\n\n size_t train_num = data_.size1();\n size_t numbatches = DIVUP(train_num, params_.batchsize_);\n losses_.resize(2, params_.epochs_);\n losses_.assign(0);\n MatGPU data_batch, labels_batch, pred_batch, coef_batch, empty_batch;\n for (size_t epoch = 0; epoch < params_.epochs_; ++epoch) {\n //print = 1;\n if (params_.shuffle_) {\n Shuffle(data_, labels_);\n }\n StartTimer();\n //MatGPU::StartCudaTimer();\n size_t offset = 0;\n for (size_t batch = 0; batch < numbatches; ++batch) {\n size_t batchsize = std::min(train_num - offset, params_.batchsize_);\n data_batch.resize(batchsize, data_.size2());\n labels_batch.resize(batchsize, labels_.size2());\n SubSet(data_, data_batch, offset, true);\n SubSet(labels_, labels_batch, offset, true);\n if (!classcoefs_.empty()) {\n coef_batch.resize(batchsize, 1);\n Prod(labels_batch, false, classcoefs_, false, coef_batch);\n }\n ftype loss1 = 0, loss2 = 0;\n InitActiv(data_batch);\n Forward(pred_batch, PassNum::Forward, GradInd::Nowhere);\n InitDeriv(labels_batch, coef_batch, loss1);\n losses_(0, epoch) += loss1;\n // shift and beta cannot be positive together\n if (params_.shift_ > 0) {\n Backward(PassNum::Backward, GradInd::Nowhere);\n InitActivAT(params_.shift_, params_.normfun_);\n Forward(pred_batch, PassNum::Forward, GradInd::Nowhere);\n InitDeriv(labels_batch, coef_batch, loss2);\n }\n Backward(PassNum::Backward, GradInd::First);\n if (params_.shift_ == 0 && params_.beta_ > 0) {\n InitActivIBP(loss2, 2);\n if (params_.fast_) {\n Forward(pred_batch, 
PassNum::ForwardLinear, GradInd::Second);\n } else {\n Forward(pred_batch, PassNum::ForwardLinear, GradInd::Nowhere);\n labels_batch.assign(0);\n std::string lf = params_.lossfun_;\n params_.lossfun_ = \"L-norm\";\n // we don't multiply on the coef_batch again here\n // as the gradients are already multiplied on the first pass\n InitDeriv(labels_batch, empty_batch, loss2);\n Backward(PassNum::BackwardLinear, GradInd::Second);\n params_.lossfun_ = lf;\n }\n }\n losses_(1, epoch) += loss2;\n UpdateWeights();\n offset += batchsize;\n if (params_.verbose_ >= 4) {\n mexPrintInt(\"Epoch\", epoch + 1);\n mexPrintInt(\"Batch\", batch + 1);\n }\n } // batch\n //MatGPU::MeasureCudaTime(\"totaltime\");\n MeasureTime(\"totaltime\");\n if (params_.verbose_ >= 3) {\n mexPrintInt(\"Epoch\", epoch + 1);\n }\n } // epoch\n losses_ /= (ftype) numbatches;\n if (params_.verbose_ >= 3) {\n mexPrintMsg(\"Training finished\");\n }\n}\n\nvoid Net::Forward(MatGPU &pred, PassNum passnum, GradInd gradind) {\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Forward pass started\");\n int gi = 0;\n if (gradind == GradInd::First) {\n gi = 1;\n } else if (gradind == GradInd::Second) {\n gi = 2;\n }\n mexPrintInt(\"Computing gradients\", gi);\n }\n size_t batchsize = layers_[0]->activ_mat_.size1();\n Layer *prev_layer = NULL;\n for (size_t i = first_layer_; i < layers_.size(); ++i) {\n /*\n if (layers_[i]->type_ == \"jitt\") {\n first_layer_ = i;\n } */\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Forward pass for layer\", layers_[i]->type_);\n }\n if (gradind != GradInd::Nowhere && layers_[i]->lr_coef_ > 0) {\n layers_[i]->WeightGrads(prev_layer, gradind);\n // no BiasGrads on the forward pass\n }\n layers_[i]->ResizeActivMat(batchsize, passnum);\n layers_[i]->TransformForward(prev_layer, passnum);\n layers_[i]->AddBias(passnum);\n layers_[i]->Nonlinear(passnum);\n layers_[i]->DropoutForward(passnum);\n if (params_.verbose_ >= 5) {\n mexPrintMsg(\"ActivSum\", layers_[i]->activ_mat_.sum());\n }\n 
prev_layer = layers_[i];\n if (utIsInterruptPending()) {\n mexAssert(false);\n }\n }\n pred.attach(layers_.back()->activ_mat_);\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Forward pass finished\");\n }\n}\n\nvoid Net::Backward(PassNum passnum, GradInd gradind) {\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Backward pass started\");\n int gi = 0;\n if (gradind == GradInd::First) {\n gi = 1;\n } else if (gradind == GradInd::Second) {\n gi = 2;\n }\n mexPrintInt(\"Computing gradients\", gi);\n }\n for (size_t j = first_layer_; j < layers_.size(); ++j) {\n size_t i = first_layer_ + layers_.size() - 1 - j;\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Backward pass for layer\", layers_[i]->type_);\n }\n if (params_.verbose_ >= 5) {\n mexPrintMsg(\"DerivSum\", layers_[i]->deriv_mat_.sum());\n }\n layers_[i]->DropoutBackward();\n if (layers_[i]->function_ != \"soft\" || params_.lossfun_ != \"logreg\") {\n // special case, final derivaties are already computed in InitDeriv\n layers_[i]->Nonlinear(passnum);\n }\n if (gradind != GradInd::Nowhere && layers_[i]->lr_coef_ > 0) {\n layers_[i]->BiasGrads(passnum, gradind);\n if (i > 0) {\n layers_[i]->WeightGrads(layers_[i-1], gradind);\n }\n }\n if (params_.beta_ == 0 && params_.shift_ == 0) {\n if (i <= first_trained_) break;\n }\n if (i > 0) {\n layers_[i-1]->ResizeDerivMat();\n layers_[i]->TransformBackward(layers_[i-1]);\n }\n }\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Backward pass finished\");\n }\n}\n\nvoid Net::InitActiv(const MatGPU &data) {\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Initializing activations\");\n }\n mexAssertMsg(layers_.size() >= 1 , \"The net is not initialized\");\n first_layer_ = 0;\n Layer *firstlayer = layers_[first_layer_];\n firstlayer->activ_mat_.attach(data);\n firstlayer->activ_mat_.Validate();\n if (params_.verbose_ >= 5) {\n mexPrintMsg(\"InitActivSum\", firstlayer->activ_mat_.sum());\n }\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Activations initialized\");\n 
}\n}\n\nvoid Net::InitDeriv(const MatGPU &labels_batch, const MatGPU &coef_batch, ftype &loss) {\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Initializing gradients\");\n }\n size_t batchsize = labels_batch.size1();\n size_t classes_num = labels_batch.size2();\n Layer *lastlayer = layers_.back();\n ftype matsum = lastlayer->activ_mat_.sum();\n mexAssertMsg(!std::isnan(matsum), \"NaNs in the network output\");\n mexAssertMsg(batchsize == lastlayer->dims_[0],\n \"The number of objects in data and label batches is different\");\n mexAssertMsg(classes_num == lastlayer->length(),\n \"Labels in batch and last layer must have equal number of classes\");\n lossmat_.resize(batchsize, classes_num);\n lastlayer->deriv_mat_.resize_tensor(lastlayer->dims_);\n if (params_.lossfun_ == \"logreg\") {\n lossmat_ = lastlayer->activ_mat_;\n // to get the log(1) = 0 after and to avoid 0/0;\n lossmat_.CondAssign(labels_batch, false, kEps, 1);\n // to avoid log(0) and division by 0 if there are still some;\n lossmat_.CondAssign(lossmat_, false, 0, kEps);\n if (lastlayer->function_ == \"soft\") {\n // directly compute final derivatives, so Nonlinear is not needed\n lastlayer->deriv_mat_ = lastlayer->activ_mat_;\n lastlayer->deriv_mat_ -= labels_batch;\n } else {\n lastlayer->deriv_mat_ = labels_batch;\n (lastlayer->deriv_mat_ /= lossmat_) *= -1;\n }\n lossmat_.Log() *= -1;\n } else if (params_.lossfun_ == \"L-norm\") {\n lastlayer->deriv_mat_ = lastlayer->activ_mat_;\n lastlayer->deriv_mat_ -= labels_batch;\n lossmat_ = lastlayer->deriv_mat_;\n if (params_.normfun_ == 1) {\n lastlayer->deriv_mat_.Sign();\n lossmat_ *= lastlayer->deriv_mat_; // |f(x)-y|\n } else if (params_.normfun_ == 2) {\n (lossmat_ *= lastlayer->deriv_mat_) /= 2; // (f(x)-y)^2 / 2\n }\n }\n if (!coef_batch.empty()) {\n lastlayer->deriv_mat_.MultVect(coef_batch, 2);\n lossmat_.MultVect(coef_batch, 2);\n }\n lastlayer->deriv_mat_.Validate();\n loss = lossmat_.sum() / batchsize;\n if (params_.verbose_ >= 5) {\n 
mexPrintMsg(\"InitDerivSum\", lastlayer->deriv_mat_.sum());\n }\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Gradients initialized\");\n }\n}\n\nvoid Net::InitActivIBP(ftype &loss, int normfun) {\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Initializing activations for the IBP pass\");\n }\n Layer *firstlayer = layers_[first_layer_];\n size_t batchsize = firstlayer->dims_[0];\n size_t length = firstlayer->length();\n lossmat2_.resize(batchsize, length);\n lossmat2_ = firstlayer->deriv_mat_;\n // fill in first_mat, because later it will be swapped with activ_mat\n firstlayer->first_mat_.resize_tensor(firstlayer->dims_);\n firstlayer->first_mat_ = firstlayer->deriv_mat_;\n if (normfun == 1) { // L1-norm\n firstlayer->first_mat_.Sign();\n lossmat2_ *= firstlayer->first_mat_; // abs\n } else if (normfun == 2) { // L2-norm\n (lossmat2_ *= firstlayer->deriv_mat_) /= 2;\n }\n loss = lossmat2_.sum() / batchsize;\n if (params_.verbose_ >= 5) {\n mexPrintMsg(\"InitActivIBPSum\", firstlayer->first_mat_.sum());\n }\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Activations initialized\");\n }\n}\n\nvoid Net::InitActivAT(ftype coef, int normfun) {\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Initializing activations for the AT pass\");\n }\n Layer *firstlayer = layers_[first_layer_];\n if (normfun == 1) {\n firstlayer->deriv_mat_.Sign();\n }\n firstlayer->deriv_mat_ *= coef;\n firstlayer->activ_mat_ += firstlayer->deriv_mat_;\n firstlayer->activ_mat_.Validate();\n if (params_.verbose_ >= 5) {\n mexPrintMsg(\"InitActivATSum\", firstlayer->first_mat_.sum());\n }\n if (params_.verbose_ >= 4) {\n mexPrintMsg(\"Activations initialized\");\n }\n}\n\nvoid Net::UpdateWeights() {\n weights_.Update(params_);\n}\n\nvoid Net::ReadData(const mxArray *mx_data) {\n Dim dims = mexGetTensor(mx_data, data_);\n mexAssertMsg(layers_[0]->dims_[1] == dims[1] &&\n layers_[0]->dims_[2] == dims[2] &&\n layers_[0]->dims_[3] == dims[3],\n \"Data dimensions don't correspond to the input 
layer\");\n}\n\nvoid Net::ReadLabels(const mxArray *mx_labels) {\n Dim dims = mexGetTensor(mx_labels, labels_);\n mexAssertMsg(layers_.back()->dims_[1] == dims[1] &&\n layers_.back()->dims_[2] == dims[2] &&\n layers_.back()->dims_[3] == dims[3],\n \"Label's dimensions don't correspond to the output layer\");\n if (!classcoefs_.empty()) {\n mexAssertMsg(classcoefs_.size1() == layers_.back()->dims_[1],\n \"Classcoefs vector length don't correspond to the label matrix\");\n }\n}\n\nvoid Net::InitWeights(const mxArray *mx_weights_in) {\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"Start init weights\");\n }\n bool isgen = false;\n size_t num_weights = NumWeights();\n MatCPU weights_cpu;\n if (mx_weights_in != NULL) { // training, testing\n if (params_.verbose_ >= 1) {\n mexPrintInt(\"Model weights num\", num_weights);\n mexPrintInt(\"Input weights num\", mexGetNumel(mx_weights_in));\n }\n mexAssertMsg(num_weights == mexGetNumel(mx_weights_in),\n \"The vector of weights has the wrong length!\");\n mexGetMatrix(mx_weights_in, weights_cpu);\n } else { // genweights\n isgen = true;\n weights_cpu.resize(1, num_weights);\n }\n weights_.Init(weights_cpu);\n size_t offset = 0;\n for (size_t i = 0; i < layers_.size(); ++i) {\n layers_[i]->InitWeights(weights_, offset, isgen);\n }\n if (params_.verbose_ >= 1) {\n mexPrintMsg(\"Finish init weights\");\n }\n}\n\nvoid Net::GetWeights(mxArray *&mx_weights) const {\n size_t num_weights = NumWeights();\n mx_weights = mexNewMatrix(1, num_weights);\n MatCPU weights_cpu;\n weights_cpu.attach(mexGetPointer(mx_weights), 1, num_weights);\n size_t offset = 0;\n for (size_t i = 0; i < layers_.size(); ++i) {\n layers_[i]->RestoreOrder();\n }\n DeviceToHost(weights_.get(), weights_cpu);\n}\n\nvoid Net::GetLosses(mxArray *&mx_losses) const {\n mx_losses = mexSetMatrix(losses_);\n}\n\nsize_t Net::NumWeights() const {\n size_t num_weights = 0;\n for (size_t i = 0; i < layers_.size(); ++i) {\n num_weights += layers_[i]->NumWeights();\n 
//mexPrintInt(\"i\", num_weights);\n }\n return num_weights;\n}\n\nNet::~Net() {\n for (size_t i = 0; i < layers_.size(); ++i) {\n delete layers_[i];\n }\n layers_.clear();\n // remove here all GPU allocated memory manually,\n // otherwise CudaReset causes crash\n weights_.Clear();\n classcoefs_.clear(); // in fact vector\n lossmat_.clear();\n lossmat2_.clear();\n MatGPU::CudaReset();\n}\n" }, { "alpha_fraction": 0.5637425780296326, "alphanum_fraction": 0.572952926158905, "avg_line_length": 29.382251739501953, "blob_id": "2e35314d7b00b4c3dbee257ca94740e7384a0d8a", "content_id": "1eacf2bb564a21edc09371b10d3c9393958283cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8903, "license_type": "no_license", "max_line_length": 105, "num_lines": 293, "path": "/src/cuda/level1/gemm/Gemm.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "#include \"OptionParser.h\"\n#include \"ResultDatabase.h\"\n#include \"Timer.h\"\n#include \"Utility.h\"\n#include \"cublas.h\"\n#include \"cuda.h\"\n#include \"cuda_runtime.h\"\n#include \"cudacommon.h\"\n#include \"cuda_fp16.h\"\n#include <fstream>\n#include <iostream>\n#include <sstream>\n#include <string>\n\n#define SEED 7\nstatic const int FIELD_LENGTH = 128;\n\nusing namespace std;\n\ntemplate <class T>\nvoid RunTest(string testName, ResultDatabase &resultDB, OptionParser &op);\n\ntemplate <class T>\ninline void devGEMM(char transa, char transb, int m, int n, int k, T alpha,\n const T *A, int lda, const T *B, int ldb, T beta, T *C,\n int ldc);\n\n\n// ********************************************************\n// Function: fill\n//\n// Purpose:\n// Simple routine to initialize input array\n//\n// Arguments:\n// A: pointer to the array to initialize\n// n: number of elements in the array\n//\n// ********************************************************\ntemplate <class T> void fill(T *A, int n, int maxi) {\n for (int j = 0; j < n; j++) {\n A[j] = T((rand() % 
(maxi * 2 + 1)) - maxi) / (maxi + 1.);\n }\n}\n\n// ********************************************************\n// Function: readMatrix\n//\n// Purpose:\n// Initialize input arrays from a data file\n//\n// Arguments:\n// A: pointer to matrix A\n// B: pointer to matrix B\n// C: pointer to matrix C\n// n: number of elements in the array\n//\n// ********************************************************\ntemplate <class T> void readMatrix(T *A, T *B, T *C, int n, string filename) {\n std::ifstream mfs(filename.c_str());\n string line;\n // Ignore header line because it was already checked\n getline(mfs, line);\n float a, b, c;\n for (int j = 0; j < n; j++) {\n sscanf(line.c_str(), \"%f %f %f\", &a, &b, &c);\n A[j] = T(a);\n B[j] = T(b);\n C[j] = T(c);\n }\n}\n\n// ****************************************************************************\n// Function: addBenchmarkSpecOptions\n//\n// Purpose:\n// Add benchmark specific options parsing. The user is allowed to specify\n// the size of the input data in kiB.\n//\n// Arguments:\n// op: the options parser / parameter database\n//\n// Programmer: Anthony Danalis\n// Creation: September 08, 2009\n// Returns: nothing\n//\n// ****************************************************************************\nvoid addBenchmarkSpecOptions(OptionParser &op) {}\n\n// ****************************************************************************\n// Function: runBenchmark\n//\n// Purpose:\n// This benchmark measures the performance of the single precision general\n// matrix multiplication (SGEMM) operation in GFLOPS. 
Data transfer time\n// over the PCIe bus is not included in this measurement.\n//\n// Arguments:\n// resultDB: the benchmark stores its results in this ResultDatabase\n// op: the options parser / parameter database\n//\n// Returns: nothing\n//\n// Programmer: Anthony Danalis\n// Creation: September 08, 2009\n//\n// Modifications:\n//\n// ****************************************************************************\nvoid RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {\n cout << \"Running GEMM\" << endl;\n int device;\n cudaGetDevice(&device);\n cudaDeviceProp deviceProp;\n cudaGetDeviceProperties(&deviceProp, device);\n\n srand(SEED);\n\n bool quiet = op.getOptionBool(\"quiet\");\n\n if(!quiet) {\n cout << \"Running single precision test\" << endl;\n }\n RunTest<float>(\"SGEMM\", resultDB, op);\n\n // Test to see if this device supports double precision\n if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||\n (deviceProp.major >= 2)) {\n if(!quiet) {\n cout << \"Running double precision test\" << endl;\n }\n RunTest<double>(\"DGEMM\", resultDB, op);\n }\n}\n\ntemplate <class T>\nvoid RunTest(string testName, ResultDatabase &resultDB, OptionParser &op) {\n int passes = op.getOptionInt(\"passes\");\n int kib;\n\n // Use preset problem size or read data from input file\n string filename = op.getOptionString(\"inputFile\");\n if (filename == \"\") {\n int probSizes[4] = {1, 3, 40, 60};\n kib = probSizes[op.getOptionInt(\"size\") - 1];\n } else {\n std::ifstream mfs(filename.c_str());\n std::string line;\n char object[FIELD_LENGTH];\n sscanf(line.c_str(), \"%s %d\", object, &kib);\n }\n\n // Dimensions of matrix\n int N = kib * 1024 / sizeof(T);\n\n // Initialize the cublas library\n cublasInit();\n\n // Allocate GPU memory\n T *dA, *dB, *dC;\n CUDA_SAFE_CALL(cudaMalloc(&dA, N * N * sizeof(T)));\n CUDA_SAFE_CALL(cudaMalloc(&dB, N * N * sizeof(T)));\n CUDA_SAFE_CALL(cudaMalloc(&dC, N * N * sizeof(T)));\n\n // Initialize host memory\n T *A;\n T *B;\n T *C;\n\n 
CUDA_SAFE_CALL(cudaMallocHost(&A, N * N * sizeof(T)));\n CUDA_SAFE_CALL(cudaMallocHost(&B, N * N * sizeof(T)));\n CUDA_SAFE_CALL(cudaMallocHost(&C, N * N * sizeof(T)));\n\n // Fill matrix or read from input file\n if (filename == \"\") {\n fill<T>(A, N * N, 31);\n fill<T>(B, N * N, 31);\n fill<T>(C, N * N, 31);\n } else {\n readMatrix(A, B, C, N * N, filename);\n }\n\n // Copy input to GPU\n cudaEvent_t start, stop;\n cudaEventCreate(&start);\n cudaEventCreate(&stop);\n float elapsedTime;\n\n // Copy inputs to GPU\n double transferTime = 0;\n cudaEventRecord(start, 0);\n CUDA_SAFE_CALL(cudaMemcpy(dA, A, N * N * sizeof(T), cudaMemcpyHostToDevice));\n CUDA_SAFE_CALL(cudaMemcpy(dB, B, N * N * sizeof(T), cudaMemcpyHostToDevice));\n cudaEventRecord(stop, 0);\n cudaEventSynchronize(stop);\n cudaEventElapsedTime(&elapsedTime, start, stop);\n transferTime += elapsedTime * 1.e-3;\n\n bool first = true;\n for (int j = 0; j < passes; j++) {\n for (int i = 0; i < 2; i++) {\n const char transa = 'N';\n const char transb = i ? 
'T' : 'N';\n const int nb = 128;\n const int idim = N / nb;\n\n int dim = idim * nb;\n\n const int m = dim;\n const int n = dim;\n const int k = dim;\n const int lda = dim;\n const int ldb = dim;\n const int ldc = dim;\n const T alpha = 1;\n const T beta = 0; //-1;\n\n // Warm Up\n devGEMM<T>(transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC,\n ldc);\n cudaThreadSynchronize();\n CHECK_CUDA_ERROR();\n\n double cublasTime;\n float kernelTime = 0.0f;\n for (int ii = 0; ii < 4; ++ii) {\n cudaEventRecord(start, 0);\n devGEMM<T>(transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC,\n ldc);\n cudaEventRecord(stop, 0);\n cudaEventSynchronize(stop);\n CHECK_CUDA_ERROR();\n float currTime = 0.0f;\n cudaEventElapsedTime(&currTime, start, stop);\n kernelTime += currTime;\n }\n cublasTime = (kernelTime / 4.0) * 1.e-3;\n\n cudaEventRecord(start, 0);\n CUDA_SAFE_CALL(\n cudaMemcpy(C, dC, N * N * sizeof(float), cudaMemcpyDeviceToHost));\n cudaEventRecord(stop, 0);\n cudaEventSynchronize(stop);\n float oTransferTime = 0.0f;\n cudaEventElapsedTime(&oTransferTime, start, stop);\n oTransferTime *= 1.e-3;\n\n // Add the PCIe transfer time to total transfer time only once\n if (first) {\n transferTime += oTransferTime;\n first = false;\n }\n\n double cublasGflops = 2. * m * n * k / cublasTime / 1e9;\n double pcieGflops = 2. 
* m * n * k / (cublasTime + transferTime) / 1e9;\n string atts = \"dim:\" + toString(dim);\n resultDB.AddResult(testName + \"-\" + transb + \"-TransferTime\", atts, \"sec\", transferTime);\n resultDB.AddResult(testName + \"-\" + transb + \"-KernelTime\", atts, \"sec\", cublasTime);\n resultDB.AddResult(testName + \"-\" + transb + \"-TotalTime\", atts, \"sec\", transferTime + cublasTime);\n resultDB.AddResult(testName + \"-\" + transb, atts, \"GFlops\", cublasGflops);\n resultDB.AddResult(testName + \"-\" + transb + \"_PCIe\", atts, \"GFlops\", pcieGflops);\n resultDB.AddResult(testName + \"-\" + transb + \"_Parity\", atts, \"N\", transferTime / cublasTime);\n resultDB.AddOverall(\"GFlops\", \"\", cublasGflops);\n }\n }\n\n // Clean Up\n CUDA_SAFE_CALL(cudaFree(dA));\n CUDA_SAFE_CALL(cudaFree(dB));\n CUDA_SAFE_CALL(cudaFree(dC));\n CUDA_SAFE_CALL(cudaFreeHost(A));\n CUDA_SAFE_CALL(cudaFreeHost(B));\n CUDA_SAFE_CALL(cudaFreeHost(C));\n CUDA_SAFE_CALL(cudaEventDestroy(start));\n CUDA_SAFE_CALL(cudaEventDestroy(stop));\n cublasShutdown();\n}\n\n\ntemplate <>\ninline void devGEMM<double>(char transa, char transb, int m, int n, int k,\n double alpha, const double *A, int lda,\n const double *B, int ldb, double beta, double *C,\n int ldc) {\n cublasDgemm(transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);\n}\n\ntemplate <>\ninline void devGEMM<float>(char transa, char transb, int m, int n, int k,\n float alpha, const float *A, int lda, const float *B,\n int ldb, float beta, float *C, int ldc) {\n cublasSgemm(transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);\n}\n\n" }, { "alpha_fraction": 0.6722618341445923, "alphanum_fraction": 0.6760386228561401, "avg_line_length": 31.793577194213867, "blob_id": "5d32d5224fd3eb81da55317e819fff198d09ccee", "content_id": "5f649398286c9d4b6d91dd9a1b76be19fcfeb8f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7149, "license_type": "no_license", 
"max_line_length": 98, "num_lines": 218, "path": "/src/cuda/level2/neuralnet/mat_gpu.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _MAT_GPU_H_\n#define _MAT_GPU_H_\n\n#include \"include/mat_cpu.h\"\n#include \"cuda_print.h\"\n\n#include <map>\n\n#ifndef _Pragma // Windows\n#define _Pragma(x) __pragma(x)\n#endif\n\nclass MatGPU : public MatCPU {\n\nprivate:\n // static\n cudnnTensorDescriptor_t tensor_desc_;\n cudnnFilterDescriptor_t filter_desc_;\n void SetTensorDesc(const Dim &dims);\n void SetFilterDesc(const Dim &dims);\n cudnnTensorDescriptor_t GetTensorDesc();\n cudnnFilterDescriptor_t GetFilterDesc();\n void ClearTensorDesc();\n void ClearFilterDesc();\n\n static cudaStream_t _defaultStream;\n static curandGenerator_t _randGen;\n static cublasHandle_t _cublasHandle;\n static cudnnHandle_t _cudnnHandle;\n static int getDeviceID();\n static size_t _cudnnMemoryLimit;\n static MatGPU _workspace;\n\n static cudaEvent_t _start, _stop;\n\npublic:\n\n // static\n static void InitCuda(int gpu);\n static void InitRand(int seed);\n static void CudaReset();\n static void SetMemoryLimit(size_t memory);\n\n static void StartCudaTimer();\n static void MeasureCudaTime(std::string msg);\n\n // data access\n // private\n ftype 
operator () (size_t i, size_t j) const;\n size_t BytesNum() const;\n //cudaTextureObject_t getTextureObject();\n // public\n\n // memory functions\n MatGPU();\n MatGPU(size_t size1, size_t size2);\n MatGPU(const MatGPU &b);\n MatGPU(MatGPU &&b);\n ~MatGPU();\n MatGPU& init();\n MatGPU& operator = (const MatGPU &b);\n MatGPU& resize(size_t size1, size_t size2);\n MatGPU& resize_tensor(Dim dims);\n MatGPU& resize_filter(Dim dims);\n MatGPU& reshape(size_t size1, size_t size2);\n MatGPU& reshape_tensor(Dim dims);\n MatGPU& reshape_filter(Dim dims);\n MatGPU& attach(const MatGPU &b);\n MatGPU& attach(const MatGPU &b, size_t offset, size_t size1, size_t size2, bool order);\n MatGPU& attach(ftype *ptr, size_t size1, size_t size2);\n MatGPU& attach(ftype *ptr, size_t size1, size_t size2, bool order);\n MatGPU& clear();\n\n MatGPU& GetFromWorkspace(size_t size1, size_t size2);\n friend void Swap(MatGPU &a, MatGPU &b);\n\n // data functions\n MatGPU& ident();\n MatGPU& assign(ftype val);\n MatGPU& rand();\n MatGPU& randnorm();\n MatGPU& linear(ftype ca, ftype cb, const MatGPU &b, bool b_tr);\n MatGPU& operator += (const MatGPU &b);\n MatGPU& operator -= (const MatGPU &b);\n MatGPU& operator *= (const MatGPU &b);\n MatGPU& operator /= (const MatGPU &b);\n MatGPU& operator += (ftype c);\n MatGPU& operator -= (ftype c);\n MatGPU& operator *= (ftype c);\n MatGPU& operator /= (ftype c);\n MatGPU& Sign();\n MatGPU& Sqrt();\n MatGPU& Log();\n MatGPU& Exp();\n MatGPU& SoftMax();\n MatGPU& SoftDer(MatGPU& b);\n MatGPU& Sigmoid();\n MatGPU& SigmDer(const MatGPU& b);\n MatGPU& CondAssign(const MatGPU &condMatGPU, bool incase, ftype threshold, ftype a);\n MatGPU& CondAdd(const MatGPU &condMatGPU, bool incase, ftype threshold, ftype a);\n MatGPU& CondMult(const MatGPU &condMatGPU, bool incase, ftype threshold, ftype a);\n MatGPU& AddVect(MatGPU &vect, int dim);\n MatGPU& MultVect(const MatGPU &vect, int dim);\n MatGPU& Reorder(bool order);\n MatGPU& ReorderMaps(bool cur_order, bool 
order);\n MatGPU& Validate();\n\n // const functions\n Dim tensor_shape() const;\n Dim filter_shape() const;\n std::vector< std::vector<MatGPU> > InitMaps() const;\n ftype sum() const;\n\n // CPU <-> GPU functions\n MatGPU& operator = (const MatCPU &a); // HostToDevice\n friend void DeviceToHost(const MatGPU &b, MatCPU &a);\n friend void SubSet(MatCPU &a, MatGPU &b, size_t offset, bool dir);\n\n // friend functions\n\n friend void Sum(MatGPU &a, MatGPU &vect, int dim);\n friend void Mean(MatGPU &a, MatGPU &vect, int dim);\n //friend void Max(MatGPU &a, MatGPU &vect, int dim);\n\n friend void Trans(const MatGPU &a, MatGPU &b);\n\n\n // layer transformation functions\n friend void Prod(const MatGPU &a, bool a_tr, const MatGPU &b, bool b_tr, MatGPU &c);\n\n friend void AffineTransform(const MatGPU &images, MatGPU &targets,\n const MatGPU &shift_mat, const MatGPU &scale_mat,\n const MatGPU &mirror_mat, const MatGPU &angle_mat,\n ftype defval, bool dir);\n /*\n friend void VaryColors(MatGPU &images, const std::vector<int> &mapsize,\n const MatGPU &eigenvectors, ftype noise_std);\n\n */\n\n // CUDNN functions\n\n MatGPU& AddTensor(MatGPU &tensor);\n friend void ConvolutionForward(\n MatGPU& activs, MatGPU& filters, MatGPU& targets,\n const cudnnConvolutionDescriptor_t &conv_desc\n );\n friend void ConvolutionBackwardData(\n MatGPU& derivs, MatGPU& filters, MatGPU& targets,\n const cudnnConvolutionDescriptor_t &conv_desc\n );\n friend void ConvolutionBackwardFilter(\n MatGPU& activs, MatGPU& derivs, MatGPU& targets,\n const cudnnConvolutionDescriptor_t &conv_desc\n );\n friend void ConvolutionBackwardBias(\n MatGPU& derivs, MatGPU &targets\n );\n friend void Pooling(\n MatGPU& images, MatGPU& targets,\n cudnnPoolingDescriptor_t pool_desc\n );\n friend void PoolingUndo(\n MatGPU& activs, MatGPU& pool_activs,\n MatGPU& pool_derivs, MatGPU& targets,\n cudnnPoolingDescriptor_t pool_desc, bool dir\n );\n\n\nprivate:\n // cuda_util.cu\n friend float cuda_sum(const MatGPU 
&mat);\n friend void cuda_ident(MatGPU &mat);\n template <class Op>\n friend void _applyUnaryOp(MatGPU &mat, Op op);\n template <class Op>\n friend void _applyBinaryOp(MatGPU& mat, const MatGPU& b, Op op);\n template <class CondOp, class Op>\n friend void _applyCondOp(MatGPU& mat, const MatGPU& condmat, bool incase, CondOp condOp, Op op);\n friend void _affineTransform(const MatGPU &images, MatGPU &targets,\n int imgSize1, int imgSize2,\n int targSize1, int targSize2,\n const MatGPU &shift_mat, const MatGPU &scale_mat,\n const MatGPU &mirror_mat, const MatGPU &angle_mat,\n float defval, bool dir);\n\n friend void _maxPoolThirdPass(const MatGPU& activs, const MatGPU& pool_activs,\n const MatGPU& derivs, MatGPU& pool_derivs,\n int imgSize1, int imgSize2,\n int trgSize1, int trgSize2,\n Pair scale, Pair padding, Pair stride);\n\n template <class Op>\n friend void _applyRepMatOp(MatGPU& mat, const MatGPU& b, int dim, bool inner, Op op);\n friend void _varyColors(MatGPU &images, const MatGPU &add_mat);\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.6484671235084534, "alphanum_fraction": 0.6575182676315308, "avg_line_length": 37.48314666748047, "blob_id": "3a15fd4acf48ddac03a640c2e75d1e5e976027f9", "content_id": "31cf96ac3f24863471b868fb0c6839a233678aa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3425, "license_type": "no_license", "max_line_length": 104, "num_lines": 89, "path": "/src/cuda/level2/neuralnet/include/layer_pool.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY 
WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#include \"layer_pool.h\"\n\nLayerPool::LayerPool() {\n function_ = \"none\";\n add_bias_ = false;\n pooling_ = \"max\";\n CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc_));\n}\n\nLayerPool::~LayerPool() {\n CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc_));\n}\n\nvoid LayerPool::Init(const mxArray *mx_layer, const Layer *prev_layer) {\n dims_[1] = prev_layer->dims_[1];\n\n if (mexIsField(mx_layer, \"pooling\")) {\n pooling_ = mexGetString(mexGetField(mx_layer, \"pooling\"));\n mexAssertMsg(pooling_ == \"max\" || pooling_ == \"avg\", \"Unknown pooling type\");\n }\n mexAssertMsg(mexIsField(mx_layer, \"scale\"), \"The 'pool' type layer must contain the 'scale' field\");\n std::vector<ftype> scale = mexGetVector(mexGetField(mx_layer, \"scale\"));\n mexAssertMsg(scale.size() == 2, \"Length of the scale vector and maps dimensionality must coincide\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(1 <= scale[i] && scale[i] <= prev_layer->dims_[i+2],\n \"Scale on the 's' layer must be in the range [1, previous_layer_mapsize]\");\n scale_[i] = (int) scale[i];\n }\n mexAssertMsg(mexIsField(mx_layer, \"stride\"), \"The 'pool' type layer must contain the 'stride' field\");\n for (size_t i = 0; i < 2; ++i) {\n mexAssertMsg(1 <= stride_[i] && stride_[i] <= prev_layer->dims_[i+2],\n \"Stride on the 'pool' layer must be in the range [1, previous_layer_mapsize]\");\n }\n // setting CUDNN parameters\n cudnnPoolingMode_t mode;\n if (pooling_ == \"max\") {\n mode = CUDNN_POOLING_MAX;\n } else if (pooling_ == \"avg\") {\n mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;\n }\n cudnnNanPropagation_t nan_prop_mode = CUDNN_PROPAGATE_NAN;\n 
CUDNN_CALL(cudnnSetPooling2dDescriptor(\n pool_desc_, mode, nan_prop_mode, scale_[0], scale_[1],\n padding_[0], padding_[1], stride_[0], stride_[1]\n ));\n for (size_t i = 0; i < 2; ++i) {\n dims_[i+2] = 1 + (prev_layer->dims_[i+2] + 2*padding_[i] - scale_[i]) / stride_[i];\n }\n}\n\nvoid LayerPool::TransformForward(Layer *prev_layer, PassNum passnum) {\n if (pooling_ == \"avg\") {\n Pooling(prev_layer->activ_mat_, activ_mat_, pool_desc_);\n } else if (pooling_ == \"max\") {\n if (passnum == PassNum::ForwardTest || passnum == PassNum::Forward) {\n Pooling(prev_layer->activ_mat_, activ_mat_, pool_desc_);\n } else if (passnum == PassNum::ForwardLinear) {\n // use deriv_mat_'s to identify which locations to propagate to activ_mat_\n PoolingUndo(prev_layer->deriv_mat_, deriv_mat_,\n activ_mat_, prev_layer->activ_mat_,\n pool_desc_, false);\n }\n }\n}\n\nvoid LayerPool::TransformBackward(Layer *prev_layer) {\n PoolingUndo(prev_layer->activ_mat_, activ_mat_,\n deriv_mat_, prev_layer->deriv_mat_,\n pool_desc_, true);\n}\n" }, { "alpha_fraction": 0.5511669516563416, "alphanum_fraction": 0.5780969262123108, "avg_line_length": 34.870967864990234, "blob_id": "f64bdc66a98bd97e008be41267555bbd56a8194b", "content_id": "51adb2e12fbcabaa3211d644371bdbe07edce6c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 106, "num_lines": 31, "path": "/data/gemm/datagen.py", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom optparse import OptionParser\nimport random\nimport sys\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-k', '--kib', type=\"int\", default=1, help='Length of square matrix (in Kibibytes)')\n (options, args) = parser.parse_args()\n\n # check options\n if options.kib <= 0:\n print(\"Error: Data size must be positive.\")\n sys.exit()\n\n n = options.kib * 1024 // 4\n 
print(\"Generating matrix with dimensions %dx%d\" % (n, n))\n\n # fill method in gemm.cpp uses this value to generate elements\n maxi = 31\n\n random.seed(datetime.now())\n with open('gemm_%d' % options.kib, 'w') as f:\n # write header line\n f.write('%d\\n' % options.kib)\n # number of floats\n for i in range(n*n):\n val_a = ((random.randint(0, sys.maxsize) % (maxi*2+1))-maxi)/(maxi+1.0)\n val_b = ((random.randint(0, sys.maxsize) % (maxi*2+1))-maxi)/(maxi+1.0)\n val_c = ((random.randint(0, sys.maxsize) % (maxi*2+1))-maxi)/(maxi+1.0)\n f.write('%0.4f %0.4f %0.4f\\n' % (val_a, val_b, val_c))\n\n\n" }, { "alpha_fraction": 0.48496562242507935, "alphanum_fraction": 0.4906572103500366, "avg_line_length": 46.74871826171875, "blob_id": "0f03a47d9b7a78c77b4e5b9e4f779eff6366c77d", "content_id": "c8f832c0ad624b6ecc114481e62afcbfba670e2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9312, "license_type": "no_license", "max_line_length": 95, "num_lines": 195, "path": "/src/cuda/level2/kmeans/kmeans_clustering.cpp", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*****************************************************************************/\n/*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */\n/*By downloading, copying, installing or using the software you agree */\n/*to this license. If you do not agree to this license, do not download, */\n/*install, copy or use the software. */\n/* */\n/* */\n/*Copyright (c) 2005 Northwestern University */\n/*All rights reserved. */\n\n/*Redistribution of the software in source and binary forms, */\n/*with or without modification, is permitted provided that the */\n/*following conditions are met: */\n/* */\n/*1 Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. 
*/\n/* */\n/*2 Redistributions in binary form must reproduce the above copyright */\n/* notice, this list of conditions and the following disclaimer in the */\n/* documentation and/or other materials provided with the distribution.*/ \n/* */\n/*3 Neither the name of Northwestern University nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */\n/*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */\n/*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */\n/*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL */\n/*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */\n/*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */\n/*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */\n/*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */\n/*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */\n/*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */\n/*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/*POSSIBILITY OF SUCH DAMAGE. */\n/******************************************************************************/\n\n/*************************************************************************/\n/** File: kmeans_clustering.c **/\n/** Description: Implementation of regular k-means clustering **/\n/** algorithm **/\n/** Author: Wei-keng Liao **/\n/** ECE Department, Northwestern University **/\n/** email: [email protected] **/\n/** **/\n/** Edited by: Jay Pisharath **/\n/** Northwestern University. 
**/\n/** **/\n/** ================================================================ **/\n/**\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t**/\n/** Edited by: Shuai Che, David Tarjan, Sang-Ha Lee\t\t\t\t\t**/\n/**\t\t\t\t University of Virginia\t\t\t\t\t\t\t\t\t**/\n/**\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t**/\n/** Description:\tNo longer supports fuzzy c-means clustering;\t \t**/\n/**\t\t\t\t\tonly regular k-means clustering.\t\t\t\t\t**/\n/**\t\t\t\t\tNo longer performs \"validity\" function to analyze\t**/\n/**\t\t\t\t\tcompactness and separation crietria; instead\t\t**/\n/**\t\t\t\t\tcalculate root mean squared error.\t\t\t\t\t**/\n/** **/\n/*************************************************************************/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <float.h>\n#include <math.h>\n#include <omp.h>\n\n#include \"kmeans.h\"\n#include \"kmeans_cuda.h\"\n\n#define RANDOM_MAX 2147483647\n\nextern double wtime(void);\n\n/*----< kmeans_clustering() >---------------------------------------------*/\nfloat** kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */\n int nfeatures,\n int npoints,\n int nclusters,\n float threshold,\n int *membership,\n ResultDatabase &resultDB,\n bool quiet) /* out: [npoints] */\n{ \n int i, j, n = 0;\t\t\t\t/* counters */\n\tint\t\t loop=0, temp;\n int *new_centers_len;\t/* [nclusters]: no. 
of points in each cluster */\n float delta;\t\t\t\t/* if the point moved */\n float **clusters;\t\t\t/* out: [nclusters][nfeatures] */\n float **new_centers;\t\t/* [nclusters][nfeatures] */\n\n\tint *initial;\t\t\t/* used to hold the index of points not yet selected\n\t\t\t\t\t\t\t\t prevents the \"birthday problem\" of dual selection (?)\n\t\t\t\t\t\t\t\t considered holding initial cluster indices, but changed due to\n\t\t\t\t\t\t\t\t possible, though unlikely, infinite loops */\n\tint initial_points;\n\tint\t\t c = 0;\n\n\t/* nclusters should never be > npoints\n\t that would guarantee a cluster without points */\n\tif (nclusters > npoints) {\n\t\tnclusters = npoints;\n }\n\n /* allocate space for and initialize returning variable clusters[] */\n clusters = (float**) malloc(nclusters * sizeof(float*));\n clusters[0] = (float*) malloc(nclusters * nfeatures * sizeof(float));\n for (i=1; i<nclusters; i++)\n clusters[i] = clusters[i-1] + nfeatures;\n\n\t/* initialize the random clusters */\n\tinitial = (int *) malloc (npoints * sizeof(int));\n\tfor (i = 0; i < npoints; i++) {\n\t\tinitial[i] = i;\n\t}\n\tinitial_points = npoints;\n\n /* randomly pick cluster centers */\n for (i=0; i<nclusters && initial_points >= 0; i++) {\n\t\t//n = (int)rand() % initial_points;\t\t\n for (j=0; j<nfeatures; j++)\n clusters[i][j] = feature[initial[n]][j];\t// remapped\n\t\t/* swap the selected index to the end (not really necessary,\n\t\t could just move the end up) */\n\t\ttemp = initial[n];\n\t\tinitial[n] = initial[initial_points-1];\n\t\tinitial[initial_points-1] = temp;\n\t\tinitial_points--;\n\t\tn++;\n }\n\n\t/* initialize the membership to -1 for all */\n for (i=0; i < npoints; i++)\n\t membership[i] = -1;\n\n /* allocate space for and initialize new_centers_len and new_centers */\n new_centers_len = (int*) calloc(nclusters, sizeof(int));\n\n new_centers = (float**) malloc(nclusters * sizeof(float*));\n new_centers[0] = (float*) calloc(nclusters * nfeatures, sizeof(float));\n 
for (i=1; i<nclusters; i++)\n new_centers[i] = new_centers[i-1] + nfeatures;\n\n\t/* iterate until convergence */\n do {\n delta = 0.0;\n // CUDA\n double transferTime = 0.;\n double kernelTime = 0;\n delta = (float) kmeansCuda(\n feature,\t\t/* in: [npoints][nfeatures] */\n nfeatures,\t\t/* number of attributes for each point */\n npoints,\t\t/* number of data points */\n nclusters,\t\t/* number of clusters */\n membership,\t\t/* which cluster the point belongs to */\n clusters,\t\t/* out: [nclusters][nfeatures] */\n new_centers_len,/* out: number of points in each cluster */\n new_centers,\t/* sum of points in each cluster */\n transferTime,\n kernelTime,\n resultDB);\n \n char tmp[32];\n sprintf(tmp, \"%dpoints,%dfeatures\", npoints, nfeatures);\n string atts = string(tmp);\n resultDB.AddResult(\"KMeans-TransferTime\", atts, \"sec/iteration\", transferTime);\n resultDB.AddResult(\"KMeans-KernelTime\", atts, \"sec/iteration\", kernelTime);\n resultDB.AddResult(\"KMeans-TotalTime\", atts, \"sec/iteration\", transferTime+kernelTime);\n resultDB.AddResult(\"KMeans-Rate_Parity\", atts, \"N\", transferTime/kernelTime);\n resultDB.AddOverall(\"Time\", \"sec\", kernelTime+transferTime);\n\n\t\t/* replace old cluster centers with new_centers */\n\t\t/* CPU side of reduction */\n\t\tfor (i=0; i<nclusters; i++) {\n\t\t\tfor (j=0; j<nfeatures; j++) {\n\t\t\t\tif (new_centers_len[i] > 0)\n\t\t\t\t\tclusters[i][j] = new_centers[i][j] / new_centers_len[i];\t/* take average i.e. 
sum/n */\n\t\t\t\tnew_centers[i][j] = 0.0;\t/* set back to 0 */\n\t\t\t}\n\t\t\tnew_centers_len[i] = 0;\t\t\t/* set back to 0 */\n\t\t}\t \n\t\tc++;\n } while ((delta > threshold) && (loop++ < 500));\t/* makes sure loop terminates */\n if(!quiet) {\n printf(\"Iterated %d times\\n\", c);\n }\n\n free(new_centers[0]);\n free(new_centers);\n free(new_centers_len);\n\n return clusters;\n}\n\n" }, { "alpha_fraction": 0.711155354976654, "alphanum_fraction": 0.7236055731773376, "avg_line_length": 28.52941131591797, "blob_id": "dc116d33b29fa0180547b0df9fa0bfd5a0da17d2", "content_id": "8ac1037c18b037a9060208df6a7d0d7f2de97665", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2008, "license_type": "no_license", "max_line_length": 77, "num_lines": 68, "path": "/src/cuda/level2/neuralnet/include/settings.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n*/\n\n#ifndef _SETTINGS_H_\n#define _SETTINGS_H_\n\n// N - number, C - channel, H - height, W - width\n// in the layout code first is slowest, last is fastest\n// order = false -> size1 is fastest, corresponds to CWHN (AlexNet) layout\n// order = true -> size2 is fastest, corresponds to NCHW (CuDNN) layout\n\n// indicates the layout of containers and maps inside them, fed from Matlab\nstatic const bool kExternalOrder = true;\n\n// indicates how containers and maps inside them are stored in toolbox memory\nstatic const bool kInternalOrder = true;\n\n// they are preferred to match, otherwise a lot of reordering is required\n\n// PRECISION = 1 -> float\n// PRECISION = 2 -> double, but it has not been tested\n#define PRECISION 1\n\n#if PRECISION == 1\n typedef float ftype;\n #define MEX_CLASS mxSINGLE_CLASS\n #define CUDNN_TYPE CUDNN_DATA_FLOAT\n#elif PRECISION == 2\n typedef double ftype;\n #define MEX_CLASS mxDOUBLE_CLASS\n #define CUDNN_TYPE CUDNN_DATA_DOUBLE\n#endif\n\n#define CUDNN_LAYOUT CUDNN_TENSOR_NCHW\n\n#define PRECISION_EPS 1e-6\nstatic const ftype kEps = (ftype) PRECISION_EPS;\n\nstatic const ftype kPi = (ftype) 3.141592654;\n\n#ifndef MIN\n #define MIN(a, b) ((a) > (b) ? (b) : (a))\n#endif\n#ifndef MAX\n #define MAX(a, b) ((a) > (b) ? 
(a) : (b))\n#endif\n#ifndef DIVUP\n #define DIVUP(x, y) (((x) + (y) - 1) / (y))\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.7497656941413879, "alphanum_fraction": 0.7544517517089844, "avg_line_length": 28.63888931274414, "blob_id": "8f8da55fd4b7319d754fedb72e0df608f04709ec", "content_id": "e2cebacd104b4959c9b81955c4eaf93200b58b6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/src/cuda/level2/neuralnet/include/layer_full.h", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2016 Sergey Demyanov.\ncontact: [email protected]\nhttp://www.demyanov.net\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n*/\n\n#ifndef _LAYER_FULL_H_\n#define _LAYER_FULL_H_\n\n#include \"layer.h\"\n\nclass LayerFull : public Layer {\n\npublic:\n LayerFull();\n ~LayerFull() {};\n void Init(const mxArray *mx_layer, const Layer *prev_layer);\n void TransformForward(Layer *prev_layer, PassNum passnum);\n void TransformBackward(Layer *prev_layer);\n void WeightGrads(Layer *prev_layer, GradInd gradind);\n};\n\n#endif\n" }, { "alpha_fraction": 0.7265306115150452, "alphanum_fraction": 0.7265306115150452, "avg_line_length": 24.34482765197754, "blob_id": "1d31a81e3da834fdbbc57599fb5b6a7c68854352", "content_id": "dc37202b28f448fb10ef350a9ba96c3978d21850", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 735, "license_type": "no_license", "max_line_length": 66, "num_lines": 29, "path": "/src/cuda/level2/particlefilter/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "include $(top_builddir)/config/config.mk\ninclude $(top_builddir)/config/targets.mk\n\n# How to find source files\nVPATH = $(srcdir):$(srcdir)/../../common:$(srcdir)/../../../common\n\nAM_LDFLAGS = $(CUDA_LDFLAGS)\nAM_CPPFLAGS = $(CUDA_INC)\n\n# What is the destination for programs built from this directory?\ncudadir = $(bindir)/CUDA\n\n# What programs should be installed to that destination?\ncuda_PROGRAMS = particlefilter_naive particlefilter_float\n\n# How to build those programs?\nparticlefilter_naive_SOURCES = \\\nmain.cpp\n\nparticlefilter_naive_LDADD = \\\nex_particle_CUDA_naive_seq.o \\\n$(CUDA_LIBS) $(LIBS) -lm\n\nparticlefilter_float_SOURCES = \\\nmain.cpp\n\nparticlefilter_float_LDADD = \\\nex_particle_CUDA_float_seq.o \\\n$(CUDA_LIBS) $(LIBS) -lm\n" }, { "alpha_fraction": 0.8472222089767456, "alphanum_fraction": 0.8611111044883728, "avg_line_length": 71, "blob_id": "a84992a22a7f7addf909c3cb05077c7d023a7578", "content_id": "a827db5db67ae0e3deb8098544c53239863b8d9c", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 72, "license_type": "no_license", "max_line_length": 71, "num_lines": 1, "path": "/src/cuda/level2/Makefile.am", "repo_name": "sarahgrace/mirovia", "src_encoding": "UTF-8", "text": "SUBDIRS=cfd dwt2d kmeans lavamd mandelbrot nw particlefilter srad where\n" } ]
55
7474/MackerelAlertToAwsIot
https://github.com/7474/MackerelAlertToAwsIot
37357be60476cb4403e437f2f5296f86bbeec473
888a36495e52d3ff923d26e11d287f906c95376a
0b653e33189d8829c91e85c2aa225584b5101bf5
refs/heads/master
2021-08-17T07:02:03.795561
2020-01-24T23:29:35
2020-01-24T23:29:35
229,935,168
0
0
null
2019-12-24T12:08:13
2020-01-24T23:29:39
2021-08-02T20:20:12
Python
[ { "alpha_fraction": 0.5673925876617432, "alphanum_fraction": 0.5685697197914124, "avg_line_length": 34.39583206176758, "blob_id": "df58918c23ea12f55f26b01769baf869899ab240", "content_id": "4093ff6e17e8f660a990faeb77c3e51e48a817e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1699, "license_type": "no_license", "max_line_length": 121, "num_lines": 48, "path": "/src/MackerelAlertToAwsIot/Program.cs", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "using Amazon.CDK;\nusing Microsoft.Extensions.Configuration;\nusing Microsoft.Extensions.Configuration.Json;\nusing System.IO;\nusing System.Reflection;\n\nnamespace MackerelAlertToAwsIot\n{\n sealed class Program\n {\n public static void Main(string[] args)\n {\n var config = new ConfigurationBuilder()\n .Add(new JsonConfigurationSource() { Path = @\"config.json\", })\n .Build();\n var env = new Environment();\n var app = new App();\n\n var mackerelAlertBridgeProps = LoadConfig(config, env, new MackerelAlertBridgeProps());\n var mackerelAlertBridge = new MackerelAlertBridgeStack(app, \"MackerelAlertBridge\", mackerelAlertBridgeProps);\n\n var mackerelAlertLampProps = LoadConfig(config, env, new MackerelAlertLampProps());\n var mackerelAlertLamp = new MackerelAlertLampStack(app, \"MackerelAlertLamp\", mackerelAlertLampProps);\n\n app.Synth();\n }\n\n private static T LoadConfig<T>(IConfiguration config, IEnvironment env, T props) where T : StackProps\n {\n props.Env = env;\n var section = config.GetSection(typeof(T).Name);\n foreach (var p in typeof(T).GetProperties())\n {\n object value = section[p.Name];\n if (value == null)\n {\n value = section.GetSection(p.Name)?.Get(p.PropertyType);\n }\n System.Console.Out.WriteLine(\"{0}: {1}\", p.Name, value);\n if (value != null)\n {\n p.SetValue(props, value);\n }\n }\n return props;\n }\n }\n}\n" }, { "alpha_fraction": 0.7174603343009949, "alphanum_fraction": 0.7174603343009949, 
"avg_line_length": 34, "blob_id": "61c4221b12e7cc776b93c7d2dc6c600ec29462eb", "content_id": "3424f3303f951a28ff1e669bea3527a45846003b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 630, "license_type": "no_license", "max_line_length": 229, "num_lines": 18, "path": "/README.md", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "# MackerelAlertToAwsIot\n\nThis [AWS IoT Greengrass](https://aws.amazon.com/greengrass/) Solution will light up the LED when it receives an alert notification from [Mackerel's EvenBridge channel](https://mackerel.io/ja/docs/entry/howto/alerts/eventbridge).\n\n## Setup\n\nT.B.D.\n\n----\n\n## Useful commands\n\n* `dotnet build src` compile this app\n* `cdk ls` list all stacks in the app\n* `cdk synth` emits the synthesized CloudFormation template\n* `cdk deploy` deploy this stack to your default AWS account/region\n* `cdk diff` compare deployed stack with current state\n* `cdk docs` open CDK documentation\n" }, { "alpha_fraction": 0.5839080214500427, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 21.894737243652344, "blob_id": "6da282a260b3ce1b6b031faf53636e50681dd6e5", "content_id": "1bc5f1e512d1cccaec779567bb93c64f459e5eba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 477, "license_type": "no_license", "max_line_length": 61, "num_lines": 19, "path": "/deploy/reset-greengrass-deploy.js", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "var AWS = require(\"aws-sdk\");\nvar greengrass = new AWS.Greengrass();\n\n(async () => {\n try {\n // 当面複数グループを作らないので先頭を使う。\n const groups = await greengrass.listGroups({}).promise();\n const groupId = groups.Groups[0].Id;\n const reset = await greengrass\n .resetDeployments({\n GroupId: groupId,\n Force: true\n })\n .promise();\n console.log(reset);\n } catch (err) {\n console.error(err);\n }\n})();\n" }, { 
"alpha_fraction": 0.672797679901123, "alphanum_fraction": 0.6795740723609924, "avg_line_length": 22.5, "blob_id": "b8ef66d170b8b97e7619a00ba1d3fda55f6d738c", "content_id": "2e644024ed63b4f6343183c6bce5741c9c7bc409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 64, "num_lines": 44, "path": "/handlers/device/ToggleGpio.py", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "import os\nimport greengrasssdk\nimport json\nimport sys\nimport logging\n\n# Setup logging to stdout\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\niot_client = greengrasssdk.client('iot-data')\n\nthingName = os.environ['AWS_IOT_THING_NAME']\n\ndef get_read_topic(gpio_num):\n return '/'.join(['gpio', thingName, str(gpio_num), 'read'])\n\ndef get_write_topic(gpio_num):\n return '/'.join(['gpio', thingName, str(gpio_num), 'write'])\n\ndef send_message_to_connector(topic, message=''):\n iot_client.publish(topic=topic, payload=str(message))\n\ndef set_gpio_state(gpio, state):\n send_message_to_connector(get_write_topic(gpio), str(state))\n\ndef read_gpio_state(gpio):\n send_message_to_connector(get_read_topic(gpio))\n\ndef handler(event, context):\n logger.info(\"Received message!\")\n logger.info(event)\n logger.info(type(event))\n\n # event\n # 1 : button off\n # 0 : button on\n\n state = 0\n if(event == 0):\n state = 1\n set_gpio_state(11, state)\n\n return" }, { "alpha_fraction": 0.5844785571098328, "alphanum_fraction": 0.5941794514656067, "avg_line_length": 32.43243408203125, "blob_id": "64e928912afed70f7f703e663109441183117044", "content_id": "64850f1ebbdd8753516a1d8924e86bf08faf0f85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 126, "num_lines": 37, "path": 
"/src/MackerelAlertToAwsIot/MackerelAlertBridge.cs", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "using Amazon.CDK;\nusing Amazon.CDK.AWS.Events;\nusing Amazon.CDK.AWS.IAM;\n\nnamespace MackerelAlertToAwsIot\n{\n public class MackerelAlertBridgeProps : StackProps\n {\n public string StsExternalId { get; set; }\n }\n\n // Mackerel と直接関連するリソースを管理する。\n // スタック名はEventBridge通知チャンネル関連のリソースを作ろうとしていた時の名残。\n // 特にBridge成分は残っていない。\n public class MackerelAlertBridgeStack : Stack\n {\n internal MackerelAlertBridgeStack(Construct scope, string id, MackerelAlertBridgeProps props) : base(scope, id, props)\n {\n // Ref: https://mackerel.io/ja/docs/entry/integrations/aws\n var integrationRole = new Role(this, \"IntegrationRole\", new RoleProps()\n {\n AssumedBy = new AccountPrincipal(\"217452466226\"),\n ExternalIds = new string[]{\n props.StsExternalId,\n },\n ManagedPolicies = new IManagedPolicy[]\n {\n ManagedPolicy.FromAwsManagedPolicyName(\"AWSLambdaReadOnlyAccess\"),\n },\n });\n new CfnOutput(this, \"IntegrationRoleArn\", new CfnOutputProps()\n {\n Value = integrationRole.RoleArn\n });\n }\n }\n}\n" }, { "alpha_fraction": 0.47653570771217346, "alphanum_fraction": 0.48159387707710266, "avg_line_length": 43.48249816894531, "blob_id": "232f4ffd2e6885aac7be2e9468cd92ba089bfeba", "content_id": "5621623a61ee906cb7f165d93ef8fb3b291644b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 18141, "license_type": "no_license", "max_line_length": 186, "num_lines": 400, "path": "/src/MackerelAlertToAwsIot/MackerelAlertLamp.cs", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "using Amazon.CDK;\nusing Amazon.CDK.AWS.Events;\nusing Amazon.CDK.AWS.Events.Targets;\nusing Amazon.CDK.AWS.Greengrass;\nusing Amazon.CDK.AWS.IAM;\nusing Amazon.CDK.AWS.IoT;\nusing Amazon.CDK.AWS.Lambda;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\n\nnamespace 
MackerelAlertToAwsIot\n{\n public class MackerelAlertLampProps : StackProps\n {\n public string OrganizationName { get; set; }\n public string EventName { get; set; }\n public string[] ThingCerts { get; set; }\n }\n\n // MackerelのEventBridge通知チャンネルへのアラート通知を受けたら、\n // Greengrassデバイスに接続されているLampを点灯させるアプリケーションのスタック。\n public class MackerelAlertLampStack : Stack\n {\n internal MackerelAlertLampStack(Construct scope, string id, MackerelAlertLampProps props) : base(scope, id, props)\n {\n System.Console.Out.WriteLine(string.Join(\",\", props.ThingCerts));\n\n // MackerelのEventBridge通知チャンネル先のパートナーイベントソース名\n // リソース名に使用するのでここで構築しておく。\n var eventSourceName = $\"aws.partner/mackerel.io/{props.OrganizationName}/{props.EventName}\";\n\n var thingPolicy = new Amazon.CDK.AWS.IoT.CfnPolicy(this, \"MackerelAlertLampThingPoilcy\", new Amazon.CDK.AWS.IoT.CfnPolicyProps()\n {\n PolicyName = \"MackerelAlertLampThingPoilcy\",\n PolicyDocument = new Dictionary<string, object>\n {\n [\"Version\"] = \"2012-10-17\",\n [\"Statement\"] = new object[] {\n new Dictionary<string, object>\n {\n [\"Effect\"] = \"Allow\",\n [\"Action\"] = new string[] {\n \"iot:*\",\n \"greengrass:*\",\n },\n [\"Resource\"] = new string[] {\n \"*\"\n },\n }\n }\n }\n });\n\n // IDなどに使うために証明書のARNを加工しておく。\n var certs = props.ThingCerts\n .Select(x => new\n {\n Arn = x,\n Hash = Utils.ToHash(x),\n }).ToList();\n var certAttaches = certs.Select(x =>\n {\n var attach = new CfnPolicyPrincipalAttachment(this, \"MackerelAlertLampCertAttach-\" + x.Hash, new CfnPolicyPrincipalAttachmentProps()\n {\n PolicyName = thingPolicy.PolicyName,\n Principal = x.Arn,\n });\n attach.AddDependsOn(thingPolicy);\n return attach;\n }).ToList();\n\n var things = certs.Select(x => new\n {\n CertArn = x.Arn,\n Thing = new CfnThing(this, \"MackerelAlertLampThing-\" + x.Hash, new CfnThingProps()\n {\n ThingName = \"MackerelAlertLamp-\" + x.Hash,\n })\n }).ToList();\n var thingAttaches = things.Select(x =>\n {\n var attach = new 
CfnThingPrincipalAttachment(this, x.Thing.ThingName + \"Attach\", new CfnThingPrincipalAttachmentProps()\n {\n ThingName = x.Thing.ThingName,\n Principal = x.CertArn,\n });\n attach.AddDependsOn(x.Thing);\n return attach;\n }).ToList();\n\n var cloudReceiveAlertFunction = new Function(this, \"CloudReceiveAlert\", new FunctionProps()\n {\n Runtime = Runtime.PYTHON_3_7,\n Code = Code.FromAsset(\"handlers/cloud\"),\n Handler = \"ReceiveAlert.handler\",\n Environment = new Dictionary<string, string>()\n {\n [\"MACKEREL_ALERT_TOPIC\"] = eventSourceName,\n },\n });\n cloudReceiveAlertFunction.AddToRolePolicy(new PolicyStatement(new PolicyStatementProps()\n {\n Actions = new string[]\n {\n \"iot:Publish\",\n },\n Resources = new string[]\n {\n \"*\",\n },\n }));\n\n var ggLambda = new Function(this, \"DeviceReceiveAlert\", new FunctionProps()\n {\n Runtime = Runtime.PYTHON_3_7,\n Code = Code.FromAsset(\"handlers/device\"),\n Handler = \"ReceiveAlert.handler\",\n });\n var ggLambdaVersion = ggLambda.AddVersion(\"v1\");\n var ggLambdaAlias = new Alias(this, \"DeviceReceiveAlertAlias\", new AliasProps()\n {\n AliasName = \"v\" + ggLambdaVersion.Version,\n Version = ggLambdaVersion,\n });\n\n var toggleGpio = new Function(this, \"DeviceToggleGpio\", new FunctionProps()\n {\n Runtime = Runtime.PYTHON_3_7,\n Code = Code.FromAsset(\"handlers/device\"),\n Handler = \"ToggleGpio.handler\",\n });\n var toggleGpioVersion = toggleGpio.AddVersion(\"v1\");\n var toggleGpioAlias = new Alias(this, \"DeviceToggleGpioAlias\", new AliasProps()\n {\n AliasName = \"v\" + toggleGpioVersion.Version,\n Version = toggleGpioVersion,\n });\n\n var ggCoreId = 0;\n var ggCore = new CfnCoreDefinition(this, \"MackerelAlertLampCore\", new CfnCoreDefinitionProps()\n {\n Name = \"MackerelAlertLampCore\",\n InitialVersion = new CfnCoreDefinition.CoreDefinitionVersionProperty()\n {\n Cores = things.Select(x => new CfnCoreDefinition.CoreProperty()\n {\n Id = (++ggCoreId).ToString(),\n CertificateArn = 
x.CertArn,\n // XXX ARN参照できないの?\n //ThingArn = x.Thing.GetAtt(\"Arn\").Reference.ToString(),\n //ThingArn = x.Thing.GetAtt(\"resource.arn\").Reference.ToString(),\n ThingArn = $\"arn:aws:iot:{this.Region}:{this.Account}:thing/{x.Thing.ThingName}\",\n }).ToArray(),\n }\n });\n things.ForEach(x =>\n {\n ggCore.AddDependsOn(x.Thing);\n });\n\n var gpioRw = new CfnResourceDefinition.ResourceInstanceProperty()\n {\n Id = \"gpio-rw\",\n Name = \"RaspberryPiGpioRw\",\n ResourceDataContainer = new CfnResourceDefinition.ResourceDataContainerProperty()\n {\n LocalDeviceResourceData = new CfnResourceDefinition.LocalDeviceResourceDataProperty()\n {\n SourcePath = \"/dev/gpiomem\",\n GroupOwnerSetting = new CfnResourceDefinition.GroupOwnerSettingProperty()\n {\n AutoAddGroupOwner = true,\n },\n }\n },\n };\n var ggResource = new CfnResourceDefinition(this, \"MackerelAlertLampResource\", new CfnResourceDefinitionProps()\n {\n Name = \"MackerelAlertLampResource\",\n InitialVersion = new CfnResourceDefinition.ResourceDefinitionVersionProperty()\n {\n Resources = new CfnResourceDefinition.ResourceInstanceProperty[]\n {\n gpioRw,\n }\n },\n });\n\n var ggFunction = new CfnFunctionDefinition(this, \"MackerelAlertLampFunction\", new CfnFunctionDefinitionProps()\n {\n Name = \"MackerelAlertLampFunction\",\n InitialVersion = new CfnFunctionDefinition.FunctionDefinitionVersionProperty()\n {\n Functions = new CfnFunctionDefinition.FunctionProperty[]\n {\n new CfnFunctionDefinition.FunctionProperty(){\n Id = ggLambda.FunctionName + \"-\" + ggLambdaAlias.AliasName,\n FunctionArn = ggLambdaAlias.FunctionArn,\n FunctionConfiguration = new CfnFunctionDefinition.FunctionConfigurationProperty()\n {\n // MemorySize と Timeout は必須である様子\n MemorySize = 65535,\n Timeout = 10, // 秒\n },\n },\n new CfnFunctionDefinition.FunctionProperty(){\n Id = toggleGpio.FunctionName + \"-\" + toggleGpioAlias.AliasName,\n FunctionArn = toggleGpioAlias.FunctionArn,\n FunctionConfiguration = new 
CfnFunctionDefinition.FunctionConfigurationProperty()\n {\n // MemorySize と Timeout は必須である様子\n MemorySize = 65535,\n Timeout = 10, // 秒\n },\n },\n },\n },\n });\n\n // https://docs.aws.amazon.com/ja_jp/greengrass/latest/developerguide/raspberrypi-gpio-connector.html\n var gpioConnector = new CfnConnectorDefinition.ConnectorProperty()\n {\n Id = \"gpio-connector\",\n ConnectorArn = $\"arn:aws:greengrass:{this.Region}::/connectors/RaspberryPiGPIO/versions/1\",\n Parameters = new Dictionary<string, object>()\n {\n [\"GpioMem-ResourceId\"] = gpioRw.Id,\n //[\"InputGpios\"] = \"5,6U,7D\",\n //[\"InputPollPeriod\"] = 50,\n // 10, 9, 11番は配置連続しているのでとりあえずそれを使う\n [\"OutputGpios\"] = \"9L,10L,11L\",\n }\n };\n var ggConnector = new CfnConnectorDefinition(this, \"MackerelAlertLampConnector\", new CfnConnectorDefinitionProps()\n {\n Name = \"MackerelAlertLampConnector\",\n InitialVersion = new CfnConnectorDefinition.ConnectorDefinitionVersionProperty()\n {\n Connectors = new CfnConnectorDefinition.ConnectorProperty[]{\n gpioConnector,\n },\n }\n });\n\n var ggSubscriptions = new CfnSubscriptionDefinition.SubscriptionProperty[]\n {\n // ReceiveAlert Cloud to Device\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"mackerel-alert-to-device\",\n Source = \"cloud\",\n Target = ggLambdaAlias.FunctionArn,\n Subject = eventSourceName,\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"mackerel-alert-gpio-write-11\",\n Source = ggLambdaAlias.FunctionArn,\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/11/write\",\n },\n // XXX Currently, when you create a subscription that uses the Raspberry Pi GPIO connector, you must specify a value for at least one of the + wildcards in the topic.\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-read\",\n Source = \"cloud\",\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/9/read\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = 
\"gpio-write\",\n Source = \"cloud\",\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/9/write\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-state\",\n Source = gpioConnector.ConnectorArn,\n Target = \"cloud\",\n Subject =\"gpio/+/9/state\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-error\",\n Source = gpioConnector.ConnectorArn,\n Target = \"cloud\",\n Subject =\"gpio/+/error\",\n },\n //\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-read-10\",\n Source = \"cloud\",\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/10/read\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-write-10\",\n Source = \"cloud\",\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/10/write\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-state-10\",\n Source = gpioConnector.ConnectorArn,\n Target = \"cloud\",\n Subject =\"gpio/+/10/state\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-read-11\",\n Source = toggleGpioAlias.FunctionArn,\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/11/read\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-write-11\",\n Source = toggleGpioAlias.FunctionArn,\n Target = gpioConnector.ConnectorArn,\n Subject =\"gpio/+/11/write\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-state-11\",\n Source = gpioConnector.ConnectorArn,\n Target = \"cloud\",\n Subject =\"gpio/+/11/state\",\n },\n new CfnSubscriptionDefinition.SubscriptionProperty()\n {\n Id = \"gpio-test\",\n Source = \"cloud\",\n Target = toggleGpioAlias.FunctionArn,\n Subject =\"gpio/test\",\n },\n };\n var ggSubscription = new CfnSubscriptionDefinition(this, \"MackerelAlertLampSubscription\", new CfnSubscriptionDefinitionProps()\n {\n Name = \"MackerelAlertLampSubscription\",\n InitialVersion = new 
CfnSubscriptionDefinition.SubscriptionDefinitionVersionProperty()\n {\n Subscriptions = ggSubscriptions,\n },\n });\n\n var ggGroup = new Amazon.CDK.AWS.Greengrass.CfnGroup(this, \"MackerelAlertLampGroup\", new Amazon.CDK.AWS.Greengrass.CfnGroupProps()\n {\n Name = \"MackerelAlertLamp\",\n // XXX 引数にする\n RoleArn = \"arn:aws:iam::854403262515:role/service-role/Greengrass_ServiceRole\",\n });\n var ggVersionHash = Utils.ToHash(string.Join(\"-\",\n ggCore.AttrLatestVersionArn,\n ggFunction.AttrLatestVersionArn,\n ggResource.AttrLatestVersionArn,\n ggConnector.AttrLatestVersionArn,\n ggSubscription.AttrLatestVersionArn));\n var ggLatestVersion = new CfnGroupVersion(this, \"MackerelAlertLampGroupVersion-\" + ggVersionHash, new CfnGroupVersionProps()\n {\n GroupId = ggGroup.AttrId,\n CoreDefinitionVersionArn = ggCore.AttrLatestVersionArn,\n FunctionDefinitionVersionArn = ggFunction.AttrLatestVersionArn,\n ResourceDefinitionVersionArn = ggResource.AttrLatestVersionArn,\n ConnectorDefinitionVersionArn = ggConnector.AttrLatestVersionArn,\n SubscriptionDefinitionVersionArn = ggSubscription.AttrLatestVersionArn,\n });\n ggLatestVersion.AddDependsOn(ggGroup);\n ggLatestVersion.AddDependsOn(ggCore);\n ggLatestVersion.AddDependsOn(ggResource);\n ggLatestVersion.AddDependsOn(ggFunction);\n ggLatestVersion.AddDependsOn(ggConnector);\n ggLatestVersion.AddDependsOn(ggSubscription);\n\n var mackerelAlertBus = new EventBus(this, \"mackerel-alert-bus\", new EventBusProps()\n {\n EventSourceName = eventSourceName,\n });\n var mackerelAlertRule = new Rule(this, \"mackerel-alert-rule\", new RuleProps()\n {\n EventBus = mackerelAlertBus,\n EventPattern = new EventPattern()\n {\n Account = new string[]{\n this.Account,\n },\n },\n Targets = new IRuleTarget[] {\n new LambdaFunction(cloudReceiveAlertFunction),\n },\n });\n }\n }\n}\n" }, { "alpha_fraction": 0.6274256110191345, "alphanum_fraction": 0.6300129294395447, "avg_line_length": 25.65517234802246, "blob_id": 
"d54e20a0becdc39d1d43cd06405aacc7e3096654", "content_id": "f3bb31045458c29f90cb282ca3819b6956b11e8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 893, "license_type": "no_license", "max_line_length": 61, "num_lines": 29, "path": "/deploy/create-greengrass-deploy.js", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "var AWS = require(\"aws-sdk\");\nvar greengrass = new AWS.Greengrass();\n\n(async () => {\n try {\n // 当面複数グループを作らないので先頭を使う。\n const groups = await greengrass.listGroups({}).promise();\n const groupId = groups.Groups[0].Id;\n const groupVersions = await greengrass\n .listGroupVersions({\n GroupId: groupId\n })\n .promise();\n // 多分先頭が最新なのではなかろうか?\n // sdkのリファレンスには書いていなかった気がする。\n var groupVersion = groupVersions.Versions[0];\n console.log(groupVersions);\n const deployment = await greengrass\n .createDeployment({\n GroupId: groupId,\n GroupVersionId: groupVersion.Version,\n DeploymentType: \"NewDeployment\"\n })\n .promise();\n console.log(deployment);\n } catch (err) {\n console.error(err);\n }\n})();\n" }, { "alpha_fraction": 0.6650717854499817, "alphanum_fraction": 0.6770334839820862, "avg_line_length": 23.58823585510254, "blob_id": "4c076d90415db7a2fcf5a8e689a10d505d26b455", "content_id": "726486f895e961644a32a209e0e75d6697ba685b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 420, "license_type": "no_license", "max_line_length": 108, "num_lines": 17, "path": "/src/MackerelAlertToAwsIot/Utils.cs", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Security.Cryptography;\nusing System.Text;\n\nnamespace MackerelAlertToAwsIot\n{\n class Utils\n {\n private static SHA1 _sha1 = new SHA1CryptoServiceProvider();\n\n public static string ToHash(string value)\n {\n return 
BitConverter.ToString(_sha1.ComputeHash(Encoding.UTF8.GetBytes(value))).Replace(\"-\", \"\");\n }\n }\n}\n" }, { "alpha_fraction": 0.6766220331192017, "alphanum_fraction": 0.6879505515098572, "avg_line_length": 24.578947067260742, "blob_id": "b46ad0c3be5f68d24bd2b1a350775110fad37653", "content_id": "27d66366f4ef8d16c5959b6e8d8259b4197a8e84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 981, "license_type": "no_license", "max_line_length": 72, "num_lines": 38, "path": "/handlers/device/ReceiveAlert.py", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "import os\nimport greengrasssdk\nimport json\nimport sys\nimport logging\nimport time\n\n# Setup logging to stdout\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\niot_client = greengrasssdk.client('iot-data')\n\nthingName = os.environ['AWS_IOT_THING_NAME']\n\ndef get_write_topic(gpio_num):\n return '/'.join(['gpio', thingName, str(gpio_num), 'write'])\n\ndef send_message_to_connector(topic, message=''):\n iot_client.publish(topic=topic, payload=str(message))\n\ndef set_gpio_state(gpio, state):\n send_message_to_connector(get_write_topic(gpio), str(state))\n\ndef handler(event, context):\n logger.info(\"Received message!\")\n logger.info(event)\n logger.info(type(event))\n\n status_to_sec = {'ok': 3, 'warning': 6, 'critical': 9, 'unknown': 9}\n taiyo_sec = status_to_sec.get(event['detail']['alert']['status'], 9)\n\n # 太陽拳する\n set_gpio_state(11, 1)\n time.sleep(taiyo_sec)\n set_gpio_state(11, 0)\n\n return" }, { "alpha_fraction": 0.6336088180541992, "alphanum_fraction": 0.641873300075531, "avg_line_length": 18.105262756347656, "blob_id": "08541304e2af152ddf5d0cfe6d9a55f4e1a690a3", "content_id": "225e8a34e2e380fbbbff2766b9d102d1847d2b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", 
"max_line_length": 53, "num_lines": 19, "path": "/handlers/cloud/ReceiveAlert.py", "repo_name": "7474/MackerelAlertToAwsIot", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport boto3\n\nprint('Loading function')\n\niot = boto3.client('iot-data')\n\n\ndef handler(event, context):\n # 単にペイロードをデバイスにバイパスする。\n topic = os.environ['MACKEREL_ALERT_TOPIC']\n print('Publish to: ' + topic)\n print(event)\n iot.publish(\n topic=topic,\n qos=0,\n payload=json.dumps(event, ensure_ascii=False)\n )\n" } ]
10
Mukhammadumarmukhammadzoda/weather
https://github.com/Mukhammadumarmukhammadzoda/weather
f70899a3a2065306aac60e5083b90f703eeda4a4
35fcfe9f35f3adb2424be69be14708aea68ec19a
0f82ae5110271047876baaaede07e81904ac6c12
refs/heads/main
2023-08-15T20:34:09.746547
2021-09-19T13:17:30
2021-09-19T13:17:30
408,132,257
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.4955609440803528, "alphanum_fraction": 0.5165455937385559, "avg_line_length": 26.35555648803711, "blob_id": "25f53fb3e24d96e41945ae2979122890dc956e87", "content_id": "2cacb8ba00910b1af55f7d766ad7c501683c270f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1239, "license_type": "no_license", "max_line_length": 92, "num_lines": 45, "path": "/main/views.py", "repo_name": "Mukhammadumarmukhammadzoda/weather", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport requests as r\nimport math\nimport json\nfrom django.http import HttpResponse\nimport datetime\nimport calendar\n# Create your views here.\n#Api getting part\n\n\n\n\ndef index(request):\n dt = datetime.datetime.today()\n date = dt.day\n month = dt.month\n m = calendar.month_name[month]\n full = str(date) + ' ' + str(m)\n now = datetime.datetime.now()\n day = now.strftime(\"%A\")\n if request.method == 'POST':\n try:\n city = request.POST.get('city')\n api_key = 'a55396f3eb34ec444b88755d0bedf7a7'\n api = 'https://api.openweathermap.org/data/2.5/weather?q='+city+'&appid='+api_key\n ans = r.get(api)\n ansj = ans.json()\n temp = ((ansj['main']['temp']) - 273.15)\n temp = round(temp)\n context = {'havo':temp,\n 'city':city,\n 'sana':full,\n 'kun':day}\n\n except :\n temp = 'Unknown Error!!' 
\n context = {'havo':temp,\n 'sana':full,\n 'kun':day\n }\n\n return render(request,'index.html',context)\n \n return render(request,'index.html') \n \n" }, { "alpha_fraction": 0.8080000281333923, "alphanum_fraction": 0.8080000281333923, "avg_line_length": 61.5, "blob_id": "b842fcb5d8f48a5603d5b113fb84001621561409", "content_id": "92f9dcdf68d57fac17f3d9ed2ec93a3c0ef3dc9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 106, "num_lines": 2, "path": "/README.md", "repo_name": "Mukhammadumarmukhammadzoda/weather", "src_encoding": "UTF-8", "text": "# weather-Website\n Weather website was built using Weather API. Here I used Python and Django to make this beautiful project\n" } ]
2
AnkitSnh2/Autonomous-driving
https://github.com/AnkitSnh2/Autonomous-driving
ed6d0a2da2956d2a082625619000dc1f11531d90
355876f44c308dcd9d6119c24d79c6eb34359d6d
9619491457af782729595038f6b6dc406e54119e
refs/heads/master
2020-11-30T05:14:14.204167
2019-12-26T19:00:25
2019-12-26T19:00:25
230,313,430
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8054393529891968, "alphanum_fraction": 0.8054393529891968, "avg_line_length": 158.3333282470703, "blob_id": "b99cd14b3515a18218573e86c8b30f4bd74b98a8", "content_id": "d526efcdfd7a5de550f16f88cd88fb031fc0402e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 482, "license_type": "no_license", "max_line_length": 416, "num_lines": 3, "path": "/README.md", "repo_name": "AnkitSnh2/Autonomous-driving", "src_encoding": "UTF-8", "text": "# Autonomous-driving\nEntry to autonomous Driving using CNN. \nI would like to thank Siraj Raval‘s for the videos on how to simulate self driving car, CNN architecture and Sentdex’s python plays GTA for really nice and informative video lectures and Github code to help me understand internals of CNN. The credits for neural network code goes to naok- ishibuya. We have modified his code for our platform.I want to sincerely thank https://towardsdatascience.com/ for insightful blogs.\n" }, { "alpha_fraction": 0.5492709875106812, "alphanum_fraction": 0.5746606588363647, "avg_line_length": 25.177631378173828, "blob_id": "14944776a1c302fcb156cc0a96988862b95dfd79", "content_id": "8eccb568bd18392b520dc0582434932ce1ba693c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3978, "license_type": "no_license", "max_line_length": 105, "num_lines": 152, "path": "/grabscreen.py", "repo_name": "AnkitSnh2/Autonomous-driving", "src_encoding": "UTF-8", "text": "import cv2\nimport os\nimport numpy as np\nimport pyautogui\nimport time\nimport csv\nfrom PIL import ImageGrab\nfrom pynput import keyboard\n# Imports for testing\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport ast\n\nkeys = []\nisThreadRunning = False\n\ndef imageprocessing(image):\n processed_image = cv2.cvtColor(np.float32(image),cv2.COLOR_BGR2GRAY)\n #processed_image = cv2.Canny(processed_image,threshold1=200,threshold2=200)\n return 
processed_image\n\ndef writecsv(o1):\n with open('log.csv','a') as fp:\n writer = csv.writer(fp,delimiter=',')\n writer.writerow(o1)\n\ndef on_press(key):\n try:\n print('alphanumeric key {0} pressed'.format(\n key.char))\n keys.append(key.char)\n except AttributeError:\n print('special key {0} pressed'.format(\n key))\n\ndef on_release(key):\n print('{0} released'.format(\n key))\n if key == keyboard.Key.esc:\n # Stop listener\n return False\n\ndef key_check():\n # Non blocking way\n listener = keyboard.Listener(\n on_press=on_press,\n on_release=on_release)\n listener.start()\n isThreadRunning = True\n # Collect events until released\n '''with keyboard.Listener(\n on_press=on_press,\n on_release=on_release) as listener:\n listener.join()\n '''\n return keys\n\n # code copied from neural network code revert the changes after test \ndef load_data():\n\n data_df = pd.read_csv('Trainingdata/log.csv', names=['image','input'])\n X1 = data_df[['image']].values\n Y1 = data_df['input'].values\n #print(\"Image values read from csv are %s \" %X1)\n X = []\n Y = []\n p=0\n for add, out in zip(X1,Y1):\n image_path= os.path.join('Trainingdata',add[0])\n print(\"Path is %s\" %image_path)\n img = cv2.imread(image_path, 0)\n #cv2.imshow('image',img)\n #cv2.waitKey(25)\n k = ast.literal_eval(out)\n if k == [0,0,1]:\n X.append(img)\n Y.append(k)\n X.append(cv2.flip( img, 1 ))\n Y.append([1,0,0])\n elif k==[1,0,0]:\n X.append(img)\n Y.append(k)\n X.append(cv2.flip( img, 1 ))\n Y.append([0,0,1])\n\n elif k == [0,1,0]:\n if p % 3 == 0:\n X.append(img)\n Y.append(k)\n else:\n pass\n p += 1\n print(Y)\n print(X)\n X = np.array(X, dtype=np.uint8)\n Y = np.array(Y, dtype=np.uint8)\n X = X.reshape(X.shape[0], IMAGE_HEIGHT, IMAGE_WIDTH, 1)\n X = X.astype('float32')\n X /= 255\n cv2.destroyAllWindows()\n\n X_train, X_valid, y_train, y_valid = train_test_split(X, Y, test_size=args.test_size, random_state=0)\n\n return X_train, X_valid, y_train, y_valid\n\ndef grab_screen(region=None):\n count 
= 0\n print(\"Grab screen called\")\n #os.rmdir('Dataset')\n os.mkdir('Test')\n recordingstart = raw_input('do you want to start recording ')\n time.sleep(2)\n while count <= 400:\n image1 = ImageGrab.grab()\n image1 = imageprocessing(image1)\n image1 = cv2.resize(image1, (400, 150))\n image = \"image%s.png\" %count\n full_name_image = os.path.join('Test', image)\n cv2.imwrite(full_name_image,image1)\n count = count + 1\n print(\"Image path is %s\" %full_name_image)\n img = cv2.imread(full_name_image,0)\n cv2.imshow('image',img)\n #keypressed = getkey()\n #print('alphanumeric key {0} pressed'.format(keypressed))\n #print('alphanumeric key %s global variable is ' %keys)\n\n #writecsv([full_name_image,keypressed])\n\ndef getkey():\n key = key_check()\n output = [0,0,0]\n #AWD\n\n if 'A' in key:\n output[0] = 1\n elif 'D' in key:\n output[2] = 1\n else:\n output[1] = 1\n\n return output\n\ndef runMyCarStraight(self):\n while(self.run):\n pyautogui.typewrite('w')\n\nif __name__ == \"__main__\":\n time.sleep(2)\n runMyCarStraight()\n #grab_screen(None)" } ]
2
Panuvat-Dan/crypto_vizweb
https://github.com/Panuvat-Dan/crypto_vizweb
60393ae76f8e39b60e71d351edad9b50d29d79d6
3a221250d26dc0154482a21feb47833e2254ff0b
ba578af602357def6548f7340234b2f774795e65
refs/heads/main
2023-06-05T09:08:20.342915
2021-07-07T11:44:51
2021-07-07T11:44:51
382,538,791
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45945945382118225, "alphanum_fraction": 0.662162184715271, "avg_line_length": 16.5, "blob_id": "081c19a9d757fcb3b47a2ff20f5f5a8cefbfa7ee", "content_id": "e69f0191d74a980ff4a641a6915d0d28f19bd4d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 74, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/requirements.txt", "repo_name": "Panuvat-Dan/crypto_vizweb", "src_encoding": "UTF-8", "text": "yfinance==0.1.60\r\nstreamlit==0.84.0\r\npandas==1.2.4\r\nscikit_learn==0.24.2\r\n" }, { "alpha_fraction": 0.6591600775718689, "alphanum_fraction": 0.6902008652687073, "avg_line_length": 37.16666793823242, "blob_id": "7a604efc9cff1bb952bc7ca1a6a0fe8460ca8e17", "content_id": "d996a6aafab1807e8bd139400f2c5ee038772af9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 117, "num_lines": 42, "path": "/myapp.py", "repo_name": "Panuvat-Dan/crypto_vizweb", "src_encoding": "UTF-8", "text": "import streamlit as st\r\nimport pandas as pd\r\nimport yfinance as yf\r\nimport datetime as dt\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nst.header(\"My Crypto price webapp\")\r\nst.write(\"\"\"\r\nStock **closing price and **volume of crypto of BTC/ETH/BNB/ADA\r\n\"\"\")\r\n\r\nst.image(\"crypto.jpg\",width=650)\r\n\r\nst.subheader(\"This line chart shows the projection of crypto price 2021\")\r\n\r\n# plot stock pirce\r\ncrypto_list = ['BTC-USD', 'ETH-USD', 'BNB-USD', 'ADA-USD']\r\ninput_crypto = st.radio(\"Please select your Coin\",[\"BTC-USD\",\"ETH-USD\",\"BNB-USD\",\"ADA-USD\",\"See-Comparison\"],index=1)\r\ndf = pd.DataFrame()\r\ndf2 = pd.DataFrame()\r\ntoday = dt.datetime.now().date().strftime(\"%Y\")\r\nstart_date = st.date_input(\"Pick a start date\")\r\nstop_date = st.date_input(\"Pick a stop date\")\r\nfor crypto in crypto_list:\r\n if input_crypto == 
\"See-Comparison\":\r\n df[crypto] = yf.Ticker(crypto).history(period='1d', start=start_date, end=stop_date).Close\r\n else:\r\n crypto = input_crypto\r\n df[crypto] = yf.Ticker(crypto).history(period='1d', start=start_date, end=stop_date).Close\r\n # df2[crypto] = yf.Ticker(crypto).history(period='1d', start=\"2021-01-31\", end=\"2021-12-31\").Volume\r\nst.line_chart(df)\r\n# st.subheader(\"This line chart shows the projection of crypto Volume 2021\")\r\n# st.line_chart(df2)\r\nst.markdown(\"##This dataframe shows the closing number of stock price from 2021-01-31 to 2021-12-31\")\r\n# st.dataframe(df)\r\nst.dataframe(df)\r\n\r\nst.subheader(\"Do you really know that blockchain is the concept behind Crpyto and it is a Hash Function!!\")\r\nst.video(\"hash_function.mp4\")\r\nst.markdown(\"\"\"\r\n### Credit : Lisk Youtube channel \r\n\"\"\")" } ]
2
senilio/i2c
https://github.com/senilio/i2c
2f30794a5ca42802971626e6d0a7224fb0da1859
e141e4b646b7be188bb682e5daa379ca694c1737
a25c97ce7c2a21d87e19a1dc44ae4a56c02f2ee7
refs/heads/master
2023-09-01T08:03:14.374226
2023-08-23T05:20:09
2023-08-23T05:20:09
179,714,091
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6810610890388489, "alphanum_fraction": 0.7236273884773254, "avg_line_length": 33.48936080932617, "blob_id": "d74400d778bf430965ba5b96ec47a9d0eea9e8f1", "content_id": "68844433cc2c4c9cd1b93290193aab45d4c8dcfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1621, "license_type": "no_license", "max_line_length": 285, "num_lines": 47, "path": "/README.md", "repo_name": "senilio/i2c", "src_encoding": "UTF-8", "text": "### What:\n\nThis tool will launch a new iTerm2 window and create split panes for each session. By broadcasting commands (alt+cmd+i) you have a perfect clusterssh tool.\n\nI used to use https://github.com/wouterdebie/i2cssh for SSH clustering. It's dependant on Ruby and also tends to break on each OSX/Ruby upgrade. I got tired of this, so I made my own version which relies only on python (sys, subprocess, argparse) and the built in OSX osascript binary.\n\n### Install:\n\nJust download the script to your PATH, then make it executable. Example:\n\n```\nsudo curl -o /usr/local/bin/i2c https://raw.githubusercontent.com/senilio/i2c/master/i2c && \\\nsudo chmod +x /usr/local/bin/i2c\n```\n\n### iTerm2 Broadcast input:\n\nBe careful when broadcasting your keyboard strokes, as it's easy to unintentionally mess up. See the following screenshot from iTerm2 for a description of the different broadcast modes plus keyboard combinations on how to enable and disable them.\n\n![iTerm2 Broadcast Input menu](assets/iTerm2-broadcast-keyboard-shortcuts.png)\n\n### Usage:\n\n```\nusage: i2c [-h] [-p port] [-l login] [-d] ...\n\npositional arguments:\n servers\n\noptional arguments:\n -h, --help show this help message and exit\n -p port Optional: Connect to this SSH port. Defalts to port 22.\n -l login Optional: Connect as this SSH user. 
Defaults to current user.\n -d Enable debug output\n```\n\nExamples:\n\n```\ni2c 10.0.0.{1..5}\ni2c [email protected].{1..5}\ni2c [email protected] [email protected]\ni2c -p 123 -l user 10.0.0.{1..5}\ni2c 10.0.0.1 10.0.0.2 10.0.0.3 10.0.0.4\n```\n\nScript will try to enable broadcast mode after creating the new window.\n" }, { "alpha_fraction": 0.5967628359794617, "alphanum_fraction": 0.6055594682693481, "avg_line_length": 32.83333206176758, "blob_id": "81ef9dd2558c79211f5a359eb611b9f7e9f8af4b", "content_id": "e35b5b5687e25ef9ab6912ae009b6d78244d2fb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2842, "license_type": "no_license", "max_line_length": 136, "num_lines": 84, "path": "/i2c", "repo_name": "senilio/i2c", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport sys\nimport subprocess\nimport argparse\n\ndef main():\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description='i2c is a cluster ssh tool for iTerm2.')\n parser.add_argument('-p', metavar='port', type=int, help='Connect to this SSH port. 
Obviously defaults to port 22.', required=False)\n parser.add_argument('-l', metavar='login', type=str, help='Connect as this SSH user.', required=False)\n parser.add_argument('-d', help='Enable debug output', action='store_true', required=False, default=False)\n parser.add_argument('servers', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n # Decide number of rows and columns\n if len(args.servers) % 2 == 0:\n rows=len(args.servers)/2\n columns=len(args.servers)/2\n even=1\n else:\n rows=int(len(args.servers)/2+1)\n columns=int(len(args.servers)/2+1)\n even=0\n\n # Print help if there's no arguments\n if len(args.servers) == 0:\n parser.print_help()\n sys.exit()\n\n # Compile SSH arguments string\n ssh_opts=''\n if not args.l == None:\n ssh_opts = ' '.join(['-l {}', ssh_opts]).format(args.l)\n if not args.p == None:\n ssh_opts = ' '.join(['-p {}', ssh_opts]).format(args.p)\n\n # Start compiling the applescript\n ascript = 'tell application \"iTerm2\"\\n'\n ascript += ' tell current window\\n'\n ascript += ' create window with default profile\\n'\n ascript += ' end tell\\n'\n ascript += ' set p1 to (current session of current window)\\n'\n\n # Used for printing the horizontal panes\n index=1\n\n # Create the horizontal panes\n for i in range(1, int(rows)):\n ascript += ' tell p{}\\n'.format(index)\n ascript += ' set p{} to (split horizontally with same profile)\\n'.format(index+1)\n ascript += ' end tell\\n'\n index+=1\n\n # Create the vertical panes\n for i in range(1, int(columns+even)):\n ascript += ' tell p{}\\n'.format(i)\n ascript += ' set p{} to (split vertically with same profile)\\n'.format(i+columns)\n ascript += ' end tell\\n'\n\n # Insert text into each pane\n for i in range(0, len(args.servers)):\n ascript += ' tell p{}\\n'.format(i+1)\n ascript += ' write text \" clear\"\\n'\n ascript += ' write text \" ssh {}{}\"\\n'.format(ssh_opts, args.servers[i])\n ascript += ' end tell\\n'\n\n # Enable broadcast\n ascript += ' tell application \"System 
Events\"\\n'\n ascript += ' keystroke \"i\" using {command down, option down}\\n'\n ascript += ' end tell\\n'\n\n # Close the script\n ascript += 'end tell'\n\n # Display applescript if debug mode is enabled\n if args.d:\n print(ascript)\n\n # Execute applescript\n subprocess.call(['osascript', '-e', ascript])\n\nif __name__ == '__main__':\n main()\n" } ]
2
callidus/playbot
https://github.com/callidus/playbot
fe246818915a262bf6f2bb4a26f51b095e729b73
901b2e207bbec6d7a76e777dce3911b611034e2c
96eb049641e5bcd6d51a94647ec493a768b7c07c
refs/heads/master
2021-01-02T22:38:23.937266
2015-10-01T09:46:17
2015-10-01T09:46:17
27,483,460
0
1
null
2014-12-03T11:16:33
2015-04-08T16:27:49
2015-10-01T09:46:17
Python
[ { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 18, "blob_id": "0c8a0025a2ec05b1beeae819d1fab02c6b08d243", "content_id": "aee5d15b234c1a2f8d7ca6599766d9eb3480fb29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19, "license_type": "permissive", "max_line_length": 18, "num_lines": 1, "path": "/playbot/plugins/CvH/__init__.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "import App # noqa\n" }, { "alpha_fraction": 0.6696944832801819, "alphanum_fraction": 0.6729975342750549, "avg_line_length": 21.407407760620117, "blob_id": "22558438b88bf9006fe39860699eebe3dff4c406", "content_id": "e8469afb357d54202afe696f3484f00a77474509", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "permissive", "max_line_length": 58, "num_lines": 54, "path": "/playbot/main.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\nfrom __future__ import absolute_import\n\nfrom playbot import bot\nfrom playbot.plugins import card\nfrom playbot.plugins import control\nfrom playbot.plugins import CvH\nfrom playbot.plugins import dice\nfrom playbot.plugins import fortune\nfrom playbot.plugins import say\nfrom playbot.plugins import link_peek\n\nimport logging\n\nname = \"PlayBot\"\nserver = \"irc.afternet.org\"\nchans = [\"\"\"#testroom\"\"\", ]\nport = 6697\n\n\ndef setup_logging():\n logging.basicConfig(level=logging.INFO)\n\n\ndef main():\n setup_logging()\n b = bot.PlayBot(chans, name, None, server, port)\n b.register_command(\"disconnect\", control.Disconnect())\n b.register_command(\"die\", control.Die())\n\n cvh = CvH.App.CvH()\n cvh.setup('./cvh.db')\n b.register_command(\"cvh\", cvh)\n\n ftn = fortune.fortune.Fortune('./fortune.db')\n b.register_command('fortune', ftn)\n\n why = fortune.fortune.Fortune(\"./bofh.db\")\n b.register_command('why', 
why)\n\n roll = dice.Dice()\n b.register_command('roll', roll)\n\n sayer = say.Say()\n b.register_command('say', sayer)\n\n #cardGame = card.Card(b)\n #b.register_command('card', cardGame)\n\n b.register_listner(link_peek.peek)\n\n b.start()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.8421052694320679, "alphanum_fraction": 0.8421052694320679, "avg_line_length": 11.666666984558105, "blob_id": "c1d24fc0d1ab70ca7fa8ce8a4061073702b134a6", "content_id": "0a8e095e27bd244ae3838b9aea34fd4b2830399f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 38, "license_type": "permissive", "max_line_length": 29, "num_lines": 3, "path": "/requirements.txt", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "irc\nsix\nbackports.functools_lru_cache\n" }, { "alpha_fraction": 0.4570508897304535, "alphanum_fraction": 0.5421444177627563, "avg_line_length": 29.685184478759766, "blob_id": "e2574c375949a0d58b7a12dd63e070ffe6a8f1de", "content_id": "4f41d50d44c32b978b95bf2a9e4e45e77e56a26d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4971, "license_type": "permissive", "max_line_length": 208, "num_lines": 162, "path": "/playbot/plugins/card.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "# flake8: noqa\n\n\"\"\" broken for now\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nimport random\nimport logging\n\nclass Card:\n def __init__(self,bot):\n self.log = logging.getLogger(__name__)\n self.currHands = {}\n self.dealer = bot.nickname\n self.handInProgress = False\n\n @staticmethod\n def getCardUnicode(card):\n uc = [[u\"\\U0001F0A1\",u\"\\U0001F0A2\",u\"\\U0001F0A3\",u\"\\U0001F0A4\",u\"\\U0001F0A5\",u\"\\U0001F0A6\",u\"\\U0001F0A7\",u\"\\U0001F0A8\",u\"\\U0001F0A9\",u\"\\U0001F0AA\",u\"\\U0001F0AB\",u\"\\U0001F0AC\",u\"\\U0001F0AD\",u\"\\U0001F0AE\"],\n 
[u\"\\U0001F0D1\",u\"\\U0001F0D2\",u\"\\U0001F0D3\",u\"\\U0001F0D4\",u\"\\U0001F0D5\",u\"\\U0001F0D6\",u\"\\U0001F0D7\",u\"\\U0001F0D8\",u\"\\U0001F0D9\",u\"\\U0001F0DA\",u\"\\U0001F0DB\",u\"\\U0001F0DC\",u\"\\U0001F0DD\",u\"\\U0001F0DE\"],\n [u\"\\U0001F0B1\",u\"\\U0001F0B2\",u\"\\U0001F0B3\",u\"\\U0001F0B4\",u\"\\U0001F0B5\",u\"\\U0001F0B6\",u\"\\U0001F0B7\",u\"\\U0001F0B8\",u\"\\U0001F0B9\",u\"\\U0001F0BA\",u\"\\U0001F0BB\",u\"\\U0001F0BC\",u\"\\U0001F0BD\",u\"\\U0001F0BE\"],\n [u\"\\U0001F0C1\",u\"\\U0001F0C2\",u\"\\U0001F0C3\",u\"\\U0001F0C4\",u\"\\U0001F0C5\",u\"\\U0001F0C6\",u\"\\U0001F0C7\",u\"\\U0001F0C8\",u\"\\U0001F0C9\",u\"\\U0001F0CA\",u\"\\U0001F0CB\",u\"\\U0001F0CC\",u\"\\U0001F0CD\",u\"\\U0001F0CE\"],\n [u\"\\U0001F0A0\",u\"\\U0001F0BF\",u\"\\U0001F0CF\",u\"\\U0001F0DF\"]]\n if card:\n return uc[card[0]][card[1]]\n return uc[4][0]\n\n @staticmethod\n def getCardAscii(card):\n s = [u\"\\u2660\",u\"\\u2663\",u\"\\u2665\",u\"\\u2666\"] # SCHD\n v = [\"A\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\"]\n if card:\n if card[0] < 2:\n return u\"\\u00031,0[\" + s[card[0]] + v[card[1]] + u\"]\\u000F\"\n else:\n return u\"\\u00034,0[\" + s[card[0]] + v[card[1]] + u\"]\\u000F\"\n return \"[#]\"\n\n @staticmethod\n def getHand(h):\n return \"\".join(map(Card.getCardUnicode,h)) + \" \" + \"\".join(map(Card.getCardAscii,h))\n\n def newGame(self):\n self.deck = []\n self.hands = {}\n self.nicks = {}\n self.nicks[self.dealer] = \"call\"\n self.handInProgress = True\n for i in range(4):\n for d in range(13):\n self.deck.append([i,d])\n random.shuffle(self.deck)\n self.hands[self.dealer] = [self.deck.pop(), self.deck.pop()]\n\n def blackjackHandValue(self, hand):\n x = 0\n a = 0\n for c in hand:\n if c[1] == 0:\n x += 1\n else:\n a += 1\n x += min(c[1],10)\n\n if x <= 11 and a > 0:\n return x+10\n return x\n\n def __call__(self, bot, e, cmd, *arg):\n if arg[0] == \"new\":\n self.newGame()\n bot.do_send(e.target, \"New Game\")\n 
bot.do_send(e.target, \"Dealer: \" + Card.getHand([self.hands[self.dealer][0],False]))\n return\n\n nick = re.sub(\"!.*\",\"\",e.source)\n if self.handInProgress is not True:\n return bot.do_send(nick, \"Game not in progress\")\n\n if arg[0] == \"deal\":\n if nick in self.hands:\n return bot.do_send(nick, \"Already dealt in.\")\n h = [self.deck.pop(), self.deck.pop()]\n x = self.blackjackHandValue(h)\n self.hands[nick] = h\n self.nicks[nick] = \"dealt\"\n bot.do_send(nick, \"Hand: %s = %d\" % (Card.getHand(h), x))\n return\n\n if nick not in self.hands:\n return bot.do_send(nick, \"Not Dealt In\")\n\n if self.nicks[nick] != \"dealt\":\n return bot.do_send(nick, \"Already Called\")\n\n if arg[0] == \"hit\":\n self.hands[self.dealer].append(self.deck.pop())\n h = self.hands[self.dealer]\n x = self.blackjackHandValue(h)\n if x > 21:\n self.nicks[nick] = \"bust\"\n return bot.do_send(nick, \"BUST!\")\n\n bot.do_send(nick, \"Hand: %s = %d\" % (Card.getHand(h), x))\n return\n\n if arg[0] == \"call\":\n self.nicks[nick] = \"call\"\n\n for p in self.nicks:\n if self.nicks[p] == \"dealt\":\n return\n\n result = []\n winner = [\"Error\",0]\n for p in self.nicks:\n v = self.blackjackHandValue(self.hands[p])\n if winner[1] < v:\n winner = [p, v]\n result.append(\"%s: %s = %d\" % (p, Card.getHand(self.hands[p]), v))\n\n bot.do_send(e.target, \"\\t\".join(result))\n bot.do_send(e.target, \"%s is the winner with %d!\" % (winner[0], winner[1]))\n return\n\n bot.do_send(e.target, \"Don't know that command\")\n #\n\n# Quick Test\ndef test_all():\n class Bot:\n def __init__(self):\n self.x = 0\n self.nickname = \"PlayBot\"\n def do_send(self, tar, msg):\n print(\"(%s) %s\" % (tar,msg))\n\n class E:\n def __init__(self, source):\n self.target = 0\n self.source = source\n\n b = Bot()\n c = Card(b)\n c.__call__(b, E(\"jo\"), \"card\", \"new\")\n c.__call__(b, E(\"jo\"), \"card\", \"deal\")\n c.__call__(b, E(\"jo\"), \"card\", \"hit\")\n\n c.__call__(b, E(\"mi\"), \"card\", \"deal\")\n\n 
c.__call__(b, E(\"slo\"), \"card\", \"deal\")\n c.__call__(b, E(\"slo\"), \"card\", \"hit\")\n c.__call__(b, E(\"slo\"), \"card\", \"hit\")\n c.__call__(b, E(\"slo\"), \"card\", \"hit\")\n c.__call__(b, E(\"slo\"), \"card\", \"hit\")\n\n c.__call__(b, E(\"mi\"), \"card\", \"call\")\n c.__call__(b, E(\"jo\"), \"card\", \"call\")\n\nif __name__ == \"__main__\":\n test_all()\n\"\"\"\n" }, { "alpha_fraction": 0.5709302425384521, "alphanum_fraction": 0.5732558369636536, "avg_line_length": 26.70967674255371, "blob_id": "fa730e757cba0f09624d77b6cf15ff68cbaf934d", "content_id": "60f37ffd7f9b8059b2a4df1d6e5917820b5973ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "permissive", "max_line_length": 63, "num_lines": 31, "path": "/playbot/plugins/fortune/fortune.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\nfrom __future__ import absolute_import\n\nfrom playbot.plugins.fortune import data_source\n\nimport logging\nimport os\nimport random\n\n\nclass Fortune(object):\n def __init__(self, db, prefix=None):\n self.data = data_source.DataSource()\n if os.path.isfile(db):\n self.data.open_db(db)\n else:\n self.data.build_db(db)\n\n self.maxIdx = self.data.get_count()-1\n self.prefix = prefix\n\n self.log = logging.getLogger(__name__)\n self.log.info(\"Fortune loaded db: %s with %i entries.\",\n db, self.maxIdx)\n\n def __call__(self, bot, e, cmd, *args):\n idx = random.randint(0, self.maxIdx)\n msg = self.data.get_fortune(idx)\n if self.prefix is not None:\n bot.do_send(e.target, self.prefix + \" \" + msg)\n else:\n bot.do_send(e.target, msg)\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 22, "blob_id": "b3321527a77b2cef92d009e4c29f2c4631080430", "content_id": "e28c84fb27b447d586331d279d56c9faf629ed07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 23, "license_type": "permissive", "max_line_length": 22, "num_lines": 1, "path": "/playbot/plugins/fortune/__init__.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "import fortune # noqa\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5376197099685669, "avg_line_length": 24.172412872314453, "blob_id": "aaf6074eec52d0ab3b25bad0e1a8863c9cfa1283", "content_id": "6ea0c8bd0b18ad02cab0031e4d1cb01463d56c6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "permissive", "max_line_length": 54, "num_lines": 29, "path": "/playbot/plugins/fortune/build_db.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom playbot.plugins.fortune import data_source\n\n\ndb = data_source.DataSource()\nif os.path.isfile(sys.argv[1]):\n db.open_db(sys.argv[1])\nelse:\n db.build_db(sys.argv[1])\n\nwith open(sys.argv[2], 'r') as f:\n data = f.read()\n items = data.split(\"\\n\")\n for key, item in enumerate(items):\n if len(item) != 0:\n item = item.replace(\"\\n\", \" \")\n item = re.sub(\"[ \\t]+\", \" \", item)\n print(key, item)\n try:\n db.add_fortune(unicode(item, 'utf-8'))\n print(\"... OK\")\n except Exception as e:\n print(\"... 
Fail\", e)\n" }, { "alpha_fraction": 0.5173553824424744, "alphanum_fraction": 0.5190082788467407, "avg_line_length": 24.19444465637207, "blob_id": "5869bf19f58bb02049c7c238a321f6cb2fe6a85e", "content_id": "1e650d9152f12662672509c83cb1a15860919717", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1815, "license_type": "permissive", "max_line_length": 63, "num_lines": 72, "path": "/playbot/plugins/fortune/data_source.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\nfrom __future__ import absolute_import\n\nimport sqlite3 as dbapi\n\n\nclass DataSource(object):\n def __init__(self):\n self.conn = None\n\n def __del__(self):\n if self.conn:\n self.conn.close()\n\n def open_db(self, name):\n \"\"\"open an existing database.\"\"\"\n self.conn = dbapi.connect(name)\n\n def build_db(self, name):\n \"\"\"build a new database to use.\"\"\"\n self.conn = dbapi.connect(name)\n try:\n c = self.conn.cursor()\n c.execute('CREATE TABLE fortune('\n 'id INTEGER PRIMARY KEY ASC, data TEXT)')\n self.conn.commit()\n\n except Exception:\n self.conn.rollback()\n raise\n\n def get_count(self):\n sql = 'SELECT Count(*) FROM fortune'\n c = self.conn.cursor()\n c.execute(sql)\n return c.fetchone()[0]\n\n def add_fortune(self, data):\n c = self.conn.cursor()\n sql = 'INSERT INTO fortune (data) VALUES (?)'\n try:\n c.execute(sql, (data,))\n fortuneId = c.lastrowid\n self.conn.commit()\n return fortuneId\n\n except Exception:\n self.conn.rollback()\n raise\n\n def del_fortune(self, itemId):\n c = self.conn.cursor()\n sql = 'DELETE FROM fortune WHERE id=?'\n\n try:\n c.execute(sql, (itemId,))\n self.conn.commit()\n\n except Exception:\n self.conn.rollback()\n raise\n\n def get_fortunes(self):\n sql = 'SELECT id, data FROM fortune'\n c = self.conn.cursor()\n c.execute(sql)\n return c.fetchall()\n\n def get_fortune(self, id):\n sql = 'SELECT data FROM fortune WHERE id=?'\n c = self.conn.cursor()\n 
c.execute(sql, (id,))\n return c.fetchone()[0]\n" }, { "alpha_fraction": 0.5572815537452698, "alphanum_fraction": 0.566990315914154, "avg_line_length": 22.363636016845703, "blob_id": "9731a785261a968814c47a14eb1da0d3ebc4a4e3", "content_id": "a1a7c4b2ceb249f4222c4485fe17138c5a35df67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "permissive", "max_line_length": 55, "num_lines": 22, "path": "/playbot/plugins/CvH/AddWhiteData.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\nimport os\nimport sys\n\nimport DataSource\nfrom future import print_function # noqa\n\ndb = DataSource.DataSource()\nif os.path.isfile(\"./cvh.db\"):\n db.openDB(\"./cvh.db\")\nelse:\n db.buildDB(\"./cvh.db\")\n\n\nwith open(sys.argv[1], 'r') as f:\n data = f.read()\n items = data.split(\"<>\")\n for key, item in enumerate(items):\n try:\n db.addWhiteCard(key, item)\n print(\"{0} {1} ... OK\".format(key, item))\n except Exception:\n print(\"{0} {1} ... 
FAIL\".format(key, item))\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 21, "blob_id": "55e69cf6cd4791b460b39d65a820f3d7bcc2409d", "content_id": "83894d97c6e2b8c30accfb963883b01cd543877a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "permissive", "max_line_length": 42, "num_lines": 8, "path": "/playbot/plugins/control.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "class Disconnect(object):\n def __call__(self, bot, e, cmd, *arg):\n bot.disconnect()\n\n\nclass Die(object):\n def __call__(self, bot, e, cmd, *arg):\n bot.die()\n" }, { "alpha_fraction": 0.5419847369194031, "alphanum_fraction": 0.7251908183097839, "avg_line_length": 15.375, "blob_id": "0e1eacdeed3c60efecacdb4a64130284c834428f", "content_id": "5fba1e67969415cba02c588670ff2b2025357391", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 131, "license_type": "permissive", "max_line_length": 22, "num_lines": 8, "path": "/test-requirements.txt", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "coverage>=3.6\ndiscover\nfixtures>=0.3.14\nhacking>=0.9.2,<0.10\nmock>=1.0\ntestrepository>=0.0.18\ntestscenarios>=0.4\ntesttools>=0.9.34\n" }, { "alpha_fraction": 0.4613180458545685, "alphanum_fraction": 0.46418339014053345, "avg_line_length": 20.8125, "blob_id": "2f957e6887e3fa02a569863bda0033c45e5a3a6b", "content_id": "a18c9085d014353c1fcb4f3fbabf2c2f56730f8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "permissive", "max_line_length": 46, "num_lines": 16, "path": "/playbot/plugins/say.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "import logging\n\n\nclass Say(object):\n def __init__(self):\n self.log = logging.getLogger(__name__)\n\n def __call__(self, 
bot, e, cmd, *arg):\n msg = \"\"\n if len(arg) == 0:\n msg = \"say what?\"\n else:\n msg = \" \".join(arg)\n\n self.log.info(\"Saying: '%s'\", msg)\n bot.do_send(e.target, msg)\n" }, { "alpha_fraction": 0.5584615468978882, "alphanum_fraction": 0.5676922798156738, "avg_line_length": 26.659574508666992, "blob_id": "64db9a92f6e8b65e06fb291c26c814e8805b3187", "content_id": "2b6a1dfb03c429e9e558559722b8bde71c14af48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/playbot/plugins/CvH/App.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "import os\nimport random\n\nimport DataSource\n\n\nclass Phrase(object):\n def __init__(self, data):\n self.title = data[0]\n self.text = data[1]\n self.numSlots = int(data[2])\n self.slots = []\n\n def fillSlot(self, data):\n self.slots.append(data[1])\n\n def __str__(self):\n string = self.text\n if self.numSlots:\n for slot in self.slots:\n string = string.replace(\"__________\", slot, 1)\n else:\n string += \" ... 
\" + self.slots[0]\n return string\n\n\nclass CvH(object):\n def __init__(self):\n self.dataSource = DataSource.DataSource()\n\n def setup(self, path):\n if os.path.isfile(path):\n self.dataSource.openDB(path)\n else:\n self.dataSource.buildDB(path)\n\n self.blacks = self.dataSource.getBlackCards()\n self.whites = self.dataSource.getWhiteCards()\n\n def __call__(self, bot, e, cmd, *args):\n idx = random.randint(0, len(self.blacks))\n phrase = Phrase(self.dataSource.getBlackCard(self.blacks[idx][0]))\n for x in range(0, max(phrase.numSlots, 1)):\n idx = random.randint(0, len(self.whites))\n phrase.fillSlot(self.dataSource.getWhiteCard(self.whites[idx][0]))\n\n bot.do_send(e.target, str(phrase))\n" }, { "alpha_fraction": 0.4955555498600006, "alphanum_fraction": 0.4959259331226349, "avg_line_length": 26.272727966308594, "blob_id": "24bcd0e1586fbf67600b08770258b344364a75b5", "content_id": "4f66da816ec650e3bbf250861d64c0c8892cd39a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2700, "license_type": "permissive", "max_line_length": 71, "num_lines": 99, "path": "/playbot/plugins/CvH/DataSource.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "import sqlite3 as dbapi\n\n\nclass DataSource(object):\n def __init__(self):\n self.conn = None\n\n def __del__(self):\n if self.conn:\n self.conn.close()\n\n def openDB(self, name):\n \"\"\"open an existing database.\"\"\"\n self.conn = dbapi.connect(name)\n\n def buildDB(self, name):\n \"\"\"build a new database to use.\"\"\"\n self.conn = dbapi.connect(name)\n try:\n c = self.conn.cursor()\n c.execute('CREATE TABLE white('\n 'id INTEGER PRIMARY KEY ASC, '\n 'title TEXT, '\n 'data TEXT)')\n\n c.execute('CREATE TABLE black('\n 'id INTEGER PRIMARY KEY ASC, '\n 'title TEXT, '\n 'slots INTEGER, '\n 'data TEXT)')\n self.conn.commit()\n\n except Exception:\n self.conn.rollback()\n raise\n\n def addWhiteCard(self, title, data):\n c = 
self.conn.cursor()\n sql = 'INSERT INTO white (title, data) VALUES (?, ?)'\n try:\n c.execute(sql, (title, data))\n cardId = c.lastrowid\n self.conn.commit()\n return cardId\n\n except Exception:\n self.conn.rollback()\n raise\n\n def addBlackCard(self, title, slots, data):\n c = self.conn.cursor()\n sql = 'INSERT INTO black (title, slots, data) VALUES (?, ?, ?)'\n try:\n c.execute(sql, (title, slots, data))\n cardId = c.lastrowid\n self.conn.commit()\n return cardId\n\n except Exception:\n self.conn.rollback()\n raise\n\n def delCard(self, white, itemId):\n c = self.conn.cursor()\n sql = 'DELETE FROM black WHERE id=? '\n if white:\n sql = 'DELETE FROM white WHERE id=? '\n\n try:\n c.execute(sql, (itemId,))\n self.conn.commit()\n\n except Exception:\n self.conn.rollback()\n raise\n\n def getBlackCards(self):\n sql = 'SELECT id, title FROM black'\n c = self.conn.cursor()\n c.execute(sql)\n return c.fetchall()\n\n def getWhiteCards(self):\n sql = 'SELECT id, title FROM white'\n c = self.conn.cursor()\n c.execute(sql)\n return c.fetchall()\n\n def getBlackCard(self, id):\n sql = 'SELECT title, data, slots FROM black WHERE id=?'\n c = self.conn.cursor()\n c.execute(sql, (id,))\n return c.fetchone()\n\n def getWhiteCard(self, id):\n sql = 'SELECT title, data FROM white WHERE id=?'\n c = self.conn.cursor()\n c.execute(sql, (id,))\n return c.fetchone()\n" }, { "alpha_fraction": 0.3691959083080292, "alphanum_fraction": 0.3793884515762329, "avg_line_length": 24.22857093811035, "blob_id": "e34d371c15ef3e55cde609af45833ec2c82b1f98", "content_id": "0f79f3e22ca904fc70a64927cdbc64dbf580c6e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 883, "license_type": "permissive", "max_line_length": 53, "num_lines": 35, "path": "/playbot/plugins/dice.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "import logging\nimport random\n\n\nclass Dice:\n def __init__(self):\n self.log = 
logging.getLogger(__name__)\n\n def __call__(self, bot, e, cmd, *arg):\n msg = \"\"\n if len(arg) == 0:\n msg = \"roll some dice, e.g. 'roll 2d6-2'\"\n else:\n num, max = arg[0].lower().split('d')\n mod = 0\n val = []\n\n if '-' in max:\n max, mod = max.split('-')\n mod = -int(mod)\n elif '+' in max:\n max, mod = max.split('+')\n mod = int(mod)\n\n for i in range(0, int(num)):\n r = random.randint(1, int(max))\n val.append(r)\n\n val.sort()\n msg = \"%s = %i [%s]\" % (\n arg[0],\n sum(val)+mod,\n \" \".join([str(v) for v in val]))\n\n bot.do_send(e.target, msg)\n" }, { "alpha_fraction": 0.7457627058029175, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 28.5, "blob_id": "c40fcf996c7c9e9c8ed283c8131943e90d205ca6", "content_id": "12ebee238ec0210e2813877cd4ae5f9aef23f96a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "permissive", "max_line_length": 100, "num_lines": 4, "path": "/README.md", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "playbot\n=======\n\nSimple IRC bot created in Python, created for personal use, probably not much use for anything else.\n" }, { "alpha_fraction": 0.5194727182388306, "alphanum_fraction": 0.5254643559455872, "avg_line_length": 30.78095245361328, "blob_id": "27b626ffc8443a34d58ba7d1f3ef63e121649d69", "content_id": "3911bc61133fadb898c3c6d18fadad16abb6d8db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3338, "license_type": "permissive", "max_line_length": 72, "num_lines": 105, "path": "/playbot/bot.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport irc.bot\n\nimport logging\nimport re\nimport ssl\nimport time\n\nlogger = logging.getLogger(__name__)\n\n\nclass PlayBot(irc.bot.SingleServerIRCBot):\n def __init__(self, channels, nickname, 
password, server, port=6667,\n force_ssl=False, server_password=None):\n if force_ssl or port == 6697:\n factory = irc.connection.Factory(wrapper=ssl.wrap_socket)\n super(PlayBot, self).__init__(\n [(server, port, server_password)],\n nickname, nickname,\n connect_factory=factory)\n else:\n super(PlayBot, self).__init__(\n [(server, port, server_password)],\n nickname, nickname)\n\n self.commands = {}\n self.listeners = []\n self.channel_list = channels\n self.nickname = nickname\n self.password = password\n\n def register_command(self, name, obj):\n self.commands[name] = obj\n\n def register_listner(self, obj):\n self.listeners.append(obj)\n\n def on_nicknameinuse(self, c, e):\n logger.info('Nick previously in use, recovering.')\n self.nickname = c.get_nickname() + \"_\"\n c.nick(self.nickname)\n time.sleep(1)\n logger.info('Nick previously in use, recovered.')\n\n def on_welcome(self, c, e):\n for channel in self.channel_list:\n c.join(channel)\n logger.info('Joined channel %s' % channel)\n time.sleep(0.5)\n\n def on_privmsg(self, c, e):\n e.target = re.sub(\"!.*\", \"\", e.source)\n self.do_command(e)\n\n def on_pubmsg(self, c, e):\n if(e.arguments[0].lower().startswith(self.nickname.lower())):\n # Remove Name\n e.arguments[0] = re.sub(\"^[\\t:]*\", \"\",\n e.arguments[0][len(self.nickname):])\n self.do_command(e)\n\n else:\n try:\n for listener in self.listeners:\n msg = listener(self, c, e)\n if msg is not None:\n self.do_send(e.target, msg)\n\n except Exception as err:\n logger.warn('Error in listener: %s', err)\n\n def on_dccmsg(self, c, e):\n c.privmsg(\"You said: \" + e.arguments[0])\n\n def do_command(self, e):\n msg = e.arguments[0].strip().split(\" \")\n cmd = msg[0].lower()\n arg = msg[1:]\n\n if cmd == 'help':\n cmdStr = \"commands: help \" + \" \".join(self.commands.keys())\n self.do_send(e.target, cmdStr)\n\n elif cmd in self.commands:\n c = self.commands[cmd]\n try:\n c(self, e, cmd, *arg)\n except Exception as err:\n logger.warn('Error in command: 
%s %s', str(cmd), err)\n self.do_send(e.target, \"Huh?\")\n else:\n nick = re.sub(\"!.*\", \"\", e.source) # Strip IP from nick\n c = self.connection\n c.notice(nick, \"Not understood: \" + cmd)\n\n def do_send(self, channel, msg):\n logger.info('Sending \"%s\" to %s' % (msg, channel))\n try:\n self.connection.privmsg(channel, msg)\n time.sleep(0.5)\n except Exception:\n logger.exception('Exception sending message:')\n self.reconnect()\n" }, { "alpha_fraction": 0.5957446694374084, "alphanum_fraction": 0.6106383204460144, "avg_line_length": 28.25, "blob_id": "322960d641b0bbe704074999c558d359af01e173", "content_id": "341b4267c5d2a988203fc138fa63b26c2bbf002b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "permissive", "max_line_length": 71, "num_lines": 16, "path": "/playbot/plugins/link_peek.py", "repo_name": "callidus/playbot", "src_encoding": "UTF-8", "text": "\n\nfrom __future__ import absolute_import\n\nimport re\nfrom six.moves.urllib import request\n\ndef peek(bot, c, e):\n msg = e.arguments[0].strip()\n msg = re.search(\"(http[^ ]*)\", msg)\n if msg is None:\n return\n url = msg.group(1)\n req = request.Request(url)\n response = request.urlopen(req)\n the_page = response.read().decode('windows-1252')\n title = re.search(\"<title>([^<]*)</title>\", str(the_page)).group(1)\n return \"Link peek: %s\" % title\n" } ]
18
Zearin/bibtexml2
https://github.com/Zearin/bibtexml2
13436c021e2d6d0b6d22e95cb3f77f2804584dc6
f2ac6bea5386b402771806f3b4d1d2137043eeb4
49dab59539f38a2152c7b4911f30c09eafd790d6
refs/heads/master
2021-01-01T06:11:53.058168
2013-07-29T15:53:44
2013-07-29T15:53:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4260588586330414, "alphanum_fraction": 0.43251973390579224, "avg_line_length": 28.02083396911621, "blob_id": "e25ddc35e046e5ce52d9efbf1f04e3ddb029ca9d", "content_id": "05eb87734738e09ca890c9b5cff1126353011099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2786, "license_type": "no_license", "max_line_length": 74, "num_lines": 96, "path": "/setup.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''Convert bibTeX files to XML! Built on Pygments.\n\nUseful for manipulating bibTeX data as XML with XML toolsets.\n\nIf you don't like something about bibtexml2, it's built with Pygments--so \nyou have its mature, widespread ecosystem at your disposal to tweak \nwhatever you want.\n\n'''\n\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\nfrom textwrap import dedent\n\n##---------------------------------------------------------------\n__name__ = 'bibtexml2'\n__version__ = '0.2'\n__author__ = 'Zearin'\n__author_email__ = '[email protected]'\n__description__ = __doc__.splitlines()[0]\n##---------------------------------------------------------------\n\nconfig = {\n ##\n ## OVERALL\n ##---------------------------------------------------------------\n 'name': __name__,\n 'version': __version__,\n 'description': __description__,\n 'long_description': __doc__,\n \n ##\n ## PEOPLE\n ##---------------------------------------------------------------\n 'author': __author__,\n 'author_email': __author_email__,\n \n ##\n ## METADATA\n ##---------------------------------------------------------------\n 'license': 'MIT',\n 'keywords': 'bibtex xml conversion pygments',\n 'classifiers': [\n 'Development Status :: 2 - Pre-Alpha',\n \n 'Environment :: Console',\n 'Environment :: Plugins',\n \n 'Intended Audience :: Science/Research',\n \n 'License :: OSI Approved :: MIT License',\n \n 'Natural Language :: 
English',\n \n 'Operating System :: MacOS',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: OS Independent',\n \n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n \n 'Topic :: Text Processing',\n 'Topic :: Utilities',\n ],\n\n\n ##\n ## URLS\n ##---------------------------------------------------------------\n #'url': 'URL to get it at.',\n #'download_url': 'Where to download it.',\n\n ##\n ## TECHNICAL\n ##---------------------------------------------------------------\n 'packages': [__name__],\n 'install_requires': ['docopt', 'pygments'],\n 'setup_requires': ['docopt', 'pygments'],\n 'tests_require': ['pyvows>=2.0.4'],\n \n 'entry_points': {\n 'pygments.lexers': 'bibtex = bibtexml2.lexer:BibtexLexer',\n 'pygments.formatters': 'bibtex = bibtexml2.formatter:BibTeXML',\n 'console_scripts': 'bibtexml2 = bibtexml2.__main__:main'\n },\n \n #'scripts': [],\n}\n\n\nsetup(**config)\n" }, { "alpha_fraction": 0.3784722089767456, "alphanum_fraction": 0.38012567162513733, "avg_line_length": 23.88888931274414, "blob_id": "2380f1392b90c096e7898e9e660501d9ca1fb3cf", "content_id": "7ba28cf43fe8dee6930d44742f6299dd1593e974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6048, "license_type": "no_license", "max_line_length": 89, "num_lines": 243, "path": "/bibtexml2/lexer.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''FIXME: <<DocString>>\n'''\n\n# Based on spec summary at\n# http://artis.imag.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html\n\n\n#--------------------------------------------------------------------\n## Imports\n#--------------------------------------------------------------------\n\n\n### STDLIB\nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,)\n\n### External\nfrom pygments.lexer import (\n RegexLexer, \n bygroups, \n include,)\n \nfrom 
pygments.token import (\n Text, \n Comment, \n Keyword, \n String, \n Number, \n Operator,\n Other, \n Punctuation, \n Literal, \n Whitespace,\n Name,)\n\n\n#--------------------------------------------------------------------\n## Variables\n#--------------------------------------------------------------------\n\n\nPUBTYPES = frozenset((\n 'article',\n 'book',\n 'booklet',\n 'conference',\n 'inbook',\n 'incollection',\n 'inproceedings',\n 'manual',\n 'mastersthesis',\n 'misc',\n 'phdthesis',\n 'proceedings',\n 'techreport',\n 'unpublished'\n))\n'''Official bibTeX publication types.'''\n### TODO: Tokenize as Keyword.Reserved\n\n_pubtypes_re_string = r'|'.join(PUBTYPES)\n\n\nFIELDS = frozenset((\n 'address',\n 'annote',\n 'author',\n 'booktitle',\n 'chapter',\n 'crossref',\n 'edition',\n 'editor',\n 'eprint',\n 'howpublished',\n 'institution',\n 'journal',\n 'key',\n 'month',\n 'note',\n 'number',\n 'organization',\n 'pages',\n 'publisher',\n 'school',\n 'series',\n 'title',\n 'type',\n 'url',\n 'volume',\n 'year',\n))\n'''Standard bibTeX fields. (Does not include non-standard fields.)'''\n\n\n### TODO: Tokenize these as Name.Constant\nMONTH_ABBR = ('jan' ,'feb' ,'mar' ,'apr',\n 'may' ,'jun' ,'jul' ,'aug',\n 'sep' ,'oct' ,'nov' ,'dec')\n'''Predefined bibTeX \"variables\" for the months of the year, \nwhich resolve to the month's full name.\n'''\n_month_abbr_re_string = '|'.join(MONTH_ABBR)\n\n\n#--------------------------------------------------------------------\n## Classes\n#--------------------------------------------------------------------\nclass BibtexLexer(RegexLexer):\n '''This class is a modification of the 'BibtexLexer' class from the module \n 'bibtex-pygments-lexer' (version 0.0.1), originally authored by Marco D. Adelfio. 
\n \n I couldn't find a repository for the module anywhere, so I modified it according \n to my needs.\n \n '''\n \n ### TODO: Change '=' type from Token.Text to Operator\n \n name = 'BibTeX'\n aliases = ['bibtex', 'bib', 'bibtexml']\n filenames = ['*.bib']\n tokens = {\n 'root': [\n include('whitespace'),\n include('@nonentries'),\n include('@entries'),\n include('raw_comment'),\n ],\n \n 'whitespace': [\n (r'\\s+', Whitespace)\n ],\n\n 'bracket': [\n (r'[^}{]+', String.Double),\n (r'{', Punctuation, '#push'),\n (r'}', Punctuation, '#pop'),\n ],\n \n 'raw_comment': [\n (r'.*\\n', Comment)\n ],\n \n '@entries': [ \n (r'(?i)(@(?:' + _pubtypes_re_string + r'))\\s*({)',\n bygroups(\n Keyword.Reserved, \n Punctuation), \n '@entry'\n ),\n ],\n \n '@nonentries': [\n # non-comment @declarations\n (r'(?i)(@(?:string|preamble))\\s*({)',\n bygroups(\n Keyword.Declaration,\n Punctuation),\n 'field'),\n \n (r'(?i)(@(?:comment))\\s*({)',\n bygroups(\n Keyword.Declaration,\n Punctuation),\n '@comment'), # like 'bracket', but contents tokenized as Comment instead\n \n (r'(?i)(@[^(' + _pubtypes_re_string + '){]+)\\s*({)',\n bygroups(\n Keyword, \n Punctuation), \n '@entry'\n ),\n ],\n \n '@comment': [\n (r'[^}{]+', Comment),\n (r'{', Punctuation, '#push'),\n (r'}', Punctuation, '#pop'),\n ],\n \n '@entry': [\n include('whitespace'),\n (r'(?i)([^, ]*)\\s*(\\,)',\n bygroups(\n Name.Label, \n Punctuation), \n 'field_multi'\n ),\n ],\n\n 'field_multi': [\n include('whitespace'),\n \n (r'}', Punctuation, '#pop:2'), # pop back to root\n \n (r'(?i)([^}=\\s]*)\\s*(=)', \n bygroups(\n Name.Attribute, \n Operator), \n 'value_multi'\n ),\n \n (r'[^}]+\\n', Text),\n ],\n \n 'field': [\n include('whitespace'),\n (r'}', Punctuation, '#pop'), # pop back to root\n (r'(?i)([^}=\\s]*)\\s*(=)',\n bygroups(\n Name.Label, \n Operator), \n 'value_single'\n ),\n (r'[^}]+\\n', Text),\n ],\n\n 'value': [\n include('whitespace'),\n (r'-?(0|[1-9]\\d*)', Number.Integer),\n (r'\"(\\\\\\\\|\\\\\"|[^\"])*\"', 
String.Double),\n (r\"'(\\\\\\\\|\\\\'|[^'])*'\", String.Single),\n (r'{', Punctuation, 'bracket'),\n (r'[^,}{]+', Text),\n ],\n \n 'value_multi': [\n include('value'),\n (r',', Punctuation, '#pop'), # pop back to field_multi\n (r'}', Punctuation, '#pop:3'), # pop back to root\n ],\n\n 'value_single': [\n include('value'),\n (r'}', Punctuation, '#pop:2'), # pop back to root\n ],\n \n \n \n }\n" }, { "alpha_fraction": 0.5782918334007263, "alphanum_fraction": 0.5788849592208862, "avg_line_length": 23.071428298950195, "blob_id": "84e821a62354cb59f3378e783ac85d411b58a93f", "content_id": "72d6c012d955ee1e66ed2c68d9a124a118c21d07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1686, "license_type": "no_license", "max_line_length": 58, "num_lines": 70, "path": "/bibtexml2/filter.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''Pygments-style filters.'''\n\n## StdLib\nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,)\n \n\n## External\nfrom pygments.util import get_bool_opt\nfrom pygments.token import Name\nfrom pygments.filter import (\n Filter, # class-based filters\n simplefilter, # decorator-based filters\n )\n\n\n@simplefilter\ndef token_types(lexer, stream, options):\n for ttype, value in stream:\n if ttype in options['ttypes']:\n yield ttype, value\n\n\n@simplefilter \ndef lowercase_entries(lexer, stream, options):\n for ttype, value in stream:\n pass # TODO\n \n \n@simplefilter \ndef lowercase_fields(lexer, stream, options):\n for ttype, value in stream:\n pass # TODO\n \n\n@simplefilter\ndef expand_month_abbrs(lexer, stream, options):\n months = {\n 'jan': 'January',\n 'feb': 'February',\n 'mar': 'March',\n 'apr': 'April',\n 'may': 'May',\n 'jun': 'June',\n 'jul': 'July',\n 'aug': 'August',\n 'sep': 'September',\n 'oct': 'October',\n 'nov': 'November',\n 'dec': 'December',}\n for ttype, value in stream:\n if ttype is 
Token.Text and value in months.keys():\n value = months[value]\n yield ttype, value\n \n\n@simplefilter \ndef drop_whitespace(lexer, stream, options):\n for ttype, value in stream: \n if ttype is not Token.Text.Whitespace:\n yield ttype, value\n\n@simplefilter \ndef drop_punctuation(lexer, stream, options):\n for ttype, value in stream: \n if ttype is not Token.Punctuation:\n yield ttype, value\n\n" }, { "alpha_fraction": 0.4868287742137909, "alphanum_fraction": 0.4926545023918152, "avg_line_length": 29.376922607421875, "blob_id": "49a80ab1895b0291955e4ad0d5a82170bb9ffa79", "content_id": "4a40dbadd211cd0ec3dd7cfd8920ede9b3ca1569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3950, "license_type": "no_license", "max_line_length": 79, "num_lines": 130, "path": "/tests/lexer_vows.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n## Generated by PyVows v2.0.4 (2013/07/19)\n## http://pyvows.org\n\n\n\n#--------------------------------------------------------------------\n## Imports\n#--------------------------------------------------------------------\n### Standard Library\nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,)\n\nimport os\nfrom os import path\nfrom os.path import (\n abspath,\n basename,\n dirname,)\n \nfrom pprint import pprint\nimport sys\n\n### Third Party\nimport pygments\nfrom pygments.token import *\nfrom pygments.filters import (\n KeywordCaseFilter, \n TokenMergeFilter, \n RaiseOnErrorTokenFilter,)\n \nimport six\n\n### PyVows Testing\nfrom pyvows import (Vows, expect)\n\n## Local Imports\n# imported below...\n\n\n\n#--------------------------------------------------------------------\n## Variables\n#--------------------------------------------------------------------\nTEST_PATH = abspath(dirname(__file__))\nMOD_PATH = abspath(path.join(TEST_PATH, '../'))\nTESTDATA_PATH = abspath(path.join(TEST_PATH, 
'examples'))\n\ntry:\n # Import the file directly above this one\n # (i.e., don’t use similar modules found in PYTHONPATH)\n _syspath = sys.path[:]\n sys.path.insert(0, MOD_PATH)\n \n import bibtexml2\n from bibtexml2 import lexer\n sys.path = _syspath[:]\n del _syspath\nexcept ImportError as err:\n print(err)\n sys.exit(err)\n\n\nFILES = set((f for f in os.listdir(TESTDATA_PATH)\n if f != 'testcases.bib')) # causes weird encoding errors; fix later\nFILES = set((path.join(TESTDATA_PATH, f) for f in FILES))\n\nLEXER = lexer.BibtexLexer()\nLEXER.add_filter( RaiseOnErrorTokenFilter() )\n\n#--------------------------------------------------------------------\n## Custom Contexts\n#--------------------------------------------------------------------\ndef token_context(token_types):\n class Context(Vows.Context):\n def topic(self, parent_topic):\n if parent_topic[0] in frozenset(token_types):\n yield parent_topic\n return Context\n\n\n#--------------------------------------------------------------------\n## Tests\n#--------------------------------------------------------------------\[email protected]\nclass FilesToLex(Vows.Context):\n # first, rule out any dumb file errors\n def topic(self):\n for f in FILES:\n yield f\n \n def test_files_exist(self, topic):\n expect(topic).to_be_a_file()\n \n \n class WhenLexed(Vows.Context):\n def topic(self, parent_topic):\n with open(parent_topic, 'r') as f:\n code = ''.join( f.readlines() )\n for item in pygments.lex(code, LEXER):\n yield item # 2-tuple of TokenType, TokenValue\n \n ##\n ## Ignored; use pygments RaiseOnErrorFilter\n ##\n # def we_get_no_lexer_errors(self, topic):\n # expect(topic).not_to_be_an_error()\n # expect(topic[0]).not_to_equal(Token.Error)\n \n \n class WhitespaceTokens(token_context( (Token.Text.Whitespace,) )):\n def only_contain_whitespace(self, topic):\n expect(topic[1]).to_match(r'\\s+')\n \n \n class EntriesAndFields(token_context( (Token.Keyword.Reserved,\n Token.Name.Attribute) )):\n def 
contain_no_whitespace(self, topic):\n pre_strip, post_strip = topic[1], topic[1].strip()\n expect(topic[1]).not_to_match(r'\\s+')\n expect(pre_strip == post_strip).to_be_true()\n \n \n class Entries(token_context( (Token.Keyword.Reserved, ) )):\n def have_valid_token_values(self, topic):\n pubtype = topic[1].lower()\n pubtype = pubtype.lstrip('@')\n expect(pubtype in lexer.PUBTYPES).to_be_true()" }, { "alpha_fraction": 0.5390256643295288, "alphanum_fraction": 0.5400733351707458, "avg_line_length": 25.51388931274414, "blob_id": "1af4aa0375b69fc2057014e4989b4db87d23f73e", "content_id": "3379f6aab3ae09ae2cf41d0e49b1a3d63b08751d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 88, "num_lines": 72, "path": "/bibtexml2/utils.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''Miscellaneous BibTeX utilties.'''\n\n## STDLIB\nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,\n )\n \nimport io\n\n## External\nimport six\n\n## Local\n\n\n#------------------------------------------------------------------------------\ndef _indeces_that_match(sequence, re_object):\n '''Given `sequence`, returns a list of indeces where \n `sequence[index]` matches `re_object`.\n \n '''\n indeces = []\n for idx, line in enumerate(sequence):\n if re_object.search(line):\n indeces.append(idx)\n return indeces\n\ndef _break_list(sequence, indeces):\n '''Breaks sequence into a list containing tuples. 
\n Each tuple contains a slice of `sequence`, calculated \n using each pair of values in `indeces`.\n \n '''\n results = []\n \n for idx, item in enumerate(indeces):\n start = indeces[idx]\n try: stop = indeces[idx+1]\n except IndexError: stop = None\n results.append( tuple(sequence[start:stop]) )\n \n return results\n\n#------------------------------------------------------------------------------\ndef open(file):\n '''Opens a bibtex `file` and returns its contents as a string.\n\n Uses `readlines()` internally.\n\n '''\n try:\n lines = None\n # open the file as *bytes*:\n # - lots of TeX stuff is written in ASCII, \n # - this function shouldn't alter the text\n # - any unicode translation is up to other functions\n with io.open(file, 'rb') as f:\n lines = f.readlines()\n return lines\n except Exception as e:\n # Just adds a little extra message with whitespace to make errors easier to spot\n from textwrap import dedent\n message = '''\n \nERROR OPENING BIBTEX FILE: {file}\n\n '''.format(file=file)\n print(dedent(message))\n raise e\n" }, { "alpha_fraction": 0.6675062775611877, "alphanum_fraction": 0.6763224005699158, "avg_line_length": 19.384614944458008, "blob_id": "29555320c10b2689e4ad24fb1d3f6943beae5b25", "content_id": "466990471ae783e5a73e2860906ccfa1dd7d05e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 794, "license_type": "no_license", "max_line_length": 75, "num_lines": 39, "path": "/TODO.md", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# TODO\n\n\n## Overall Project\n\n- [x] Set up Travis-CI (working on it!)\n- [x] Set up CodeQ.io\n- [x] Set up Coveralls.io\n- [ ] Set up PyPI Version (Crate.io)\n- [ ] Set up Downloads (Crate.io)\n\n\n## Tokenization \n\nSee [bibtexml2/lexer.py](bibtexml2/lexer.py).\n\n- [x] Tokenize bibtex pubtypes as `Keyword.Reserved`\n- [x] Change tokenization type of `=` from `Token.Text` to `Token.Operator`\n- [ ] Tokenize bibtex predefined month 
abbreviations as `Name.Constant`\n\n\n## Filtering\n\nSee [bibtexml2/filter.py](bibtexml2/filter.py).\n\n- [ ] Convert special bibtex characters to Unicode/UTF-8 characters\n\n\n## Formatter\n\nSee [bibtexml2/formatter.py](bibtexml2/formatter.py).\n\n- [ ] Convert tokens to XML (using `xmlwitch`)\n- [ ] Valid output (from original BibTeXML schema)\n\n\n\n<!-- ## Style -->\n<!-- - [ ] -->" }, { "alpha_fraction": 0.4598463773727417, "alphanum_fraction": 0.4668295979499817, "avg_line_length": 32.23255920410156, "blob_id": "baa9f9bb1df26c6d430681817cb7f0ec019cf67b", "content_id": "091a56a4d639b0c5f6e4ebf55ad6b0270bc99517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5728, "license_type": "no_license", "max_line_length": 107, "num_lines": 172, "path": "/bibtexml2/formatter.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''Pygments formatter. Converts bibTeX to XML.'''\n\n\n#----------------------------------------------------------------------\n# Imports\n#----------------------------------------------------------------------\n## StdLib\nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,)\n\nimport itertools \n\n## External\nfrom pygments.formatter import Formatter\nfrom pygments.token import (\n Text, \n Comment, \n Keyword, \n String, \n Number, \n Operator,\n Other, \n Punctuation, \n Literal, \n Whitespace,\n Name,)\n \nimport xmlwitch\n\n\n#----------------------------------------------------------------------\n# Variables\n#----------------------------------------------------------------------\nDTD = '<!DOCTYPE bibtex:file PUBLIC \"-//BibTeXML//DTD XML for BibTeX v1.0//EN\" \"bibtexml.dtd\">\\n'\nXML = xmlwitch.Builder(version='1.0', encoding='utf-8')\nXMLNS = 'http://bibtexml.sf.net/'\n\nSKIPPABLE_TTYPES = frozenset([Text.Whitespace, Whitespace, Operator, 
Punctuation])\n\n\n\n#----------------------------------------------------------------------\n# Functions\n#----------------------------------------------------------------------\ndef _is_entry(token_tuple):\n ttype, value = token_tuple[0], token_tuple[1]\n return ttype == Keyword.Reserved # and str(value).startswith('@')\n \ndef _write_entry(entry_token_stream):\n try:\n assert( entry_token_stream[0][0] is Keyword.Reserved ) # sanity check! be sure we have an entry\n except AssertionError:\n import sys\n sys.exit(entry_token_stream[0])\n \n entrytype = entry_token_stream[0][1]\n entrytype = entrytype.lstrip('@').lower()\n entrylabel = (entry_token_stream[1][0] is Name.Label) and entry_token_stream[1][1] or None\n \n if entrylabel:\n with XML.bibtex__entry( id=entrylabel ):\n with XML['bibtex__{0}'.format(entrytype)]:\n _write_fields( entry_token_stream[2:] )\n else:\n with XML.bibtex__entry():\n with XML['bibtex__{0}'.format(entrytype)]:\n _write_fields( entry_token_stream[1:] )\n\ndef _write_fields(field_token_list):\n field_indeces = sorted([idx for idx, item in enumerate(field_token_list) if item[0] is Name.Attribute])\n field_indeces = tuple(field_indeces)\n is_field = lambda x: x[0] is not Name.Attribute\n \n for idx, item in enumerate(field_token_list):\n ttype, value = item[0], item[1].lower().strip(r' \\'\"}{')\n if ttype is Name.Attribute:\n fieldname = value\n metaidx = field_indeces.index(idx)\n start = idx+1\n stop = 1 + metaidx\n value = ''.join([ item[1] for item in \n itertools.takewhile(\n is_field, \n field_token_list[idx+1:])\n ])\n XML[ 'bibtex__{0}'.format(fieldname) ]( value )\n \n\ndef _entries(token_seq, indeces):\n # returns a list where each item is an entry \n # (a slice of `token_seq`)\n entries = []\n \n for idx, i in enumerate(indeces):\n start = indeces[idx]\n try:\n stop = indeces[idx + 1]\n except IndexError:\n stop = None\n finally:\n entries.append( token_seq[start:stop] )\n \n return 
entries\n\n\n#----------------------------------------------------------------------\n# Classes\n#----------------------------------------------------------------------\nclass BibTeXML(Formatter):\n '''Formats a bibTeX token-stream to XML output.\n \n Output (should) be valid according to the output from the original \n BibTeXML project (hosted on SourceForge.net).\n \n '''\n \n name = 'BibTeXML'\n aliases = ['bibtexml', 'bibteXML', 'bibxml']\n \n \n def __init__(self, **options):\n super(BibTeXML, self).__init__(**options)\n\n def format(self, tokensource, outfile):\n # need to be able to look ahead \n token_seq = tuple([i for i in tokensource if i[0] not in SKIPPABLE_TTYPES])\n \n # mark where entries occur\n entry_indeces = tuple([idx for idx, item in enumerate(token_seq) if _is_entry(item)])\n \n # build list of sub-iterators\n entries = _entries(token_seq, entry_indeces)\n\n # begin XML document\n XML.write(DTD)\n \n with XML.bibtex__file(xmlns__bibtex=XMLNS):\n idx = 0\n \n # make sure we've captured entries\n if len(entry_indeces): \n # write anything that occurs before the first entry\n \n in_comment = False\n for idx, item in enumerate(token_seq):\n if idx > entry_indeces[0]:\n break\n \n ttype, value = item[0], item[1]\n next_ttype = token_seq[idx+1][0]\n \n if ttype is Comment:\n if not in_comment:\n in_comment = True\n XML.write_indented('\\n<!--\\n')\n XML._indentation += 1\n XML.write_indented(value.strip('\\r\\n'))\n continue\n elif in_comment:\n XML.write_indented('\\n-->')\n XML._indentation -= 1\n continue\n \n \n # write the entries\n for idx, entry in enumerate( entries ):\n _write_entry(entry)\n \n outfile.write(str(XML))\n " }, { "alpha_fraction": 0.6815286874771118, "alphanum_fraction": 0.6878980994224548, "avg_line_length": 21.380952835083008, "blob_id": "d750d72101bc440ab5d0e6efaaaabe3620e4a6fc", "content_id": "c31dc0a0b5017a85ef357013c207d706a3360db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 74, "num_lines": 21, "path": "/bibtexml2/__init__.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''Convert bibTeX files to XML! Built on Pygments.\n\nUseful for manipulating bibTeX data as XML with XML toolsets.\n\nIf you don't like something about bibtexml2, it's built with Pygments--so \nyou have its mature, widespread ecosystem at your disposal to tweak \nwhatever you want.\n\n'''\n\n## STDLIB\nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,)\n\n## Local \nfrom bibtexml2 import (\n lexer,\n utils)\n\n" }, { "alpha_fraction": 0.4520731270313263, "alphanum_fraction": 0.4592064321041107, "avg_line_length": 24.79310417175293, "blob_id": "58339d038f8b60d36388691e9390189fd37e31b6", "content_id": "f2ea8b12040f8845d5e29718e9873e0fa75fffa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2243, "license_type": "no_license", "max_line_length": 84, "num_lines": 87, "path": "/bibtexml2/__main__.py", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''{program[human_format]}\n\nUsage: {program[cli_format]} [options] <file>...\n\nOptions:\n -X, --no-xml Output bibTeX instead of XML\n\n'''\n\n#--------------------------------------------------------------------\n## Imports\n#--------------------------------------------------------------------\n## StdLib \nfrom __future__ import (\n absolute_import, \n with_statement, \n print_function,)\n\nimport sys\n\n## External\nfrom docopt import docopt\n\nimport pygments\nfrom pygments.filters import (\n KeywordCaseFilter, \n TokenMergeFilter, \n RaiseOnErrorTokenFilter,)\n \nfrom pygments.token import *\n\n\n## Internal\nfrom bibtexml2 import (\n lexer,\n utils,)\n\n\n#--------------------------------------------------------------------\n## 
Variables\n#--------------------------------------------------------------------\nBibtexLexer = lexer.BibtexLexer\ndocstring_format_dict = {\n 'human_format': 'BibTeXML2',\n 'cli_format' : 'bibtexml2',\n }\n\n\n#--------------------------------------------------------------------\n## __main__\n#--------------------------------------------------------------------\ndef main():\n arguments = docopt( \n __doc__.format( program=docstring_format_dict ), \n version= '{docstring_format_dict[\"human_format\"]} 2.0', \n options_first= True\n )\n\n lexer = BibtexLexer()\n lexer.add_filter( RaiseOnErrorTokenFilter() )\n #lexer.add_filter( TokenMergeFilter() )\n lexer.add_filter( KeywordCaseFilter(case='lower') )\n \n for f in arguments['<file>']:\n # get bibtex source\n code = None\n with open(f, 'r') as f:\n code = ''.join( f.readlines() )\n\n # NOW LEX SEE CODE!\n for idx, item in enumerate(pygments.lex(code, lexer)):\n tokentype, tokenvalue = item[0], item[1]\n \n # if tokentype in frozenset([Token.Text.Whitespace, Token.Punctuation]):\n # continue\n print( \"{0:>5}\\t{1[0]!s:<25}\\t{1[1]!r}\".format(idx, item),\n file=sys.stdout )\n\n \n \nif __name__ == '__main__':\n try:\n main()\n except pygments.filters.ErrorToken as e:\n sys.exit(e)" }, { "alpha_fraction": 0.6902927756309509, "alphanum_fraction": 0.7010785937309265, "avg_line_length": 35.05555725097656, "blob_id": "b9221233f0ed8a1b1798cff00f703d25827ca59c", "content_id": "02a80e29d7754dd0731544ef504bf1d90486f4bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 655, "license_type": "no_license", "max_line_length": 128, "num_lines": 18, "path": "/README.md", "repo_name": "Zearin/bibtexml2", "src_encoding": "UTF-8", "text": "# BibTeXML2\n\n**This project is currently in ALPHA!** \n**It’s not working, and you probably shouldn’t even run it. 
(Yet.)**\n\n\n<!-- Code Badges -->\n[![Build Status](https://travis-ci.org/Zearin/bibtexml2.png)](https://travis-ci.org/Zearin/bibtexml2)\n[![codeq](https://codeq.io/github/Zearin/bibtexml2/badges/master.png)](https://codeq.io/github/Zearin/bibtexml2/branches/master)\n[![Coverage Status](https://coveralls.io/repos/Zearin/bibtexml2/badge.png)](https://coveralls.io/r/Zearin/bibtexml2)\n<!-- /Code Badges -->\n\n\nConvert BibTeX files to XML.\n\nCustomizable using Pygments. (Or with XML tools, of course!)\n\n*More to come soon…*\n" } ]
10
MeganPorzio/class
https://github.com/MeganPorzio/class
e0629095c74415a82cd279a54dcbe2bb11a2fc36
476e53f0c2c9f24ba450bb34c8adeccded49507d
eeac7098a5e10e9cdeaa90f37fc2a9535748f340
refs/heads/master
2016-09-06T16:48:07.919562
2014-12-03T02:50:42
2014-12-03T02:50:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6834130883216858, "alphanum_fraction": 0.7113237380981445, "avg_line_length": 24.612245559692383, "blob_id": "1bf974c4059ff0ca2269db4811da7ea03f7daea9", "content_id": "cf4004b28a465fac4e65544ce1087c86ee83f644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1254, "license_type": "no_license", "max_line_length": 220, "num_lines": 49, "path": "/HW5.py", "repo_name": "MeganPorzio/class", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 30 21:29:39 2014\n\n@author: Student\n\"\"\"\n\nimport pandas\nimport numpy as np\nfrom sklearn import cross_validation\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\n\nwine = pandas.read_csv(\"wine.data\", header=None)\nwine.columns = ['Class','Alcohol','Malic Acid','Ash','Alcalinity of Ash','Magnesium','Total Phenols','Flavanoids','Nonflavanoid Phenols','Proanthocyanins','Color Intensity','Hue','OD280/OD315 of Diluted Wines','Proline']\n\nwine.hist([1])\n\ndata_values=wine.values\nall_array=np.array(data_values)\ndata_array=all_array[:,1:]\nlabel_array=all_array[:,0:1]\nlabel_array=label_array.ravel()\n\n\ndata_train, data_test, label_train, label_test = cross_validation.train_test_split(data_array,label_array)\n\nlr = LogisticRegression().fit(data_train,label_train)\nlr_prediction = lr.predict(data_test)\n\ni = 0\ncount = 0\nwhile i <= 44:\n if lr_prediction[i] == label_test[i]:\n count += 1\n i+=1\n\nprint \"Number of correct predictions: \"\nprint count\n\nknn = KNeighborsClassifier().fit(data_train,label_train)\nknn_prediction = knn.predict(data_test)\n\nki = 0\nkcount = 0\nwhile ki <= 44:\n if knn_prediction[ki] == label_test[ki]:\n count += 1\n i+=1" } ]
1
zhuang-hao-ming/select_dominator_point
https://github.com/zhuang-hao-ming/select_dominator_point
e089ee0b5c6fa560df1eee50e740757db5e88175
5cb78857a683de7207f522e5ac52f85a027c96cf
eb48020cbcad0520893ccc12cb32868211f4e2aa
refs/heads/master
2021-05-12T17:15:19.653053
2018-01-11T02:37:52
2018-01-11T02:37:52
117,040,017
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5286359786987305, "alphanum_fraction": 0.535474419593811, "avg_line_length": 28.56317710876465, "blob_id": "dbcdd7869d531396f6cf345124d8d9933230f9ef", "content_id": "7014bb570e51f57b860bc82c765b8a8da06bc5cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8493, "license_type": "no_license", "max_line_length": 137, "num_lines": 277, "path": "/signal_select.py", "repo_name": "zhuang-hao-ming/select_dominator_point", "src_encoding": "UTF-8", "text": "# -*- encoding: utf-8 -*-\n'''\n\n \n\n\n'''\nfrom collections import OrderedDict, defaultdict\nimport networkx as nx\nfrom shapely.geometry import shape\nimport fiona\nfrom operator import itemgetter\n\n\ndef get_signal_list():\n '''\n 获得signal list\n '''\n c = fiona.open('./shp/origin_signal_clean.shp') \n signal_list = []\n for rec in c: \n id = int(rec['properties']['id'])\n signal_list.append(id) \n c.close()\n \n return signal_list\n\ndef get_pnt_geom():\n '''\n 读取信号灯shp文件,返回信号灯id列表和信号灯几何信息\n '''\n c = fiona.open('./shp/shenzhen_point.shp') \n pnt_id_to_geom = {}\n for rec in c: \n id = int(rec['properties']['ID'])\n pnt_id_to_geom[id] = rec['geometry']\n \n c.close()\n \n return pnt_id_to_geom\n\ndef get_min_out(select_signal_out_pnt_dict):\n '''\n 遍历每个signal对应的out point,找出距离最近的out point\n 返回对应的signal和out point\n '''\n min_out_pnt = None\n min_signal = None\n min_distance = 99999999\n\n for signal, out_pnt in select_signal_out_pnt_dict.items():\n\n if out_pnt['distance'] < min_distance: \n min_out_pnt = out_pnt\n min_signal = signal\n min_distance = out_pnt['distance'] \n\n return min_signal, min_out_pnt\n\ndef get_road_graph():\n '''\n 读入路网数据构建网络结构,将所有道路看做双向的。\n\n '''\n\n road_graph = nx.Graph() # 路网\n c = fiona.open('./shp/shenzhen_road.shp')\n \n for record in c:\n\n geometry = record['geometry']\n assert(geometry['type'] == 'LineString')\n\n properties = record['properties']\n \n length = properties['LENGTH'] \n source = 
properties['SOURCE']\n target = properties['TARGET']\n \n \n if length < 0:\n length = properties['REVERSE_CO']\n\n assert(length > 0) \n\n road_graph.add_edge(source, target, **{\n 'weight': length \n })\n\n c.close()\n return road_graph\n\ndef write_result(travel_signal_list, signal_id_to_geom):\n driver = 'ESRI Shapefile'\n crs = {'init': 'epsg:32649'}\n schema = {\n 'properties': OrderedDict([('id', 'int')]),\n 'geometry': 'Point'\n }\n target_c = fiona.open('./shp/signal_v3.shp', 'w', driver=driver, crs=crs, schema=schema, encoding='utf-8')\n for signal in travel_signal_list:\n rec = {\n 'type': 'Feature',\n 'id': '-1',\n 'geometry': signal_id_to_geom[signal],\n 'properties': OrderedDict([('id', signal)])\n }\n target_c.write(rec)\n target_c.close()\n\n\ndef write_result_1(dominated_point_dict, signal_id_to_geom):\n driver = 'ESRI Shapefile'\n crs = {'init': 'epsg:32649'}\n schema = {\n 'properties': OrderedDict([('id', 'int'), ('d_id', 'int'), ('dis', 'float'),]),\n 'geometry': 'Point'\n }\n target_c = fiona.open('./shp/dominated_point_v3.shp', 'w', driver=driver, crs=crs, schema=schema, encoding='utf-8')\n for key, val in dominated_point_dict.items():\n dominator_key = val['dominator']\n distance = val['distance']\n rec = {\n 'type': 'Feature',\n 'id': '-1',\n 'geometry': signal_id_to_geom[key],\n 'properties': OrderedDict([('id', key), ('d_id', dominator_key), ('dis', distance)])\n }\n target_c.write(rec)\n target_c.close()\n\n\ndef set_out_pnt(signal_out_pnts_dict, signal, out_signal_dict, signal_list, dominated_point_dict, select_signal_out_pnt_dict):\n '''\n 重新设置候选点\n '''\n well = False\n for out_pnt in signal_out_pnts_dict[signal]:\n pnt_id = out_pnt['pnt_id'] \n if (pnt_id not in out_signal_dict) and (pnt_id not in signal_list) and (pnt_id not in dominated_point_dict):\n out_signal_dict[pnt_id] = signal\n select_signal_out_pnt_dict[signal] = out_pnt\n well = True\n break\n\n if not well: \n select_signal_out_pnt_dict[signal] = {\n 'distance': 
999999999999,\n 'pnt_id': -1\n }\n \n\n\n\ndef main():\n\n\n SIGNAL_DISTANCE = 600\n SEARCH_DISTANCE = 3000\n \n\n signal_list = get_signal_list() # 得到信号灯列表\n road_graph = get_road_graph() # 得到路网\n\n dominated_point_dict = {} # 记录每个被统治节点的信息: 统治节点,距离统治节点的距离\n\n signal_out_pnts_dict = defaultdict(list) # 记录source可用的out pnts\n\n out_signal_dict = {} # 记录out pnt被谁使用了\n\n select_signal_out_pnt_dict = {} # 记录signal实际使用的out\n\n\n for signal in signal_list:\n dists = nx.single_source_dijkstra_path_length(road_graph, signal, cutoff=SEARCH_DISTANCE, weight='weight')\n out_pnts = []\n for target, distance in dists.items():\n \n if distance <= SIGNAL_DISTANCE:\n\n if target not in dominated_point_dict:\n dominated_point_dict[target] = {\n 'distance': distance,\n 'dominator': signal\n }\n else:\n if distance < dominated_point_dict[target]['distance']:\n dominated_point_dict[target]['distance'] = distance\n dominated_point_dict[target]['dominator'] = signal\n else:\n pass\n\n else:\n out_pnts.append({\n 'pnt_id': target,\n 'distance': distance\n })\n assert(len(out_pnts) > 0)\n signal_out_pnts_dict[signal] = sorted(out_pnts, key=itemgetter('distance')) \n\n\n for signal in signal_list:\n set_out_pnt(signal_out_pnts_dict, signal, out_signal_dict, signal_list, dominated_point_dict, select_signal_out_pnt_dict)\n while True:\n \n print('num of dominated pnt {}'.format(len(dominated_point_dict)))\n print('num of signal {}'.format(len(signal_list)))\n \n # 获得距离最近的候选点\n min_signal, min_out_pnt = get_min_out(select_signal_out_pnt_dict)\n\n if min_signal is None:\n break\n # if len(dominated_point_dict) > 10000:\n # break\n \n\n new_signal = min_out_pnt['pnt_id']\n print(min_out_pnt['distance'])\n print('----')\n assert(new_signal not in signal_list)\n \n if new_signal in dominated_point_dict:\n set_out_pnt(signal_out_pnts_dict, min_signal, out_signal_dict, signal_list, dominated_point_dict, select_signal_out_pnt_dict)\n continue\n\n \n dists = 
nx.single_source_dijkstra_path_length(road_graph, new_signal, cutoff=SEARCH_DISTANCE, weight='weight')\n out_pnts = []\n for target, distance in dists.items():\n \n \n if distance <= SIGNAL_DISTANCE:\n\n if target not in dominated_point_dict:\n dominated_point_dict[target] = {\n 'distance': distance,\n 'dominator': new_signal\n }\n else:\n if distance < dominated_point_dict[target]['distance']:\n dominated_point_dict[target]['distance'] = distance\n dominated_point_dict[target]['dominator'] = new_signal\n else:\n pass\n\n else:\n out_pnts.append({\n 'pnt_id': target,\n 'distance': distance\n })\n assert(len(out_pnts) > 0)\n signal_out_pnts_dict[new_signal] = sorted(out_pnts, key=itemgetter('distance')) \n\n # 确定new_signal的out pnt\n signal_list.append(new_signal)\n del out_signal_dict[new_signal]\n\n \n set_out_pnt(signal_out_pnts_dict, new_signal, out_signal_dict, signal_list, dominated_point_dict, select_signal_out_pnt_dict)\n\n # 更新min_signal的out pnt\n set_out_pnt(signal_out_pnts_dict, min_signal, out_signal_dict, signal_list, dominated_point_dict, select_signal_out_pnt_dict)\n\n\n\n signal_id_to_geom = get_pnt_geom()\n write_result(signal_list, signal_id_to_geom)\n write_result_1(dominated_point_dict, signal_id_to_geom)\n # plt.show()\n\n \n \n\n\nif __name__ == '__main__':\n main()\n" } ]
1
Chesterdelang/Project_1
https://github.com/Chesterdelang/Project_1
abe6f3b15b94406ce27ac2d2d63034e2b704573b
1cc193c8b53814afa312c57c084534a53b3b3008
c57bc5341b3bcd56d2cda2c74dcb66e40f92d26a
refs/heads/master
2021-06-28T16:54:10.998178
2017-09-11T09:09:03
2017-09-11T09:09:03
103,114,982
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6478873491287231, "alphanum_fraction": 0.6549295783042908, "avg_line_length": 27.399999618530273, "blob_id": "1ef0b0d277f8c153dbafa1f94cab4dd6f52a0349", "content_id": "00e802855dc93f08a8a96fa947fdce27a438fb63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 86, "num_lines": 10, "path": "/Python bestanden/Les 3/PE3_1.py", "repo_name": "Chesterdelang/Project_1", "src_encoding": "UTF-8", "text": "#Chester de Lang\n\nvoldoende_score = 15\ningegeven_score = eval(input('Geef je score: '))\n\nif ingegeven_score > voldoende_score:\n print('Gefeliciteerd!')\n print('met een score van' + ' ' + str(ingegeven_score) + ' ' + 'ben je geslaagd!')\nelse:\n print('Je hebt het helaas niet gehaald')\n" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 34.25, "blob_id": "b39a7b54cec050edb96f8f1ce6ee3d350eb7ec39", "content_id": "d43f037097fbea9f39b90f8e5595c13bc706048a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 51, "num_lines": 4, "path": "/Python bestanden/Les 2/PE2_4.py", "repo_name": "Chesterdelang/Project_1", "src_encoding": "UTF-8", "text": "uren = float(input('Hoeveel uur heb je gewerkt: '))\nverdien = float(input('Wat verdien je per uur: '))\ntotaal = (uren*verdien)\nprint(totaal)\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 32.75, "blob_id": "d954d09c2f0d3b9c4486cc06db55fafd8d6ffba4", "content_id": "ce81dbbdb56d46450c8fbcece34266786bbf1daa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 125, "num_lines": 8, "path": "/Python bestanden/Les 2/PE2_2.py", "repo_name": "Chesterdelang/Project_1", 
"src_encoding": "UTF-8", "text": "cijferICOR = 7\ncijferCSN = 6\ncijferPROG = 8\ntotaal = cijferCSN + cijferICOR + cijferPROG\nbedrag = str(totaal * 30)\ngemiddeld = str(totaal / 3)\n\nprint('Mijn cijfers' + '(gemiddeld een' +' '+ gemiddeld + '' + ') Leveren een beloning op van' + ' ' + bedrag + ' ' + 'op !')\n" } ]
3
eemerica/astr-119-hw-1
https://github.com/eemerica/astr-119-hw-1
a1e0bba0850084ec3bda2bbe2788bd464188e575
cdd2469de6217beaf21f2b9f609a5414c066cfd3
f7848b6233359da814b12753214d21a6fe070f07
refs/heads/master
2020-03-31T00:53:58.866083
2018-10-05T17:53:56
2018-10-05T17:53:56
151,758,045
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6619468927383423, "alphanum_fraction": 0.6902654767036438, "avg_line_length": 28.789474487304688, "blob_id": "3734336fe7cc9ab53ea79c1f5ef3f5f03722d2ca", "content_id": "933ac0cb58b03a0438df8735fa4e402014ee03ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "permissive", "max_line_length": 64, "num_lines": 19, "path": "/variables_and_loops.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! usr/bin/env python3\nimport numpy as np\ndef main():\n\ti=0 #integers can be declared with a number\n\tn=10 # also an int\n\tx=119.0 #floating point nums are declared with a \".\"\n\t\n\t#we can use numpy to declare arrays quickly\n\ty=np.zeros(n,dtype=float) #declares 10 zeros as floats using np\n\t#can use for loops to iterate with a variable.\n\tfor i in range(n):\n\t\ty[i]=2.0 * float(i) + 1.0 #set y=2i+1 as floats\n\t#or we can also simply iterate through a variable.\n\t#print(y)\n\tfor y_element in y:\n\t\tprint(y_element)\n#execute the main function\nif __name__==\"__main__\":\n\tmain()" }, { "alpha_fraction": 0.7160940170288086, "alphanum_fraction": 0.7522603869438171, "avg_line_length": 33.625, "blob_id": "8181fac21fb5599e4f51f2ab90be33d00e5efe2d", "content_id": "689c3ce8019de4ac1cddf222e5916b68e8c8ebc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "permissive", "max_line_length": 52, "num_lines": 16, "path": "/data_types.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! 
usr/bin/env python3\nimport numpy as np #import numpy library\n#integers\ni=10 #integer\nprint(type(i))#print out the data type of i\na_i=np.zeros(i,dtype=int)#declare an array of ints\nprint(type(a_i))#will return nd array\nprint(type(a_i[0]))#will return int64\n#floats\nx=119.0#floating point number\nprint(type(x))#print out the data type of x\ny=1.19e2#float 119 in scientific notation\nprint(type(y))#print out the data type of y\nz=np.zeros(i,dtype=float)#declare an array of floats\nprint(type(z))#will return nd array\nprint(type(z[0]))#will return float64" }, { "alpha_fraction": 0.7242063283920288, "alphanum_fraction": 0.738095223903656, "avg_line_length": 25.578947067260742, "blob_id": "237c9a40ef4ff7c0e2fbf67e288f372cd8b04ccc", "content_id": "ebf6fa5ed87e6d9d7ffa052ee4a186ab60274542", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "permissive", "max_line_length": 51, "num_lines": 19, "path": "/python_dictionaries.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! 
usr/bin/env python3\n#define a dictionary data structure\n#dictionaries have key : value for the elements\nexample_dict={\n\t\"class\"\t\t\t:\t\"Astr 119\",\n\t\"prof\"\t\t\t:\t\"Brant\",\n\t\"awesomeness\"\t:\t10\n}\nprint(type(example_dict))#will say dict\n#get a value via key\ncourse=example_dict[\"class\"]\nprint(course)\n#change a value via key\nexample_dict[\"awesomeness\"]+=1#increase awesomeness\n#print the dictionary\nprint(example_dict)\n#print dictionary element by element\nfor x in example_dict.keys():\n\tprint(x,example_dict[x])" }, { "alpha_fraction": 0.6396551728248596, "alphanum_fraction": 0.684482753276825, "avg_line_length": 33.17647171020508, "blob_id": "643014d1fe7d1550ba5b673ba94a334e9ba001f0", "content_id": "ced81b96767398cdd2d0896d2e140ba29ff1aa8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "permissive", "max_line_length": 80, "num_lines": 17, "path": "/check_in_solution.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! 
usr/bin/env python3\nimport numpy as np #Includes the numpy library\n\ndef main():#Defines a function named main that does not currently take arguments\n\ti=0 #declare i, initialize to integer 0\n\tx=119.0\n\tfor i in range(120): #iterates from 0 to 119 (0 to n-1)\n\t\tif((i%2)==0):#if i is even.\n\t\t\tx+=3.0#add 3 to x\n\t\telse:# if i is odd\n\t\t\tx-=5.0 #subtract 5 from x\n\ts=\"%3.2e\"%x #makes a string s containing the value of x in scientific notation\n\tprint(s) #print s to the screen\n\n#now the rest of the program continues.\nif __name__==\"__main__\": #call main\n\tmain() #run the main function" }, { "alpha_fraction": 0.7385444641113281, "alphanum_fraction": 0.7425876259803772, "avg_line_length": 31.30434799194336, "blob_id": "f2d703c986b71a9523e3f5351ac24aa5f5281771", "content_id": "f9b85117357db1a8089d8b5630a21a408245e58d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "permissive", "max_line_length": 61, "num_lines": 23, "path": "/data_types_continued.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! 
usr/bin/env python3\n#string\ns=\"I am a string.\"\nprint(type(s))#will say str\n#boolean\nyes=True#boolean true\nprint(type(yes))\nno=False#Boolean False\nprint(type(no))\n#List -- ordered and changeable\nalpha_list=[\"a\",\"b\",\"c\"]#list initialization\nprint(type(alpha_list))#will say tuple\nprint(type(alpha_list[0]))#will say string\nalpha_list.append(\"d\")#will add \"d\" to the list end\nprint(alpha_list)#will print list\n#Tuple -- ordered and unchangeable\nalpha_tuple=(\"a\",\"b\",\"c\")#tuple initialization\nprint(type(alpha_tuple))#will say tuple\ntry:#attempt the following line\n\talpha_tuple[2]=\"d\"#won't work and will raise TypeError\nexcept TypeError:#when we get a TypeError\n\tprint(\"We can't add elements to tuples!\")#print this message\nprint(alpha_tuple)" }, { "alpha_fraction": 0.6566265225410461, "alphanum_fraction": 0.7289156913757324, "avg_line_length": 32.400001525878906, "blob_id": "870cc0e4b2d877901f278ef6d86849e1d7832de9", "content_id": "5f4c20c7346a2a355364f7369ff391a7a0908b3f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "permissive", "max_line_length": 57, "num_lines": 5, "path": "/while.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! 
usr/bin/env python3\ni=0#define an integer\nwhile (i<119):#while i is less than 119, do the following\n\tprint(i)#print the current value of i\n\ti+=10#increment i by 10" }, { "alpha_fraction": 0.7156549692153931, "alphanum_fraction": 0.7236421704292297, "avg_line_length": 30.350000381469727, "blob_id": "8ddc8fb3ae449ca32b7ae73d5c41fc2824f5a4a1", "content_id": "225f30a47922029cc1a7c6b7c43637c23c521296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "permissive", "max_line_length": 74, "num_lines": 20, "path": "/functions.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! usr/bin/env python3\nimport numpy as np\nimport sys\n#define a function that returns a value\ndef expo(x):\n\treturn np.exp(x)#return the np e^x function\n#define a subroutine that does not return a value\ndef show_expo(n):\n\tfor i in range(n):\n\t\tprint(expo(float(i)))#call the expo function ensures that it is a float.\n#define a main function\ndef main():\n\tn=10#provide a default function for n\n\t#check if there is a command line argument provided\n\tif(len(sys.argv)>1):\n\t\tn=int(sys.argv[1])#if an argument was provided, use it for n\n\tshow_expo(n)#call the show_expo subroutine\n#run the main function\nif __name__==\"__main__\":\n\tmain()" }, { "alpha_fraction": 0.8073394298553467, "alphanum_fraction": 0.8103975653648376, "avg_line_length": 45.85714340209961, "blob_id": "5915d555a8f2b6c405dfd3f9298a43bc6616f129", "content_id": "585bab79b7404e80f51bd345ce909674248f6c35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "permissive", "max_line_length": 68, "num_lines": 7, "path": "/useful_modules.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! 
usr/bin/env python3\nimport numpy as np#the numpy library\nimport matplotlib.pyplot as plt#Matplotlib's pyplot\nimport sys#gives access to a c-like sys library\nimport os#gives access to operating system\nprint(sys.argv)#prints any command line arguments, incl program name\nprint(os.getcwd())#prints the current working directory" }, { "alpha_fraction": 0.6353982090950012, "alphanum_fraction": 0.6796460151672363, "avg_line_length": 14.69444465637207, "blob_id": "95aff63181d974a1b08f07a09f83a58b118558c0", "content_id": "c55483beeb62382747afea33d7629c02d2fd5d14", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "permissive", "max_line_length": 39, "num_lines": 36, "path": "/operators.py", "repo_name": "eemerica/astr-119-hw-1", "src_encoding": "UTF-8", "text": "#! usr/bin/env python3\nx=9\ny=3\n#arithmetic operators\nprint(x+y)#addition\nprint(x-y)#subtraction\nprint(x*y)#multiplication\nprint(x/y)#division\nprint(x%y)#modulus\nprint(x**y)#exponentiation\nx=9.191823\nprint(x//y)#floor division\n\n#assignment operators\nx=9 #set x = 9\nx+=3 #x=x+3\nprint(x)\nx=9\nx-=3 #x=x-3\nprint(x)\nx*=3#x=x*3\nprint(x)\nx/=3#x=x/3\nprint(x)\nx**=3#x=x^3\nprint(x)\n\n#comparison operators\nx=9\ny=3\nprint(x==y)#true if x exactly equals y\nprint(x!=y)#true if x is not equal to y\nprint(x>y)#true x>y\nprint(x<y)#true x<y\nprint(x>=y)#true x>=y\nprint(x<=y)#true x<=y\n" } ]
9
gregd190/AI-Gym---LunarLander-v2-Actor-Critic
https://github.com/gregd190/AI-Gym---LunarLander-v2-Actor-Critic
dec33bb2e47530f8df6ff5312f942c0876060752
84208f34d62b6f8fb38d947c10a76bbb0a7f9dc8
e1c1aaa58a53864abde467a6c3e1ec3f9d26ce92
refs/heads/master
2020-03-31T00:27:10.406853
2018-10-08T07:45:33
2018-10-08T07:45:33
151,740,142
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6103475093841553, "alphanum_fraction": 0.630178689956665, "avg_line_length": 35.46595001220703, "blob_id": "46fc1d063a5a30f924f00ff2df5756d0e28b1ab6", "content_id": "41d39e8307f88f4ce8739614990c5d02cfc997f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10186, "license_type": "no_license", "max_line_length": 709, "num_lines": 279, "path": "/LunarLander-V2 -Multiprocessing.py", "repo_name": "gregd190/AI-Gym---LunarLander-v2-Actor-Critic", "src_encoding": "UTF-8", "text": "# # A2C Solution to OpenAI Gym LunarLander-v2 environment\n# \n# ## Introduction:\n# \n# ### OpenAI Gym Environment Description:\n# \"Landing pad is always at coordinates (0,0). Coordinates are the first two numbers in state vector. Reward for moving from the top of the screen to landing pad and zero speed is about 100..140 points. If lander moves away from landing pad it loses reward back. Episode finishes if the lander crashes or comes to rest, receiving additional -100 or +100 points. Each leg ground contact is +10. Firing main engine is -0.3 points each frame. Solved is 200 points. Landing outside landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land on its first attempt. Four discrete actions available: do nothing, fire left orientation engine, fire main engine, fire right orientation engine.\n# \"\n# \n# ### Discussion\n# Due to the continuous and varied nature of the state space, discretising the state space at a sufficiently high resolution would result in an impractically large number of possible states. A conventional Q-table type solution is therefore impractical.\n# An actor - Critic method is used, training 2 neural networks, an 'actor' network to determine the optimal action, and a 'critic' network to estimate the potential reward of the action. \n# \n# Keras, as a frontend for Tensorflow, is used to create and train the neural networks. 
\n# \n# As 'solved' is considered 200 points, the networks will be trained and optimized until the average over the last 100 iterations exceeds this value. \n# \n# This version enables multiprocessing to allow training of multiple models simulteously to speed up the hyperparameter search on a CPU-only tensorflow setup.\n# It is set to train 3 models at a time, as that provided best performance on a 4 core machine, but can be easily changed to suit higher-performance cpus. \n\n#Import the various gym, keras, numpy and libraries we will require\n\nimport gym\nimport gym.spaces\nimport gym.wrappers\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport pickle\nimport time\n\nfrom collections import deque\nfrom keras.layers import Flatten, Dense\nfrom keras import backend as K\nfrom keras.models import Sequential, Model, load_model\nfrom keras import optimizers\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom multiprocessing import Pool\n\n# ### Creating the models\n# \n# Functions for model creation allow for flexibility in network size to allow for comparison of network sizes. 
\n# \n# Adam is used as the optimizer, as it has proven efficient on prior problems.\n# \n\ndef build_model_critic(num_input_nodes, num_output_nodes, lr, size):\n \n model = Sequential()\n \n model.add(Dense(size[0], input_shape = (8,), activation = 'relu'))\n \n for i in range(1,len(size)):\n model.add(Dense(size[i], activation = 'relu'))\n \n \n model.add(Dense(num_output_nodes, activation = 'linear')) \n \n adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999)\n \n model.compile(loss = 'mse', optimizer = adam, metrics=['acc'])\n \n return model\n\ndef build_model_actor(num_input_nodes, num_output_nodes, lr, size):\n \n model = Sequential()\n \n model.add(Dense(size[0], input_shape = (num_input_nodes,), activation = 'relu'))\n \n for i in range(1, len(size)):\n model.add(Dense(size[i], activation = 'relu'))\n \n model.add(Dense(num_output_nodes, activation = 'softmax')) \n \n adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999)\n \n model.compile(loss = 'categorical_crossentropy', optimizer = adam)\n \n return model\n\n# ### Deciding on an Action\n# \n# Action state is very simple - one of 4 possible actions (do nothing, or fire left, right or main engine). Action is selected randomly from the 4 actions, with the probability of a given action being chosen being proportional to the probability the actor network give for that action being the optimal action. This inherently encourages exploration in the early stages of training, and moves to a exploitation strategy as the network becomes more sure of itself. 
\n# \n\ndef decide_action(actor, state):\n\n flat_state = np.reshape(state, [1,8])\n action = np.random.choice(4, 1, p = actor.predict(flat_state)[0])[0]\n \n return(action)\n\n# ### Running episodes\n# \n# The simulation is run for a predefined number of episodes.\n# \n# For each step, the state, action, resulting state, reward and whether or not the step completed the episode (the boolean 'done') were saved in a list 'memory'.\n# \n# For each episode the totalreward is saved in an array 'totrewardarray'.\n# \n# Each episode is limited to 1000 timesteps, to cut short scenarios where the lander (which contains infinite fuel) refusing to land in the early stages of training.\n# \n# The episodes run until either the predefined number of episodes are completed, or the problem is considered solved (average totalreward of last 100 episodes exceeds 200). \n\ndef run_episodes(env, actor, r = False, iters=1):\n \n memory = deque()\n \n totrewardarray = []\n \n bestyet = float('-inf')\n \n i = 0\n \n while i < iters:\n \n i += 1\n state = env.reset()\n \n totalreward = 0\n \n cnt = 0 \n \n done = False\n \n while not done and cnt <1000:\n \n cnt += 1\n \n if r:\n env.render()\n \n action = decide_action(actor, state)\n \n observation, reward, done, _ = env.step(action) \n \n totalreward += reward\n \n state_new = observation \n \n memory.append((state, action, reward, state_new, done))\n \n state = state_new\n \n totrewardarray.append(totalreward)\n\n return(memory, totrewardarray)\n\n# ### Training the Networks\n# \n# Now the memory list gathered from running the episodes to a training function which trains the networks. \n# \n# The training data is shuffled so it is not presented to the networks in order. \n# \n# The discount factor, 'gamma', is another hyperparameter that will need to be optimised. 
\n\ndef train_models(actor, critic, memory, gamma):\n\n random.shuffle(memory)\n \n for i in range(len(memory)):\n \n state, action, reward, state_new, done = memory[i]\n \n flat_state_new = np.reshape(state_new, [1,8])\n flat_state = np.reshape(state, [1,8])\n \n target = np.zeros((1, 1))\n advantages = np.zeros((1, 4))\n\n value = critic.predict(flat_state)\n next_value = critic.predict(flat_state_new)\n\n if done:\n advantages[0][action] = reward - value\n target[0][0] = reward\n else:\n advantages[0][action] = reward + gamma * (next_value) - value\n target[0][0] = reward + gamma * next_value\n \n actor.fit(flat_state, advantages, epochs=3, verbose=0)\n critic.fit(flat_state, target, epochs=3, verbose=0) \n \n# ### Running episodes without training\n# \n# Sometimes we might want to run episodes without saving data for training, for instance if we want to render a few episodes of the trained network, or if we want to assess the performance of a trained network. This is simply a modification of the 'run_episodes' function. \n# \n# It includes a render option (boolean 'r') which turns on or off rendering the episode. \n\ndef play_game(env, iters, r = True):\n \n totalrewardarray = []\n \n for i in range(iters):\n \n totalreward = 0\n cnt = 0\n \n state = env.reset()\n \n done = False\n \n while not done and cnt <1000:\n \n cnt += 1\n \n if r:\n env.render()\n \n action = decide_action(actor, state)\n \n observation, reward, done, _ = env.step(action) \n \n totalreward += reward\n \n state_new = observation \n \n state = state_new\n \n totalrewardarray.append(totalreward)\n \n return totalrewardarray\n\n\n# Function to run episodes, train the networks and output an array of the rewards from each episode to allow us to plot the performance of the models. 
\n\ndef run_and_train(a_size, c_size, alr, clr, gamma, numepisodes):\n \n env = gym.make('LunarLander-v2')\n \n totrewardarray = []\n \n actor = build_model_actor(num_input_nodes = 8, num_output_nodes = 4, lr = alr, size = a_size)\n critic = build_model_critic(num_input_nodes = 8, num_output_nodes = 1, lr= clr, size = c_size)\n \n totrewardarray = [] \n \n best = float('-inf')\n \n episodes = 0\n \n while episodes < numepisodes: \n \n memory, temptotrewardarray = run_episodes(env, actor, r = False, iters = 1)\n \n totrewardarray.extend(temptotrewardarray)\n \n episodes = len(totrewardarray)\n \n if episodes >= 100:\n score = np.average(totrewardarray[-101:-1])\n if score>best:\n best = score\n if episodes%100==0:\n print('Episode ', episodes, 'of',numepisodes, 'Average Reward (last 100 eps)= ', score, 'Best = ', best)\n \n train_models(actor, critic, memory, gamma)\n \n avgarray = []\n cntarray = []\n\n for i in range(100,len(totrewardarray),10):\n avgarray.append(np.average(totrewardarray[i-100:i]))\n cntarray.append(i)\n \n params = 'A_size='+str(a_size)+'C_size='+str(c_size)+'ALR='+str(alr)+'CLR='+str(clr)+'gamma='+str(gamma)+'best='+str(round(best,1))\n \n return(cntarray, avgarray, params)\n \nif __name__ == '__main__':\n \n with Pool(3) as p:\n x = p.starmap(run_and_train, [([64,64,64],[64,64,64], [5e-6], [5e-4], [0.999], 1500),([64,64],[64,64], [5e-6], [5e-4], [0.999], 1500),([128],[128], [5e-6], [5e-4], [0.999], 1500)])\n \n for i in range(len(x)):\n plt.plot(x[i][0], x[i][1], label = x[i][2])\n \n plt.legend(loc='best')\n plt.show()\n \n\n \n\n" } ]
1
craigcurtin/mlb
https://github.com/craigcurtin/mlb
b68b5bf18e26050e94d10d4b6f826c8bb51bef36
5b99b418559e5f0829a3a4c7ba3cbdb7d372fc2c
4f255f5a2c91b6d8a4d5341923ce2d0c3a27e954
refs/heads/master
2023-04-18T21:52:30.688682
2021-05-11T12:25:05
2021-05-11T12:25:05
366,148,579
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5233758091926575, "alphanum_fraction": 0.5239830017089844, "avg_line_length": 34.80434799194336, "blob_id": "f760711d2c1e4db423ebf2e31b007f966627f4f7", "content_id": "513fdc0c7379b65ecffa519407e8b9df6ec1ddb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1647, "license_type": "no_license", "max_line_length": 87, "num_lines": 46, "path": "/Game.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import datetime\nimport pytz\nimport os\n\n\nclass Game(object):\n \"\"\"Class Game - methods to extract various data pieces of the Game dictionary\"\"\"\n\n def __init__(self, game_dict):\n self.game_dict = game_dict\n\n def game_id(self):\n return self.game_dict['game_id']\n\n def game_time(self):\n return self.game_dict['game_datetime']\n\n def home_name(self):\n return self.game_dict['home_name']\n\n def away_name(self):\n return self.game_dict['away_name']\n\n def home_probable_pitcher(self):\n return self.game_dict['home_probable_pitcher'] or 'TBD'\n\n def away_probable_pitcher(self):\n return self.game_dict['away_probable_pitcher'] or 'TBD'\n\n def venue_name(self):\n return self.game_dict['venue_name']\n\n def summary_info(self):\n # datetime info is stored as UTC, extract the 'trailing Z' from the string\n utc_datetime = datetime.datetime.fromisoformat(self.game_time()[:-1])\n utc_datetime = utc_datetime.replace(tzinfo=pytz.utc)\n local_timezone = pytz.timezone(\"US/Eastern\")\n local_datetime = utc_datetime.astimezone(local_timezone)\n\n return '{} {} at {}, SP: {} vs {}{}'.format(local_datetime.strftime(\"%H:%MET\"),\n self.away_name(),\n self.home_name(),\n self.away_probable_pitcher(),\n self.home_probable_pitcher(),\n os.linesep,\n )\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 10.399999618530273, "blob_id": "089738a0eec850ec1d90be3e5ceaf004bc5e4600", "content_id": "8b8be4d258a962d084b614e193151cecbfe8bdb3", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 56, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/requirements.txt", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "pip==21.1.1\nsetuptools==56.2.0\nMLB-StatsAPI\npytz\nyagmail" }, { "alpha_fraction": 0.6249139904975891, "alphanum_fraction": 0.6434962153434753, "avg_line_length": 30.586956024169922, "blob_id": "3eb7c6a24dac5e22aa5af390d0f320f81405d031", "content_id": "76b413d000b59813da1942fe02c66d7c43c1c001", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1453, "license_type": "no_license", "max_line_length": 103, "num_lines": 46, "path": "/todays_game.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import statsapi\nimport datetime\nfrom datetime import datetime as dt\nfrom Game import Game\nimport logging\nimport sys\nimport os\n\nfrom cc_mail import cc_mail\nfrom util_logger import setup_logger\nfrom yagmail import send_yag\n\n\ndef todays_game():\n today = datetime.date.today()\n tommorrow = today + datetime.timedelta(days=1)\n # sched = statsapi.schedule(start_date='07/01/2018', end_date='07/31/2018', team=143, opponent=121)\n statsapi.lookup_team(147)\n sched_games = statsapi.schedule(start_date=today, end_date=tommorrow)\n today_games = {}\n tomorrow_games = {}\n for game in sched_games:\n if today == dt.strptime(game['game_date'], '%Y-%m-%d').date():\n today_games[game['game_id']] = Game(game)\n else:\n tomorrow_games[game['game_id']] = Game(game)\n\n body = \"\"\n # now, print out Today's followed by Tommorrow's games\n body += \"Today's Games: {}{}\".format(today.isoformat(), os.linesep)\n for game in today_games:\n body += '{}'.format(today_games[game].summary_info())\n\n body += \"Tommorrow's Games: {}{}\".format(tommorrow.isoformat(), os.linesep)\n for game in tomorrow_games:\n body += 
'{}'.format(tomorrow_games[game].summary_info())\n return body\n\n\nif __name__ == '__main__':\n setup_logger('todays_game', 'c:/Temp', logging.DEBUG)\n\n email_body = todays_game()\n cc_mail('[email protected]', '{} MLB games'.format(datetime.date.today()), email_body)\n\n sys.exit(0)\n" }, { "alpha_fraction": 0.4701397716999054, "alphanum_fraction": 0.48538753390312195, "avg_line_length": 27.14285659790039, "blob_id": "be92835b1513a505cee5719d6008a1dadcf00da6", "content_id": "dd5cafb2a999d7173289773d89eff7940251c486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 57, "num_lines": 28, "path": "/standings.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import statsapi\n\n\ndef standings():\n standings = statsapi.standings(leagueId=\"103,104\",\n division=\"all\",\n include_wildcard=True,\n season=None,\n standingsTypes=None,\n date=None, )\n return standings\n\ndef standings_data():\n standings_data = statsapi.standings_data(\n leagueId=\"103,104\",\n division=\"all\",\n include_wildcard=True,\n season=None,\n standingsTypes=None,\n date=None,)\n return standings_data\n\nif __name__ == '__main__':\n standings = standings()\n print (standings)\n\n standings_data = standings_data()\n print (standings_data)" }, { "alpha_fraction": 0.6053383946418762, "alphanum_fraction": 0.6120114326477051, "avg_line_length": 35.13793182373047, "blob_id": "3cf92e5d5b5482537f6fd2e9803dfa9217452371", "content_id": "6d7acd8c31f942484a9ee0c7420efcca7555b6ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1049, "license_type": "no_license", "max_line_length": 83, "num_lines": 29, "path": "/Roster.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import re\nfrom collections import namedtuple\nfrom collections import defaultdict\n\nPosition = namedtuple('Position', 'pos 
name')\n\nvalid_positions = ['P', 'C', '1B', '2B', '3B', 'SS', 'LF', 'CF', 'RF']\n\nclass Roster(object):\n def __init__(self, teamId, roster_list):\n self.teamId = teamId\n self.uniform_number_dict = {}\n self.position_dict = defaultdict(list)\n self.roster_list = roster_list\n for player in roster_list:\n if len(player) == 0:\n continue\n uniform_number, position, name = re.split(r\"\\s{2,}\", player)\n self.uniform_number_dict[uniform_number[1:]] = Position(position, name)\n self.position_dict[position].append(uniform_number[1:])\n def position(self, position):\n if position in valid_positions:\n return self.position_dict[position]\n else:\n return []\n def uniform_number(self, uniform_number):\n return self.uniform_number_dict[uniform_number]\n def roster(self):\n return self.roster\n\n" }, { "alpha_fraction": 0.5981566905975342, "alphanum_fraction": 0.599078357219696, "avg_line_length": 40.61538314819336, "blob_id": "d04f5a50d93266e000faa419d75667c8f1355570", "content_id": "7fed71ff15304daab6b4e88b61d83ebd8e7c694c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 104, "num_lines": 26, "path": "/roster_info.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import statsapi\nfrom Team import Team\nfrom datetime import datetime\nfrom Roster import Roster\nfrom Roster import valid_positions\n\ndef team_roster(teamId):\n team_roster = statsapi.roster(teamId, rosterType=None, season=datetime.now().year, date=None)\n return team_roster\n\nif __name__ == '__main__':\n team_dict = {}\n teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})\n for team in teams['teams']:\n team_dict[team['id']] = Team(team)\n league_roster_dict = {}\n for teamId in team_dict.keys():\n roster = team_roster(teamId)\n team_roster_list = roster.split('\\n')\n league_roster_dict[teamId] = Roster(teamId, team_roster_list)\n print 
(team_dict[teamId].summary_info())\n for position in valid_positions:\n for uniform_number in league_roster_dict[teamId].position(position):\n print ('{}, #{}, {}'.format(position,\n uniform_number,\n league_roster_dict[teamId].uniform_number(uniform_number)))\n\n\n\n" }, { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 33, "blob_id": "c6b6534a6f5f09f564c85da284b79efcdf05985a", "content_id": "c24473cffc7bd0a8992b5498185678bf99e22219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 137, "num_lines": 16, "path": "/linescore_info.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import statsapi\nfrom teams_info import teams_info\n\nif __name__ == '__main__':\n teams_dict = teams_info()\n for teamId in teams_dict.keys():\n most_recent_game_id = statsapi.last_game(teamId)\n print(statsapi.boxscore(most_recent_game_id))\n print(statsapi.linescore(most_recent_game_id))\n\n\n statsapi.linescore(gamePk, timecode=None)\n params = {\n \"gamePk\": gamePk,\n \"fields\": \"gameData,teams,teamName,shortName,status,abstractGameState,liveData,linescore,innings,num,home,away,runs,hits,errors\",\n }\n" }, { "alpha_fraction": 0.5204113721847534, "alphanum_fraction": 0.5210345983505249, "avg_line_length": 27.651784896850586, "blob_id": "7d4a1b09cc3ce1351201d9b499e5490528464142", "content_id": "7c0522dc84703b11701cd571c071466dc4839358", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3209, "license_type": "no_license", "max_line_length": 85, "num_lines": 112, "path": "/Team.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import os\n\n\n# noinspection PyPep8Naming\nclass Team(object):\n def __init__(self, team_dict):\n self.team_dict = team_dict\n\n def id(self):\n return self.team_dict['id']\n\n def name(self):\n return 
self.team_dict['name']\n\n def link(self):\n return self.team_dict['link']\n\n def season(self):\n return self.team_dict['season']\n\n def league_id(self):\n return self.team_dict['league']['id']\n\n def league_name(self):\n return self.team_dict['league']['name']\n\n def league_link(self):\n return self.team_dict['league']['link']\n\n def division_id(self):\n return self.team_dict['division']['id']\n\n def division_name(self):\n return self.team_dict['division']['name']\n\n def division_link(self):\n return self.team_dict['division']['link']\n\n def venue_id(self):\n return self.team_dict['venue']['id']\n\n def venue_name(self):\n return self.team_dict['venue']['name']\n\n def venue_link(self):\n return self.team_dict['venue']['link']\n\n def springVenue_id(self):\n return self.team_dict['springVenue']['id']\n\n # noinspection PyPep8Naming\n def springVenue_link(self):\n return self.team_dict['springVenue']['link']\n\n def teamCode(self):\n return self.team_dict['teamCode']\n\n def fileCode(self):\n return self.team_dict['fileCode']\n\n def abbreviation(self):\n return self.team_dict['abbreviation']\n\n def teamName(self):\n return self.team_dict['teamName']\n\n def locationName(self):\n return self.team_dict['locationName']\n\n def firstYearOfPlay(self):\n return self.team_dict['firstYearOfPlay']\n\n def sport_id(self):\n return self.team_dict['sport']['id']\n\n def sport_name(self):\n return self.team_dict['sport']['name']\n\n def sport_link(self):\n return self.team_dict['sport']['link']\n\n def shortName(self):\n return self.team_dict['shortName']\n\n def springLeague_id(self):\n return self.team_dict['springLeague']['id']\n\n def springLeague_name(self):\n return self.team_dict['springLeague']['name']\n\n def springLeague_link(self):\n return self.team_dict['springLeague']['link']\n\n def springLeague_abbreviation(self):\n return self.team_dict['springLeague']['abbreviation']\n\n def allStarStatus(self):\n return self.team_dict['allStarStatus']\n\n def 
active(self):\n return self.team_dict['active']\n\n def summary_info(self):\n return '{}, play in {}, in {} at {} since: {}'.format(self.name(),\n # self.shortName(),\n # self.league_name(),\n self.division_name(),\n self.locationName(),\n self.venue_name(),\n self.firstYearOfPlay(),\n os.linesep,\n )\n" }, { "alpha_fraction": 0.5826042294502258, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 27.173913955688477, "blob_id": "fd1fcd2a5e27c99433f95fc1204f1f95b9148e5b", "content_id": "0473d6543585b37b80ea94bcae5af6574b77d1be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 116, "num_lines": 69, "path": "/player_info.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import statsapi\nfrom util_logger import setup_logger\nimport logging\nfrom Team import Team\nimport sys\n\n\ndef team_info():\n # teams = statsapi.get('teams',{'sportIds':1,'activeStatus':'Yes','fields':'teams,name,id,division,league'})\n team_dict = {}\n teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})\n for team in teams['teams']:\n team_dict[team['id']] = Team(team)\n\n return team_dict\n\n\nif __name__ == '__main__':\n setup_logger('teams', 'c:/Temp', logging.DEBUG)\n\n import statsapi\n from util_logger import setup_logger\n import logging\n from Team import Team\n import sys\n\n\n def team_info():\n # teams = statsapi.get('teams',{'sportIds':1,'activeStatus':'Yes','fields':'teams,name,id,division,league'})\n team_dict = {}\n teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})\n for team in teams['teams']:\n team_dict[team['id']] = Team(team)\n\n return team_dict\n\n\n if __name__ == '__main__':\n setup_logger('teams', 'c:/Temp', logging.DEBUG)\n team_dict = team_info()\n buffer = []\n for team_id in team_dict.keys():\n buffer.append(team_dict[team_id].summary_info())\n buffer.sort()\n for buf in buffer:\n print(buf)\n 
logging.info(\"normal termination\")\n sys.exit(0)\n import statsapi\n from util_logger import setup_logger\n import logging\n from Team import Team\n import sys\n\n\n def team_info():\n # teams = statsapi.get('teams',{'sportIds':1,'activeStatus':'Yes','fields':'teams,name,id,division,league'})\n team_dict = {}\n teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})\n for team in teams['teams']:\n team_dict[team['id']] = Team(team)\n\n return team_dict\n\n\n if __name__ == '__main__':\n setup_logger('teams', 'c:/Temp', logging.DEBUG)\n\n statsapi.lookup_player()" }, { "alpha_fraction": 0.6502073407173157, "alphanum_fraction": 0.6607614159584045, "avg_line_length": 47.23636245727539, "blob_id": "62a56234a922a58a8bdfe52b65188a706fe37070", "content_id": "fa8412514a97110bf2929b19ab1f32eea316ac0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2653, "license_type": "no_license", "max_line_length": 292, "num_lines": 55, "path": "/util_logger.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import sys\nimport logging\nfrom pytz import timezone\nfrom datetime import datetime\nfrom pathlib import Path\n\n# *force* UTC based time in log messages\ntz = timezone('UTC')\n\n\n# logging formatter, specify UTC as TZ to hardcode\ndef time_tz(*args):\n return datetime.now(tz).timetuple()\n\n\n# TODO - CSC working this function to be JSON aware/enabled ...\n\ndef setup_logger(app_name, log_directory, log_level):\n \"\"\"configure logger with UTC timestamp, bunch of default values\"\"\"\n # Setting up logger\n # log_levels: NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50\n # TODO - on linux we want /var/log ... error on MacOs ... 
protected directory\n # log_file_name = Path('/var/log/{}.log'.format(app_name))\n log_file_name = Path('{}/{}.log'.format(log_directory, app_name))\n\n short_file_format = \"%(asctime)s:%(levelname)s:%(message)s\"\n long_file_format = \"%(asctime)s %(HOST)s %(AppId)d %(AppVersion)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s %(uid)\"\n long_file_format = \"%(asctime)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s\"\n # long_file_format = \"%(asctime)s:%(levelname)s%(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s\"\n log_file_format = short_file_format\n\n # make sure valid log level is passed in, default to DEBUG ...\n valid_log_levels = [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL]\n if log_level not in valid_log_levels:\n log_level = logging.DEBUG\n\n extra_attributes = {'Host': '10.0.0.1',\n 'AppId': 1024,\n 'AppVersion': '1.0.0',\n 'uid': 12345}\n logger = logging.getLogger()\n logging.LoggerAdapter(logger, extra_attributes)\n\n # add in our custom UTC timezone converter\n logging.Formatter.converter = time_tz\n logging.basicConfig(level=log_level, filename=log_file_name, filemode=\"a\",\n format=log_file_format)\n\n # configure stdout same as file\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter(log_file_format))\n logging.getLogger().addHandler(sh)\n\n logging.info('App:{} startup'.format(app_name))\n return\n" }, { "alpha_fraction": 0.6984127163887024, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 
24.200000762939453, "blob_id": "97d44d2766d38d07bc8f21b332d6e51d84212e45", "content_id": "2dda1f4b5aa7fd29f65442e9baf81b2053dc5470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 42, "num_lines": 5, "path": "/yagmail.py", "repo_name": "craigcurtin/mlb", "src_encoding": "UTF-8", "text": "import yagmail\n\ndef send_yag(to_email, subject, contents):\n yag = yagmail.SMTP()\n yag.send(to_email, subject, contents)\n" } ]
11
fusion44/crypternity-backend
https://github.com/fusion44/crypternity-backend
d18636af16dd03effb1b864c23057f7354b80459
091158d3c25bf83c45a90864cefddb93d480ceb3
bc9083891b27354f4f3ce1939e3ef7085f6fbb08
refs/heads/master
2021-04-26T23:14:55.769657
2018-12-01T15:43:12
2018-12-01T15:43:12
123,955,081
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7242424488067627, "alphanum_fraction": 0.7242424488067627, "avg_line_length": 21, "blob_id": "db4087ac42c18c13bb6dbb188ec0b366374009cc", "content_id": "bb8e6242c3a17a7ee584ce9eb6f5f7186b0f96f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/backend/user_profile/tasks.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"\nContains all recurring tasks relevant to the user.\nThis includes:\n* Calculating user's net worth\n* Calculating data necessary for the charts shown in UI\n* Scheduling scans for new trades in his exchange and wallet accounts\n* ...\n\"\"\"\nfrom backend.celery import app\n\n\[email protected]\ndef test(arg):\n \"\"\"Test task\"\"\"\n print(arg)\n" }, { "alpha_fraction": 0.776157796382904, "alphanum_fraction": 0.776157796382904, "avg_line_length": 35.46875, "blob_id": "77570f7e32a42e3de678e8e6463801a4f4430e43", "content_id": "0a3ceae6f28ad31b073f056e26641c2d37e6f705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 100, "num_lines": 32, "path": "/backend/schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import graphene\n\nimport backend.coins.schema\nimport backend.accounts.schema\nimport backend.transactions.schema\nimport backend.user_profile.schema\n\n\nclass Query(backend.coins.schema.Query, backend.user_profile.schema.Query,\n backend.accounts.schema.Query, backend.transactions.schema.Query,\n graphene.ObjectType):\n # This class will inherit from multiple Queries\n # as we begin to add more apps to our project\n pass\n\n\nclass Mutation(graphene.ObjectType):\n create_account = backend.accounts.schema.CreateAccountMutation.Field()\n create_crypto_address = 
backend.accounts.schema.CreateCryptoAddressMutation.Field(\n )\n edit_crypto_address = backend.accounts.schema.EditCryptoAddressMutation.Field(\n )\n edit_account = backend.accounts.schema.EditAccountMutation.Field()\n account_refresh_transactions = backend.accounts.schema.AccountRefreshTransactionsMutation.Field(\n )\n coins_refresh_mutation = backend.coins.schema.CoinRefreshTransactionsMutation.Field(\n )\n import_transaction = backend.transactions.schema.ImportTransactionsMutation.Field(\n )\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)" }, { "alpha_fraction": 0.6409090757369995, "alphanum_fraction": 0.6621212363243103, "avg_line_length": 30.428571701049805, "blob_id": "222b0ae4019be20f13fe12429331676dbe18af41", "content_id": "bdcede3c25d837614940a7bef131f6dfadc523ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 660, "license_type": "no_license", "max_line_length": 60, "num_lines": 21, "path": "/backend/coins/models.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "'''Contains the models for this app'''\nfrom django.db import models\n\n\nclass Coin(models.Model):\n '''Database model representing a coin'''\n\n class Meta:\n ordering = (\"symbol\", )\n\n id = models.AutoField(primary_key=True)\n cc_id = models.IntegerField(unique=True)\n img_url = models.CharField(max_length=200)\n name = models.CharField(max_length=200)\n symbol = models.CharField(max_length=10)\n coin_name = models.CharField(max_length=200)\n full_name = models.CharField(max_length=200)\n\n def __str__(self):\n '''Assembles a string description for this object'''\n return \"{} - {}\".format(self.symbol, self.full_name)\n" }, { "alpha_fraction": 0.6469830274581909, "alphanum_fraction": 0.6489155888557434, "avg_line_length": 32.264286041259766, "blob_id": "e309a0d5bfd44cdc2833a849838d4f80847a4908", "content_id": "b06dda8285bbca8639072b43f96f4427b2766e5e", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4657, "license_type": "no_license", "max_line_length": 136, "num_lines": 140, "path": "/backend/transactions/schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import json\nimport graphene\nimport time\n\nfrom django.db.models import QuerySet\nfrom graphene_django.types import DjangoObjectType\n\nfrom backend.transactions.models import Transaction\nfrom backend.transactions.importers.livecoin import import_data_livecoin\n\n\nclass TransactionType(DjangoObjectType):\n class Meta:\n model = Transaction\n only_fields = [\n 'id',\n 'owner',\n 'date',\n 'spent_currency',\n 'spent_amount',\n 'source_peer',\n 'acquired_currency',\n 'acquired_amount',\n \"target_peer\",\n \"fee_currency\",\n \"fee_amount\",\n \"book_price_eur\",\n \"book_price_btc\",\n \"book_price_fee_eur\",\n \"book_price_fee_btc\",\n \"icon\",\n ]\n\n tags = graphene.List(graphene.String)\n\n @staticmethod\n def resolve_tags(self: Transaction, context, **kwargs):\n \"\"\"Resolve all tags associated with this object\"\"\"\n return self.tags.all().order_by(\"name\")\n\n\nclass Query(object):\n # Single transaction by ID\n get_transaction = graphene.Field(\n TransactionType, id=graphene.Int(required=True))\n\n def resolve_get_transaction(self, info, **kwargs) -> Transaction:\n if not info.context.user.is_authenticated:\n return None\n\n transaction_id = kwargs.get('id')\n\n if transaction_id is not None:\n t = Transaction.objects.get(pk=transaction_id)\n if t.owner == info.context.user:\n return t\n return None\n\n # Get all transaction where user has access rights\n all_transactions = graphene.List(TransactionType)\n\n def resolve_all_transactions(self, info, **kwargs) -> QuerySet:\n if not info.context.user.is_authenticated:\n return Transaction.objects.none()\n filtered = Transaction.objects.filter(owner=info.context.user)\n return filtered\n\n\nclass 
TransactionData(graphene.InputObjectType):\n \"\"\"Data to import from the client. This should normally be already pre-processed data.\n \"\"\"\n date = graphene.String()\n transaction_type = graphene.String(\n required=True,\n description=\"\"\"\nOptions:\n\nexchange - exchange between currencies on this peer\ntransfer - transfer one coin from one wallet to another\nbuy - buy cryptos from fiat\nsell - sell cryptos for fiat\nincome - receive cryptos for a service or selling of a good (refferal bonus, selling of hardware etc)\nexpense - pay for a service or a good (online subscription, buy of an hardware)\nmining - mining income\n\"\"\")\n transaction_type_raw = graphene.String(\n description=\n \"\"\"The raw unprocessed transaction type coming from the data source.\n Can be different from peer to peer. This is included so the importer in the server might implement this in a non standard way\"\"\"\n )\n spent_currency = graphene.String(required=True)\n spent_amount = graphene.Float(required=True)\n source_peer = graphene.ID()\n acquired_currency = graphene.String()\n acquired_amount = graphene.Float()\n target_peer = graphene.ID()\n fee_currency = graphene.String()\n fee_amount = graphene.Float()\n tags = graphene.List(graphene.String)\n\n\nclass ImportTransactionInput(graphene.InputObjectType):\n \"\"\"The input type for the import mutation.\n \"\"\"\n service_type = graphene.String()\n import_mechanism = graphene.String(\n required=True,\n description=\"\"\"\n The mechanism of import: \\n\n * csv - Import via file (csv, excel, etc)\n * manual - Import with manually entered input\n \"\"\")\n transactions = graphene.List(TransactionData, required=True)\n\n\nclass ImportTransactionsMutation(graphene.relay.ClientIDMutation):\n \"\"\"Contains the import mutations\"\"\"\n\n class Input:\n \"\"\"The input class for the mutation\"\"\"\n data = graphene.Field(ImportTransactionInput, required=True)\n\n status = graphene.Int()\n formErrors = graphene.String()\n transactions 
= graphene.List(TransactionType)\n\n @classmethod\n def mutate(cls, root, info, input=None):\n if not info.context.user.is_authenticated:\n return ImportTransactionsMutation(status=403)\n\n if input.data.service_type == \"livecoin\":\n transactions = import_data_livecoin(input.data, info.context.user)\n return ImportTransactionsMutation(\n status=200, transactions=transactions)\n\n return ImportTransactionsMutation(\n status=404,\n formErrors=\"Service type {} not found\".format(\n input.data.service_type))\n" }, { "alpha_fraction": 0.797468364238739, "alphanum_fraction": 0.797468364238739, "avg_line_length": 30.600000381469727, "blob_id": "3d45f275e6c24017578bdb5898dc957f16055616", "content_id": "913f1c53c5aed798fcad1b425d5cb9d12b54f12e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/backend/coins/admin.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "'''Contains all database models for the coins django app'''\nfrom django.contrib import admin\nfrom backend.coins.models import Coin\n\nadmin.site.register(Coin)\n" }, { "alpha_fraction": 0.7410072088241577, "alphanum_fraction": 0.7410072088241577, "avg_line_length": 26.399999618530273, "blob_id": "969d8222682a62bce925ebf9cd35834eb92cc082", "content_id": "8f1783c9e7325a112486cce4b26dbe3aebdbae5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 139, "license_type": "no_license", "max_line_length": 48, "num_lines": 5, "path": "/pytest.ini", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\n[pytest]\nDJANGO_SETTINGS_MODULE = backend.test_settings\n\n# add options to command line.\naddopts = --cov-report html --cov --nomigrations \n" }, { "alpha_fraction": 0.6394051909446716, "alphanum_fraction": 0.6579925417900085, "avg_line_length": 25.899999618530273, 
"blob_id": "403a315ac4e9e58f957ae5812c5673d6562b9c54", "content_id": "ef8373fcbfc864700015c0f61cc442b77c17242f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/backend/celery.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, unicode_literals\nimport os\nfrom celery import Celery\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"backend.settings\")\napp = Celery(\"backend\")\n\napp.config_from_object(\"django.conf:settings\", namespace='CELERY')\n\napp.autodiscover_tasks()\n\napp.conf.beat_schedule = {\n 'update-coins-every-24-hours': {\n 'task': 'backend.coins.tasks.async_update_supported_coins',\n 'schedule': 86400.0, # 24 hours\n 'options': {\n 'task_id': \"task_update_coins\"\n },\n },\n}\n" }, { "alpha_fraction": 0.7404718399047852, "alphanum_fraction": 0.7563520669937134, "avg_line_length": 32.907691955566406, "blob_id": "65b0c8b23f772ca66900e8b7d7d528edacf11b72", "content_id": "e983cdfceee6619967fea84345ccc2a65650286b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2204, "license_type": "no_license", "max_line_length": 278, "num_lines": 65, "path": "/README.md", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# !Currently unmaintained!\n\n## Crypternity Backend\n\n### Setup\n\n[Virtualenvwrapper](http://virtualenvwrapper.readthedocs.io/en/latest/install.html) must be working properly on your system before continuing.\n\n* clone the repository\n* add a new virtual environment: _mkvirtualenv crypternity_\n* _pip install -r requirements.txt_\n* deactivate virtual environment to prevent some errors _deactivate_\n* use the environment: _workon crypternity_\n* _./manage.py makemigrations_\n* _./manage.py migrate_\n* copy config.ini.sample to config.ini 
and change the secret key and database password. The secret key should be a long string of random letters and numbers.\n\n#### RabbitMQ\n\nA RabbitMQ server or a Redis instance for [Celery](http://www.celeryproject.org/) is necessary. Easiest way to run RabbitMQ is using a Raspberry Pi with [HypriotOS](https://blog.hypriot.com/downloads/). Hypriot comes with everything necessary to run the necessary Docker Images:\n\nOn the RPi:\n\n* mkdir rabbitmqdata && mkdir rabbitmqlogs\n* _docker pull ronnyroos/rpi-rabbitmq_\n* _docker run --restart=unless-stopped -d -p 5672:5672 -p 15672:15672 -v /home/pirate/rabbitmqlogs:/data/log -v /home/pirate/rabbitmqdata:/data/mnesia ronnyroos/rpi-rabbitmq_\n\nOn the dev machine, open config.ini and find the following line:\n\n```config\ncelery_broker_url = amqp://192.168.178.108//\n```\n\nReplace the ip with your server ip\n\nRun Celery Beat (task scheduler):\n\n* _celery -A backend beat -l debug --scheduler django_celery_beat.schedulers:DatabaseScheduler_\n\nFinally, run Celery:\n\n* _celery worker -A backend --loglevel=debug --concurrency=4_\n\n#### Debugging using VS code\n\n* Install the Visual Studio code [Python extension](https://code.visualstudio.com/docs/languages/python)\n* Open workspace settings and set the appropriate python path for the virtual env\n\n```json\n{\n \"python.pythonPath\": \"/home/fusion44/.virtualenvs/crypternity/bin/python3.6\"\n}\n```\n\n* Select _Python: Django_ as DEBUG configuration\n\n### Testing\n\nRun _pytest --cov-report html --cov_ to run the tests.\n\nAlternatively, run tests and open the results coverage html page with Google Chrome: _pytest --cov-report html --cov && google-chrome-stable ./htmlcov/index.html_\n\n```\n\n```\n" }, { "alpha_fraction": 0.6121999621391296, "alphanum_fraction": 0.6159436106681824, "avg_line_length": 33.40151596069336, "blob_id": "f7dcbed156f3998338dfa7ff041c5dc254d706db", "content_id": "2db81b4d14e3e4669933d3f854a3679d7167711c", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4541, "license_type": "no_license", "max_line_length": 94, "num_lines": 132, "path": "/backend/transactions/fetchers/generic_exchange.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"\nContains all functions related to importing transactions\nfrom exchanges supported by the ccxt library\n\"\"\"\n\nimport ccxt\nimport time\nfrom datetime import datetime, timezone\nfrom django.utils.timezone import now\nfrom requests.exceptions import ReadTimeout\nfrom dateutil import parser\nfrom django.db.models import QuerySet\n\nfrom backend.utils.utils import get_name_price\n\nfrom backend.accounts.models import Account\nfrom backend.transactions.models import Transaction\nfrom backend.transactions.models import TransactionUpdateHistoryEntry\n\nfrom ...utils.utils import exchange_can_batch\n\n\ndef fetch_trades_unbatched(exchange: ccxt.Exchange):\n \"\"\"\n Some exchanges like Binance don't support fetching all trades at\n once and need to fetch per trading pair (market).\n \"\"\"\n markets = exchange.load_markets()\n trades = []\n for market in markets:\n try:\n trades += exchange.fetch_my_trades(market)\n except ReadTimeout as err:\n print(err)\n continue\n\n # exchange.rateLimit is milliseconds but time.sleep expects seconds\n # plus add an extra 2 seconds as some exchanges like Bitfinex have varying rate limits\n # and still return rate limit exceeded errors when using the value provided by CCXT\n time.sleep((exchange.rateLimit / 1000) + 2)\n return trades\n\n\ndef update_exchange_trx_generic(account: Account):\n \"\"\"\n Fetches all trades and if older than last check imports to database\n \"\"\"\n exchange: ccxt.Exchange = None\n starttime: datetime = now()\n\n if hasattr(ccxt, account.service_type):\n exchange: ccxt.Exchange = getattr(ccxt, account.service_type)({\n \"api_key\":\n account.api_key,\n \"secret\":\n account.api_secret\n })\n else:\n 
print(\"nope\")\n\n last_update_query: QuerySet = TransactionUpdateHistoryEntry.objects.filter(\n account=account).order_by('-date')\n latest_update = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc)\n\n if last_update_query.count():\n latest_update = last_update_query[:1][0].date\n\n transactions = []\n trades = []\n if exchange_can_batch(account.service_type):\n trades = exchange.fetch_my_trades()\n else:\n trades = fetch_trades_unbatched(exchange)\n\n total = len(trades)\n num_imports = 0\n\n if trades:\n for trade in trades:\n # print(trade[\"symbol\"] + \" \" + trade[\"datetime\"])\n\n trade_date = parser.parse(trade[\"datetime\"])\n if trade_date <= latest_update:\n print(\"skiping \", trade[\"symbol\"] + \" \" + trade[\"datetime\"])\n continue\n\n split = trade[\"symbol\"].split(\"/\")\n\n trx = Transaction()\n if trade[\"side\"] == \"buy\":\n trx.spent_amount = trade[\"cost\"]\n trx.spent_currency = split[1]\n\n trx.acquired_amount = trade[\"amount\"]\n trx.acquired_currency = split[0]\n elif trade[\"side\"] == \"sell\":\n trx.spent_amount = trade[\"amount\"]\n trx.spent_currency = split[0]\n\n trx.acquired_amount = trade[\"cost\"]\n trx.acquired_currency = split[1]\n\n trx.fee_amount = trade[\"fee\"][\"cost\"]\n trx.fee_currency = trade[\"fee\"][\"currency\"]\n\n trx.date = trade[\"datetime\"]\n trx.owner = account.owner\n trx.source_peer = account\n trx.target_peer = account\n\n date = parser.parse(trx.date)\n timestamp = time.mktime(date.timetuple())\n\n trx.book_price_btc = get_name_price(\n trx.spent_amount, trx.spent_currency, \"BTC\", timestamp)\n trx.book_price_eur = get_name_price(\n trx.spent_amount, trx.spent_currency, \"EUR\", timestamp)\n trx.book_price_fee_btc = get_name_price(\n trx.fee_amount, trx.fee_currency, \"BTC\", timestamp)\n trx.book_price_fee_eur = get_name_price(\n trx.fee_amount, trx.fee_currency, \"EUR\", timestamp)\n trx.icon = Transaction.TRX_ICON_EXCHANGE\n trx.save()\n trx.tags.add(account.service_type, 
Transaction.TRX_TAG_EXCHANGE)\n trx.save()\n num_imports += 1\n time.sleep(0.2) # avoid hammering the API's\n\n print(\"Imported {} trades.\".format(num_imports))\n entry: TransactionUpdateHistoryEntry = TransactionUpdateHistoryEntry(\n date=starttime, account=account, fetched_transactions=num_imports)\n entry.save()\n" }, { "alpha_fraction": 0.5096153616905212, "alphanum_fraction": 0.5692307949066162, "avg_line_length": 22.636363983154297, "blob_id": "88491952796ac8373cc8afbb5e5605577bd0416a", "content_id": "ff10abdc728413cff0039a32d383fe674157ffca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/backend/accounts/migrations/0007_auto_20180510_1515.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.5 on 2018-05-10 15:15\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0006_auto_20180506_1126'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='cryptoaddress',\n options={'ordering': ('id',)},\n ),\n migrations.AddField(\n model_name='cryptoaddress',\n name='watch',\n field=models.BooleanField(default=False),\n ),\n ]\n" }, { "alpha_fraction": 0.4949381351470947, "alphanum_fraction": 0.696287989616394, "avg_line_length": 16.096153259277344, "blob_id": "3dc19020ca4afdbe0c94c4e7ee659ec8f5343094", "content_id": "8e907a4b44a75b0b50618ee0649cdf59d483060f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1778, "license_type": "no_license", "max_line_length": 31, "num_lines": 104, "path": "/requirements.txt", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": 
"aiodns==1.1.1\naiohttp==3.1.3\namqp==2.2.2\naniso8601==3.0.0\narrow==0.12.1\nasn1crypto==0.24.0\nastroid==1.6.3\nasync-timeout==3.0.0\nattrs==18.1.0\nautobahn==18.4.1\nAutomat==0.6.0\nbilliard==3.5.0.3\ncachetools==2.0.1\ncchardet==2.1.1\nccxt==1.13.85\ncelery==4.1.0\ncertifi==2018.4.16\ncffi==1.11.5\nchardet==3.0.4\nclick==6.7\ncoinbase==2.1.0\ncoinmarketcap==4.2.1\ncolorclass==2.2.0\nconstantly==15.1.0\ncoverage==4.5.1\ncryptocompare==0.6\ncryptography==2.2.2\ndateparser==0.7.0\ndiskcache==3.0.6\nDjango==2.0.5\ndjango-celery-beat==1.1.1\ndjango-cors-headers==2.2.0\ndjango-crontab==0.7.1\ndjango-taggit==0.22.2\ndjangorestframework==3.8.2\ndjangorestframework-jwt==1.11.0\ndocopt==0.6.2\nFaker==0.8.13\nfirst==2.0.1\ngraphene==2.1\ngraphene-django==2.0.0\ngraphql-core==2.0\ngraphql-relay==0.4.5\nhyperlink==18.0.0\nidna==2.6\nidna-ssl==1.0.1\nincremental==17.5.0\niso8601==0.1.12\nisort==4.3.4\nkombu==4.1.0\nlazy-object-proxy==1.3.1\nmccabe==0.6.1\nmixer==6.0.1\nmock==2.0.0\nmore-itertools==4.1.0\nmultidict==4.2.0\npackaging==17.1\npbr==4.0.2\npip-tools==2.0.2\npip-upgrader==1.4.6\npluggy==0.6.0\npromise==2.1\npsycopg2==2.7.4\npy==1.5.3\npyasn1==0.4.2\npyasn1-modules==0.2.1\npycares==2.3.0\npycparser==2.18\npycrypto==2.6.1\npycryptodome==3.6.1\nPyJWT==1.6.1\npylint==1.8.4\npylint-django==0.11\npylint-plugin-utils==0.2.6\npyOpenSSL==17.5.0\npyparsing==2.2.0\npytest==3.5.1\npytest-cov==2.5.1\npytest-django==3.2.1\npytest-mock==1.10.0\npython-dateutil==2.7.2\npytz==2018.4\nregex==2018.2.21\nrequests==2.18.4\nrequests-cache==0.4.13\nrope==0.10.7\nruamel.yaml==0.15.37\nRx==1.6.1\nservice-identity==17.0.0\nsingledispatch==3.4.0.3\nsix==1.11.0\nSQLAlchemy==1.2.7\nterminaltables==3.1.0\ntext-unidecode==1.2\nTwisted==18.4.0\ntxaio==2.10.0\ntyping==3.6.4\ntzlocal==1.5.1\nurllib3==1.22\nvine==1.1.4\nwrapt==1.10.11\nyapf==0.21.0\nyarl==1.2.3\nzope.interface==4.5.0\n" }, { "alpha_fraction": 0.48227664828300476, "alphanum_fraction": 0.5501440763473511, "avg_line_length": 
28.53191566467285, "blob_id": "1745153efc1221ce3b9c1e82d9d179ee5214a14e", "content_id": "3b26ef6044b03ec626f156fdbd029df4a22cd7eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6940, "license_type": "no_license", "max_line_length": 82, "num_lines": 235, "path": "/backend/transactions/tests/test_fetcher_generic.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains all tests for the generic exchange fetcher\"\"\"\nimport time\nfrom datetime import datetime, timedelta\nimport pytest\nfrom django.utils.timezone import now\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom mixer.backend.django import mixer\nimport ccxt\nimport cryptocompare\n\nfrom backend.accounts.models import Account\nfrom backend.transactions.models import Transaction\nfrom backend.transactions.models import TransactionUpdateHistoryEntry\n\nfrom ..fetchers.generic_exchange import update_exchange_trx_generic\n\npytestmark = pytest.mark.django_db\n\n\ndef new_get_historical_price(coin, curr=\"EUR\", timestamp=time.time()):\n return {coin: {curr: 2000.0}}\n\n\ndef new_load_markets(self):\n return {\n 'BTC/ETH': {},\n }\n\n\nBINANCE_CHECK_TRANSACTION_ID = 2\nBINANCE_AMOUNT = 0.20931215\nBINANCE_COST = 0.00357691\nBINANCE_PRICE = 0.01708888\nBINANCE_BOOK_PRICE_EUR = \\\n new_get_historical_price(\"BTC\")[\"BTC\"][\"EUR\"] * BINANCE_AMOUNT\n\n\ndef new_fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):\n ret_binance = [{\n 'amount': 0.1,\n 'cost': 0.0003,\n 'datetime': '2018-01-10T06:03:29.213Z',\n 'fee': {\n 'cost': 0.0002,\n 'currency': 'BNB'\n },\n 'id': '1',\n 'price': 0.1,\n 'side': 'sell',\n 'symbol': 'BTC/ETH',\n 'timestamp': 1515564209213,\n }, {\n 'amount': BINANCE_AMOUNT,\n 'cost': BINANCE_COST,\n 'datetime': '2017-12-28T09:26:52.249Z',\n 'fee': {\n 'cost': 0.011,\n 'currency': 'BNB'\n },\n 'id': '2',\n 'price': BINANCE_PRICE,\n 'side': 'sell',\n 'symbol': 'LTC/BTC',\n 
'timestamp': 1514453212249,\n }, {\n 'amount': 240.0,\n 'cost': 0.01,\n 'datetime': '2018-01-08T18:23:09.665Z',\n 'fee': {\n 'cost': 0.0037,\n 'currency': 'BNB'\n },\n 'id': '3',\n 'price': 4.335e-05,\n 'side': 'buy',\n 'symbol': 'XMR/BTC',\n 'timestamp': 1515694988665,\n }]\n\n ret_cryptopia = [{\n 'amount': 7.58039241,\n 'cost': 0.00356278,\n 'datetime': '2018-01-16T06:04:09.889Z',\n 'fee': {\n 'cost': 7.13e-06,\n 'currency': 'BTC'\n },\n 'id': '1',\n 'price': 0.00047,\n 'side': 'buy',\n 'symbol': 'EMC/BTC',\n 'timestamp': 1516082648889,\n }, {\n 'amount': 0.20931215,\n 'cost': 0.00357691,\n 'datetime': '2018-01-16T05:59:03.521Z',\n 'fee': {\n 'cost': 7.15e-06,\n 'currency': 'BTC'\n },\n 'id': '2',\n 'price': 0.01708888,\n 'side': 'sell',\n 'symbol': 'LTC/BTC',\n 'timestamp': 1516082342521,\n }, {\n 'amount': 130.77497801,\n 'cost': 0.0353184,\n 'datetime': '2017-12-25T20:25:33.460Z',\n 'fee': {\n 'cost': 7.064e-05,\n 'currency': 'LTC'\n },\n 'id': '3',\n 'price': 0.00027007,\n 'side': 'buy',\n 'symbol': 'DGB/LTC',\n 'timestamp': 1514233533460,\n }, {\n 'amount': 130.77497801,\n 'cost': 0.0353184,\n 'datetime': '2017-12-25T20:25:33.460Z',\n 'fee': {\n 'cost': 7.064e-05,\n 'currency': 'LTC'\n },\n 'id': '4',\n 'price': 0.00027007,\n 'side': 'buy',\n 'symbol': 'DGB/LTC',\n 'timestamp': 1514233533460,\n }]\n\n if symbol is None:\n return ret_cryptopia\n else:\n return ret_binance\n\n\[email protected]\ndef patch_ccxt(monkeypatch: MonkeyPatch):\n monkeypatch.setattr(ccxt.binance, \"load_markets\", new_load_markets)\n monkeypatch.setattr(ccxt.binance, \"fetch_my_trades\", new_fetch_my_trades)\n monkeypatch.setattr(ccxt.cryptopia, \"load_markets\", new_load_markets)\n monkeypatch.setattr(ccxt.cryptopia, \"fetch_my_trades\", new_fetch_my_trades)\n monkeypatch.setattr(cryptocompare, \"get_historical_price\",\n new_get_historical_price)\n\n\ndef test_update_exchange_trx_generic_binance(monkeypatch: MonkeyPatch):\n user = mixer.blend(\"auth.User\")\n account_bin: Account = 
mixer.blend(\n \"accounts.Account\", owner=user, service_type=\"binance\")\n account_crypt: Account = mixer.blend(\n \"accounts.Account\", owner=user, service_type=\"cryptopia\")\n\n patch_ccxt(monkeypatch)\n\n update_exchange_trx_generic(account_bin)\n update_exchange_trx_generic(account_crypt)\n\n t = Transaction.objects.filter(target_peer=account_bin)\n assert t.count() == 3\n\n t = Transaction.objects.filter(target_peer=account_crypt)\n assert t.count() == 4\n\n t: Transaction = Transaction.objects.get(pk=BINANCE_CHECK_TRANSACTION_ID)\n assert float(t.spent_amount) == BINANCE_AMOUNT\n assert float(t.acquired_amount) == BINANCE_COST\n assert float(t.book_price_eur) == BINANCE_BOOK_PRICE_EUR\n\n update_entry = TransactionUpdateHistoryEntry.objects.get(\n account=account_bin)\n assert update_entry.fetched_transactions == 3\n update_entry = TransactionUpdateHistoryEntry.objects.get(\n account=account_crypt)\n assert update_entry.fetched_transactions == 4\n\n\ndef test_update_exchange_trx_generic_transaction_history(\n monkeypatch: MonkeyPatch):\n \"\"\" Test, that the update function does not import \"\"\"\n user = mixer.blend(\"auth.User\")\n account_bin: Account = mixer.blend(\n \"accounts.Account\", owner=user, service_type=\"binance\")\n\n patch_ccxt(monkeypatch)\n\n date: datetime = now()\n\n mixer.blend(\n \"transactions.TransactionUpdateHistoryEntry\",\n date=date,\n account=account_bin,\n fetched_transactions=3)\n\n monkeypatch.setattr(\n ccxt.binance, \"fetch_my_trades\",\n lambda self, symbol=None, since=None, limit=None, params={}:\n [\n {\n 'amount': 0.3,\n 'cost': 0.00032,\n 'datetime': str(date + timedelta(days=-1)), # Should be discarded\n 'fee': {\n 'cost': 0.00044,\n 'currency': 'BNB'\n },\n 'id': '4',\n 'price': 0.1,\n 'side': 'sell',\n 'symbol': 'BTC/ETH',\n 'timestamp': 1515564209213,\n },\n {\n 'amount': BINANCE_AMOUNT,\n 'cost': BINANCE_COST,\n 'datetime': str(date + timedelta(days=1)),\n 'fee': {\n 'cost': 0.011,\n 'currency': 'BNB'\n },\n 
'id': '5',\n 'price': BINANCE_PRICE,\n 'side': 'sell',\n 'symbol': 'LTC/BTC',\n 'timestamp': 1514453212249,\n }\n ])\n\n update_exchange_trx_generic(account_bin)\n transaction = Transaction.objects.filter(target_peer=account_bin)\n assert transaction.count(\n ) == 1, \"Should not import transactions older than last update time\"\n" }, { "alpha_fraction": 0.5557940006256104, "alphanum_fraction": 0.6008583903312683, "avg_line_length": 23.526315689086914, "blob_id": "49d98b8943aabc8a6a488177e93605a9010b5079", "content_id": "ac20e9b18141ea2532d7820b1d59f8fe92de01d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/backend/accounts/migrations/0003_peer_class_type.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.2 on 2018-03-17 08:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0002_auto_generate_default_objects'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='peer',\n name='class_type',\n field=models.CharField(default='Bier', editable=False, max_length=50),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5888965129852295, "alphanum_fraction": 0.5958836674690247, "avg_line_length": 32.41878128051758, "blob_id": "5c4c7651525ed0def0f2ec0487e0f8ccc670ebdb", "content_id": "b9a172c2f5ddc3442d42b85fa607911a07b284b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13167, "license_type": "no_license", "max_line_length": 80, "num_lines": 394, "path": "/backend/accounts/schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import json\nimport graphene\nimport ccxt\nimport celery\nfrom django.db.models import ObjectDoesNotExist\n\nfrom graphene_django.types import 
DjangoObjectType\n\nfrom backend.accounts.models import Account, CryptoAddress, Peer\nfrom backend.coins.models import Coin\nfrom backend.accounts.tasks import async_update_account_trx\n\n\nclass PeerType(DjangoObjectType):\n class Meta:\n model = Peer\n\n\nclass SupportedService(graphene.ObjectType):\n short_name = graphene.String()\n long_name = graphene.String()\n importer = graphene.String()\n\n\nclass SupportedSymbol(graphene.ObjectType):\n symbol = graphene.String()\n base = graphene.String()\n quote = graphene.String()\n\n\nclass AccountType(DjangoObjectType):\n class Meta:\n model = Account\n\n\nclass CryptoAddressType(DjangoObjectType):\n class Meta:\n model = CryptoAddress\n\n\nclass Query(object):\n # Single account by ID or name\n get_account = graphene.Field(\n AccountType,\n id=graphene.ID(required=False, description=\"ID of the peer\"),\n name=graphene.String(required=False, description=\"Name of the peer\"))\n\n def resolve_get_account(self, info, **kwargs):\n account_id = kwargs.get('id')\n account_name = kwargs.get('name')\n\n if account_id is not None:\n return Account.objects.get(pk=account_id)\n if account_name is not None:\n return Account.objects.get(name=account_name)\n\n # Get all accounts where user has access rights\n all_accounts = graphene.List(AccountType)\n\n def resolve_all_accounts(self, info, **kwargs):\n if not info.context.user.is_authenticated:\n return Account.objects.none()\n filtered = Account.objects.filter(owner=info.context.user)\n return filtered\n\n get_crypto_addresses = graphene.List(\n CryptoAddressType,\n peer_id=graphene.ID(required=True, description=\"ID of the peer\"),\n description=\"Gets all crypto addresses for a peer\")\n\n def resolve_get_crypto_addresses(self, info, **kwargs):\n \"\"\"Gets all crypto addresses for a peer\"\"\"\n\n if not info.context.user.is_authenticated:\n return CryptoAddress.objects.none()\n\n peer_id = kwargs.get('peer_id')\n\n try:\n peer = Peer.objects.get(pk=peer_id)\n except 
ObjectDoesNotExist:\n return CryptoAddress.objects.none()\n\n if not peer.owner == info.context.user:\n return CryptoAddress.objects.none()\n\n return CryptoAddress.objects.filter(peer=peer)\n\n supported_services = graphene.List(SupportedService)\n\n def resolve_supported_services(self, info, **kwargs):\n l = []\n for val in Account.SERVICE_TYPES:\n s = SupportedService()\n s.short_name = val[0]\n s.long_name = val[1]\n s.importer = val[2]\n l.append(s)\n return l\n\n supported_symbols = graphene.List(\n SupportedSymbol, service=graphene.String(required=True))\n\n def resolve_supported_symbols(self, info, **kwargs):\n l = []\n if not info.context.user.is_authenticated:\n return l\n\n service_id = kwargs.get('service')\n try:\n exchange = getattr(ccxt, service_id)()\n markets = exchange.load_markets()\n for m in markets:\n market = markets[m]\n if market:\n s = SupportedSymbol()\n s.symbol = market[\"symbol\"]\n s.base = market[\"base\"]\n s.quote = market[\"quote\"]\n l.append(s)\n except AttributeError:\n # coinbase will land here\n # it is not supported by ccxt and will receive special treatment\n pass\n\n return l\n\n\nclass CreateAccountMutation(graphene.relay.ClientIDMutation):\n class Input:\n name = graphene.String()\n service_type = graphene.String()\n symbols = graphene.String()\n api_key = graphene.String()\n api_secret = graphene.String()\n\n status = graphene.Int()\n formErrors = graphene.String()\n account = graphene.Field(AccountType)\n\n @classmethod\n def mutate(cls, root, info, input: Input):\n if not info.context.user.is_authenticated:\n return CreateAccountMutation(status=403)\n name = input.get(\"name\", \"\").strip()\n service_type = input.get(\"service_type\", \"\").strip()\n symbols = input.get(\"symbols\", \"\").strip()\n api_key = input.get(\"api_key\", \"\").strip()\n api_secret = input.get(\"api_secret\", \"\").strip()\n\n # TODO: validate input using django forms or whatnot\n if not name or not service_type:\n return 
CreateAccountMutation(\n status=400,\n formErrors=json.dumps({\n \"account\": [\"Please enter valid account data\"]\n }))\n\n if Account.objects.filter(name=name).exists():\n print(\"exists\")\n return CreateAccountMutation(\n status=422,\n formErrors=json.dumps({\n \"account\": [\"A account with this name exists\"]\n }))\n\n obj = Account.objects.create(\n owner=info.context.user,\n name=name,\n slug=name,\n service_type=service_type,\n symbols=symbols,\n api_key=api_key,\n api_secret=api_secret)\n\n return CreateAccountMutation(status=200, account=obj)\n\n\nclass CreateCryptoAddressMutation(graphene.relay.ClientIDMutation):\n class Input:\n account_id = graphene.ID(required=True)\n address = graphene.String(required=True)\n coin_id = graphene.ID(required=True)\n watch = graphene.Boolean()\n\n status = graphene.Int()\n formErrors = graphene.String()\n address = graphene.Field(CryptoAddressType)\n\n @classmethod\n def mutate(cls, root, info, input: Input):\n if not info.context.user.is_authenticated:\n return CreateAccountMutation(\n status=403, client_mutation_id=input['client_mutation_id'])\n\n account_id = input.get(\"account_id\", -1)\n address = input.get(\"address\", \"\").strip()\n coin_id = input.get(\"coin_id\", -1)\n watch = input.get(\"watch\", False)\n\n try:\n account: Account = Account.objects.get(pk=account_id)\n except ObjectDoesNotExist:\n return CreateCryptoAddressMutation(\n status=404,\n formErrors=json.dumps({\n \"account_id\": [\"Please enter valid account id\"]\n }),\n client_mutation_id=input['client_mutation_id'])\n\n if not account.owner == info.context.user:\n return CreateAccountMutation(\n status=403, client_mutation_id=input['client_mutation_id'])\n\n try:\n coin: Coin = Coin.objects.get(pk=coin_id)\n except ObjectDoesNotExist:\n return CreateCryptoAddressMutation(\n status=404,\n formErrors=json.dumps({\n \"coin_id\": [\"Please enter valid coin id\"]\n }),\n client_mutation_id=input['client_mutation_id'])\n\n crypto_address = 
CryptoAddress.objects.create(\n peer=account, coin=coin, address=address, watch=watch)\n\n return CreateCryptoAddressMutation(\n status=200,\n address=crypto_address,\n client_mutation_id=input['client_mutation_id'])\n\n\nclass EditCryptoAddressMutation(graphene.relay.ClientIDMutation):\n class Input:\n id = graphene.ID(required=True)\n address = graphene.String(required=True)\n coin_id = graphene.ID(required=True)\n watch = graphene.Boolean()\n\n status = graphene.Int()\n formErrors = graphene.String()\n address = graphene.Field(CryptoAddressType)\n\n @classmethod\n def mutate(cls, root, info, input: Input):\n if not info.context.user.is_authenticated:\n return EditCryptoAddressMutation(\n status=403, client_mutation_id=input['client_mutation_id'])\n\n object_id = input.get(\"id\", None)\n address = input.get(\"address\", None)\n coin_id = input.get(\"coin_id\", None)\n watch = input.get(\"watch\", False)\n\n try:\n crypto_address: CryptoAddress = CryptoAddress.objects.get(\n pk=object_id)\n except ObjectDoesNotExist:\n return EditCryptoAddressMutation(\n status=404,\n formErrors=json.dumps({\n \"id\": [\"Address ID not found\"]\n }),\n client_mutation_id=input['client_mutation_id'])\n\n if not crypto_address.peer.owner.id == info.context.user.id:\n return EditCryptoAddressMutation(\n status=403, client_mutation_id=input['client_mutation_id'])\n\n try:\n coin: Coin = Coin.objects.get(pk=coin_id)\n except ObjectDoesNotExist:\n return EditCryptoAddressMutation(\n status=404,\n formErrors=json.dumps({\n \"coin_id\": [\"Coin not fund\"]\n }),\n client_mutation_id=input['client_mutation_id'])\n\n crypto_address.address = address\n crypto_address.coin = coin\n crypto_address.watch = watch\n crypto_address.save()\n\n return EditCryptoAddressMutation(\n status=200,\n address=crypto_address,\n client_mutation_id=input['client_mutation_id'])\n\n\nclass EditAccountMutation(graphene.relay.ClientIDMutation):\n class Input:\n account_id = graphene.Int()\n name = 
graphene.String()\n api_key = graphene.String()\n api_secret = graphene.String()\n\n status = graphene.Int()\n formErrors = graphene.String()\n account = graphene.Field(AccountType)\n\n @classmethod\n def mutate(cls, root, info, input: Input):\n if not info.context.user.is_authenticated:\n return EditAccountMutation(status=403)\n\n account_id = input.get(\"account_id\", -1)\n name = input.get(\"name\", \"\").strip()\n api_key = input.get(\"api_key\", \"\").strip()\n api_secret = input.get(\"api_secret\", \"\").strip()\n\n # TODO: validate input using django forms or whatnot\n if account_id < 0 or not name or not api_key or not api_secret:\n return EditAccountMutation(\n status=400,\n formErrors=json.dumps({\n \"account\": [\"Please enter valid account data\"]\n }))\n\n try:\n account: Account = Account.objects.get(pk=account_id)\n except ObjectDoesNotExist:\n return EditAccountMutation(\n status=422,\n formErrors=json.dumps({\n \"account\": [\"Account does not exists\"]\n }))\n\n if account.owner != info.context.user:\n return EditAccountMutation(status=403)\n\n if not account:\n return EditAccountMutation(\n status=422,\n formErrors=json.dumps({\n \"account\": [\"This account does not exist\"]\n }))\n\n account.name = name\n account.api_key = api_key\n account.api_secret = api_secret\n account.save()\n\n return EditAccountMutation(status=200, account=account)\n\n\nclass AccountRefreshTransactionsMutation(graphene.relay.ClientIDMutation):\n class Input:\n account_id = graphene.String()\n\n status = graphene.Int()\n formErrors = graphene.String()\n msg = graphene.String()\n\n @classmethod\n def mutate(cls, root, info, input) -> \"AccountRefreshTransactionsMutation\":\n if not info.context.user.is_authenticated:\n return AccountRefreshTransactionsMutation(status=403)\n\n if input.get(\"account_id\", -1) == -1:\n return AccountRefreshTransactionsMutation(status=400)\n\n account_id = input.get(\"account_id\", -1).strip()\n\n try:\n id_int = int(account_id)\n if id_int 
< 0:\n raise ValueError(\"Invalid input\")\n except ValueError as err:\n return AccountRefreshTransactionsMutation(status=400)\n\n account: Account = Account.objects.get(pk=account_id)\n\n if account.owner != info.context.user:\n return AccountRefreshTransactionsMutation(status=403)\n\n tid = account_id + account.name\n # celery.result will only be available until after the task has run once\n if hasattr(celery, \"result\") and celery.result.AsyncResult(\n tid).status == \"RUNNING\":\n print(\"skipping task\")\n return AccountRefreshTransactionsMutation(\n msg=\"Task is already running\", status=202)\n else:\n try:\n print(\"starting task\")\n async_update_account_trx.apply_async(\n args=[account_id], task_id=tid)\n except async_update_account_trx.OperationalError as err:\n print(\"Sending task raised: %r\", err)\n return AccountRefreshTransactionsMutation(status=500)\n\n return AccountRefreshTransactionsMutation(msg=\"Working\", status=200)\n" }, { "alpha_fraction": 0.5453628897666931, "alphanum_fraction": 0.5786290168762207, "avg_line_length": 29.060606002807617, "blob_id": "5da8489812be47d7272686e59e47f50966037198", "content_id": "87d63c979d514afafbbd718f16f909d7975ca5fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 992, "license_type": "no_license", "max_line_length": 83, "num_lines": 33, "path": "/backend/transactions/migrations/0004_auto_20180408_1110.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-08 11:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('transactions', '0003_transaction_icon'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='transaction',\n name='acquired_amount',\n field=models.DecimalField(decimal_places=10, default=0, max_digits=19),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='acquired_currency',\n 
field=models.CharField(default='', max_length=10),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='spent_amount',\n field=models.DecimalField(decimal_places=10, default=0, max_digits=19),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='spent_currency',\n field=models.CharField(default='', max_length=10),\n ),\n ]\n" }, { "alpha_fraction": 0.5524442195892334, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 38.75471878051758, "blob_id": "4d62571517ce827d67887d05ea0e67dfe7128fff", "content_id": "b6587d3b66d034ab5deded221047b54961824612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2107, "license_type": "no_license", "max_line_length": 188, "num_lines": 53, "path": "/backend/accounts/migrations/0001_initial.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.2 on 2018-03-15 19:24\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('coins', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Address',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('address', models.CharField(max_length=256)),\n ('address_str', models.CharField(blank=True, max_length=300)),\n ('coin', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coins.Coin')),\n ],\n ),\n migrations.CreateModel(\n name='Peer',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Account',\n fields=[\n ('peer_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, 
to='accounts.Peer')),\n ('slug', models.SlugField()),\n ('service_type', models.CharField(max_length=50)),\n ('api_key', models.CharField(max_length=100)),\n ('api_secret', models.CharField(max_length=100)),\n ('creation_date', models.DateTimeField(auto_now_add=True)),\n ('symbols', models.CharField(blank=True, max_length=1000, null=True)),\n ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n bases=('accounts.peer',),\n ),\n migrations.AddField(\n model_name='address',\n name='peer',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='accounts.Peer'),\n ),\n ]\n" }, { "alpha_fraction": 0.6560747623443604, "alphanum_fraction": 0.6691588759422302, "avg_line_length": 27.91891860961914, "blob_id": "c586c018368d030faa56288ee92e7df3b7d086a3", "content_id": "1814e7b7d9f839d53e06f4372be9792ec378b7ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/backend/accounts/tests/test_models.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import pytest\nfrom mixer.backend.django import mixer\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom .. 
import schema\n\n# We need to do this so that writing to the DB is possible in our tests.\npytestmark = pytest.mark.django_db\n\n# Great introduction to TDD with Python + Django:\n# https://www.youtube.com/watch?v=41ek3VNx_6Q\n\n\ndef test_peer_str_func():\n name = \"test123\"\n obj = mixer.blend(\"accounts.peer\", name=name)\n assert obj.__str__() == \"[Peer] {}\".format(\n name), \"Should be the peer's name\"\n\n\ndef test_address_str_func():\n address = \"test123\"\n obj = mixer.blend(\"accounts.CryptoAddress\", address=address)\n symbol = obj.coin.symbol\n assert obj.__str__() == \"{}:{}\".format(symbol, address)\n\n\ndef test_account_creation():\n obj = mixer.blend(\"accounts.Account\")\n assert obj.pk > 0, \"Should create an Account instance\"\n\n\ndef test_account_str_func():\n name = \"test123\"\n obj = mixer.blend(\"accounts.Account\", name=name)\n assert obj.__str__() == \"[Account] {}\".format(\n name), \"Should be the accounts's name\"\n" }, { "alpha_fraction": 0.5692754983901978, "alphanum_fraction": 0.5700687170028687, "avg_line_length": 39.66666793823242, "blob_id": "cbd1a1e035a0a9eb29b39717dbe70f92536d04e4", "content_id": "51de4b692023f711d97be9db408f7684e855c092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3782, "license_type": "no_license", "max_line_length": 83, "num_lines": 93, "path": "/backend/transactions/importers/livecoin.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Livecoin exchange importer functions\"\"\"\nimport time\n\nfrom backend.utils.utils import get_name_price\nfrom backend.transactions.models import Transaction\nimport arrow\nfrom backend.accounts.models import Peer\n\n\ndef import_data_livecoin(data, user):\n \"\"\"Import data from a CSV file exported from Livecoin\n\n Arguments:\n data {object} -- Object with the Livecoin data\n user {user} -- Current logged in User\n\n Returns:\n Transaction -- List with the imported 
transactions\n \"\"\"\n\n transactions = []\n peer_cache = {}\n for trx_input in data.transactions: # type: TransactionData\n if trx_input.transaction_type_raw == \"Deposit\":\n continue\n\n trx = Transaction()\n date = arrow.get(trx_input.date, \"DD.MM.YYYY HH:mm:ss\")\n timestamp = date.timestamp\n trx.date = date.datetime\n trx.owner = user\n\n # calculate book price by spent amount\n book_price_ok = False\n if trx_input.spent_amount > 0 and trx_input.spent_currency is not \"\":\n trx.spent_amount = trx_input.spent_amount\n trx.spent_currency = trx_input.spent_currency\n\n trx.book_price_btc = get_name_price(\n trx.spent_amount, trx.spent_currency, \"BTC\", timestamp)\n trx.book_price_eur = get_name_price(\n trx.spent_amount, trx.spent_currency, \"EUR\", timestamp)\n book_price_ok = True\n\n if trx_input.acquired_amount > 0 and trx_input.acquired_currency is not \"\":\n trx.acquired_amount = trx_input.acquired_amount\n trx.acquired_currency = trx_input.acquired_currency\n if not book_price_ok:\n trx.book_price_btc = get_name_price(trx.acquired_amount,\n trx.acquired_currency,\n \"BTC\", timestamp)\n trx.book_price_eur = get_name_price(trx.acquired_amount,\n trx.acquired_currency,\n \"EUR\", timestamp)\n # if trx_input.source_peer not in trx\n trx.source_peer = Peer(pk=trx_input.source_peer)\n trx.target_peer = Peer(pk=trx_input.target_peer)\n\n if trx_input.fee_amount > 0:\n trx.fee_amount = trx_input.fee_amount\n trx.fee_currency = trx_input.fee_currency\n trx.book_price_fee_btc = get_name_price(\n trx.fee_amount, trx.fee_currency, \"BTC\", timestamp)\n trx.book_price_fee_eur = get_name_price(\n trx.fee_amount, trx.fee_currency, \"EUR\", timestamp)\n\n if trx_input.transaction_type == \"exchange\":\n trx.icon = Transaction.TRX_ICON_EXCHANGE\n trx.save()\n trx.tags.add(data.service_type, data.import_mechanism,\n Transaction.TRX_TAG_EXCHANGE)\n elif trx_input.transaction_type == \"income\":\n trx.icon = Transaction.TRX_ICON_INCOME\n trx.save()\n 
trx.tags.add(data.service_type, data.import_mechanism,\n Transaction.TRX_TAG_INCOME)\n elif trx_input.transaction_type == \"transfer\":\n trx.icon = Transaction.TRX_ICON_TRANSFER\n trx.save()\n trx.tags.add(data.service_type, data.import_mechanism,\n Transaction.TRX_TAG_TRANSFER)\n else:\n trx.icon = Transaction.TRX_ICON_WARNING\n trx.save()\n trx.tags.add(data.service_type, data.import_mechanism,\n Transaction.TRX_TAG_WARNING)\n\n if trx_input.tags:\n for tag in trx_input.tags:\n trx.tags.add(tag)\n trx.save()\n transactions.append(trx)\n return transactions\n" }, { "alpha_fraction": 0.6377171277999878, "alphanum_fraction": 0.6501240730285645, "avg_line_length": 31.675676345825195, "blob_id": "3b251941faf2db03be5a9dadecf7f8151796e806", "content_id": "c896723e2c60022daf107124ead42698d1a45427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3627, "license_type": "no_license", "max_line_length": 86, "num_lines": 111, "path": "/backend/transactions/models.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains all models necessary for the Transactions application\"\"\"\n\nfrom django.db import models\n\nfrom taggit.managers import TaggableManager\n\n\nclass Transaction(models.Model):\n \"\"\"Database model for a single transaction\"\"\"\n\n # exchange between currencies\n TRX_TAG_EXCHANGE = \"exchange\"\n TRX_ICON_EXCHANGE = \"shuffle\"\n # transfer one coin from one wallet to another\n TRX_TAG_TRANSFER = \"transfer\"\n TRX_ICON_TRANSFER = \"send\"\n # buy cryptos from fiat\n TRX_TAG_BUY = \"buy\"\n TRX_ICON_BUY = \"subdirectory_arrow_right\"\n # sell cryptos for fiat\n TRX_TAG_SELL = \"sell\"\n TRX_ICON_SELL = \"subdirectory_arrow_left\"\n # income for a service or sell of a good (refferal bonus, selling of hardware etc)\n TRX_TAG_INCOME = \"income\"\n TRX_ICON_INCOME = \"arrow_forward\"\n # expense for a service or buy of a good (online subscription, buy of an 
hardware)\n TRX_TAG_EXPENSE = \"expense\"\n TRX_ICON_EXPENSE = \"arrow_backward\"\n # mining income\n TRX_TAG_MINING = \"mining\"\n TRX_ICON_MINING = \"gavel\"\n\n # for transactions that need attention by the user\n TRX_TAG_WARNING = \"warning\"\n TRX_ICON_WARNING = \"warning\"\n\n class Meta:\n ordering = ('-date', )\n\n id = models.AutoField(primary_key=True)\n\n owner = models.ForeignKey(\n related_name='owner',\n to='auth.user',\n on_delete=models.PROTECT,\n )\n\n date = models.DateTimeField()\n\n # Spent\n spent_currency = models.CharField(max_length=10, default=\"---\")\n spent_amount = models.DecimalField(\n max_digits=19, decimal_places=10, default=0)\n source_peer = models.ForeignKey(\n default=1,\n related_name='source_peer',\n to='accounts.Peer',\n on_delete=models.PROTECT)\n\n # Acquired\n acquired_currency = models.CharField(max_length=10, default=\"---\")\n acquired_amount = models.DecimalField(\n max_digits=19, decimal_places=10, default=0)\n target_peer = models.ForeignKey(\n default=1,\n related_name='target_peer',\n to='accounts.Peer',\n on_delete=models.PROTECT)\n\n # Fees and book prices are calculated using the mean price of the coin at that day\n fee_currency = models.CharField(max_length=10, default=\"---\")\n fee_amount = models.DecimalField(\n max_digits=19, default=0, decimal_places=10)\n\n # book price is the price of the spent amount in BTC and FIAT\n book_price_eur = models.DecimalField(max_digits=19, decimal_places=10)\n book_price_btc = models.DecimalField(max_digits=19, decimal_places=10)\n\n # fee price is the price of the spent amount in BTC and FIAT\n book_price_fee_eur = models.DecimalField(\n max_digits=19, default=0, decimal_places=10)\n book_price_fee_btc = models.DecimalField(\n max_digits=19, default=0, decimal_places=10)\n\n tags = TaggableManager()\n\n icon = models.CharField(default=\"help_outline\", max_length=100)\n\n def __str__(self):\n # convertion to float removes trailing 0's\n return \"{} {} => {} {} ==> {} 
EUR\".format(\n float(self.spent_amount), self.spent_currency,\n float(self.acquired_amount), self.acquired_currency,\n float(self.book_price_eur))\n\n\nclass TransactionUpdateHistoryEntry(models.Model):\n id = models.AutoField(primary_key=True)\n\n date = models.DateTimeField()\n\n account = models.ForeignKey(\n to='accounts.Account',\n on_delete=models.PROTECT,\n )\n\n fetched_transactions = models.IntegerField()\n\n def __str__(self):\n return \"{} {} {}\".format(self.account.id, self.date,\n self.fetched_transactions)\n" }, { "alpha_fraction": 0.6458966732025146, "alphanum_fraction": 0.6595744490623474, "avg_line_length": 37.70588302612305, "blob_id": "b19dfcec5efd298d28733c70920ac098dda614ca", "content_id": "2ac683fc2dfd47f7f0513741ccb22f23dcbce1be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/backend/views.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "from graphene_django.views import GraphQLView\n\n\n# https://github.com/graphql-python/graphene-django/issues/252\nclass GraphQLErrorFormatView(GraphQLView):\n @staticmethod\n def format_error(error):\n print(error)\n if hasattr(error, 'original_error') and error.original_error:\n formatted = {\"message\": str(error.original_error)}\n if isinstance(error.original_error, UnauthorizedError):\n formatted['code'] = \"401\"\n elif isinstance(error.original_error, PermissionDeniedError):\n formatted['code'] = \"403\"\n return formatted\n\n return GraphQLView.format_error(error)\n" }, { "alpha_fraction": 0.5032851696014404, "alphanum_fraction": 0.5413929224014282, "avg_line_length": 28.269229888916016, "blob_id": "2ea13be03cbf5da29a664168ef957b2e12e33791", "content_id": "2cac47d532ef8a5ef76855a8f2aa17876281d00e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": 
"no_license", "max_line_length": 76, "num_lines": 26, "path": "/backend/coins/migrations/0001_initial.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.2 on 2018-03-10 18:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Coin',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('cc_id', models.IntegerField(unique=True)),\n ('img_url', models.CharField(max_length=200)),\n ('name', models.CharField(max_length=200)),\n ('symbol', models.CharField(max_length=10)),\n ('coin_name', models.CharField(max_length=200)),\n ('full_name', models.CharField(max_length=200)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.4839572310447693, "alphanum_fraction": 0.5775400996208191, "avg_line_length": 19.77777862548828, "blob_id": "67ec3cd06dde9336ca4fd82838604b870928f282", "content_id": "0217f68691a03833ab085c4c40d3b9c1ad308604", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 48, "num_lines": 18, "path": "/backend/accounts/migrations/0005_auto_20180506_1118.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.5 on 2018-05-06 11:18\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coins', '0001_initial'),\n ('accounts', '0004_auto_20180408_1110'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Address',\n new_name='CryptoAddress',\n ),\n ]\n" }, { "alpha_fraction": 0.5379830002784729, "alphanum_fraction": 0.5592665076255798, "avg_line_length": 30.8125, "blob_id": "6e6a80bc18e09654901860090f83427dc1f20346", "content_id": "739e7676ef9b0b17ae245a2c27a1fc39e42c1641", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 3054, "license_type": "no_license", "max_line_length": 77, "num_lines": 96, "path": "/backend/coins/tests/test_tasks.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "'''Contains all task tests for this application'''\nimport pytest\nfrom django.db.models.query import QuerySet\nfrom backend.coins.models import Coin\nfrom ...celery import app as celery_app\nfrom ..tasks import async_update_supported_coins\n\npytestmark = pytest.mark.django_db\n\n\ndef new_get_coin_list(format_list): #pylint: disable=W0613\n '''\n Fake cryptocompare.get_coin_list function\n Provides two valid and one invalid coins\n '''\n return {\n 'BTC': {\n 'Id': '1000',\n 'ImageUrl': '/media/124/btc.png',\n 'Name': 'Bitcoin',\n 'Symbol': 'BTC',\n 'CoinName': 'Bitcoin',\n 'FullName': 'Bitcoin (BTC)'\n },\n 'LTC': {\n 'Id': '1001',\n 'ImageUrl': '/media/124/ltc.png',\n 'Name': 'Litecoin',\n 'Symbol': 'LTC',\n 'CoinName': 'Litecoin',\n 'FullName': 'Litecoin (LTC)'\n },\n 'BTCC': {\n # Should trigger an exception (Id not an int) and not be imported\n 'Id': 'Except',\n 'ImageUrl': '/media/124/btc.png',\n 'Name': 'BitcoinCrash',\n 'Symbol': 'BTCC',\n 'CoinName': 'Bitcoin Crash',\n 'FullName': 'Bitcoin Crash (BTCC)'\n }\n }\n\n\ndef new_get_coin_list_updated(format_list): #pylint: disable=W0613\n '''\n Fake cryptocompare.get_coin_list function\n Provides one updated and one additional coin\n '''\n return {\n 'LTC': {\n 'Id': '1001',\n 'ImageUrl': '/media/345/ltc_updated.png',\n 'Name': 'Litecoin',\n 'Symbol': 'LTC',\n 'CoinName': 'Litecoin Updated',\n 'FullName': 'Litecoin (LTC)'\n },\n 'XLM': {\n 'Id': '1002',\n 'ImageUrl': '/media/1234/xlm.png',\n 'Name': 'Stellar Lumens',\n 'Symbol': 'XLM',\n 'CoinName': 'Lumens',\n 'FullName': 'Stellar Lumens (XLM)'\n }\n }\n\n\n@celery_app.task\ndef test_async_update_coins(monkeypatch):\n '''Test the supported coin update function'''\n\n # Test import the objects\n 
monkeypatch.setattr('cryptocompare.get_coin_list', new_get_coin_list)\n async_update_supported_coins() #pylint: disable=E1120\n all_coins: QuerySet = Coin.objects.all()\n assert all_coins.count() == 2\n\n coin: Coin = all_coins.first()\n assert coin.cc_id == 1000\n assert coin.img_url == '/media/124/btc.png'\n assert coin.name == 'Bitcoin'\n assert coin.symbol == 'BTC'\n assert coin.coin_name == 'Bitcoin'\n assert coin.full_name == 'Bitcoin (BTC)'\n\n # Test update the database object\n monkeypatch.setattr('cryptocompare.get_coin_list',\n new_get_coin_list_updated)\n async_update_supported_coins() # pylint: disable=E1120\n assert Coin.objects.all().count() == 3, 'Test add one new coin'\n ltc_updated: Coin = Coin.objects.get(pk=2)\n assert ltc_updated.cc_id == 1001\n assert ltc_updated.img_url == '/media/345/ltc_updated.png'\n assert ltc_updated.coin_name == 'Litecoin Updated'\n" }, { "alpha_fraction": 0.6741629242897034, "alphanum_fraction": 0.6856571435928345, "avg_line_length": 29.78461456298828, "blob_id": "105e6c38835feaca42bae0b5f096874bd1d6fcf1", "content_id": "48f738209a06a1d44b0a92b4ee63f14e80d07c2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, "license_type": "no_license", "max_line_length": 92, "num_lines": 65, "path": "/backend/coins/tests/test_schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "'''Contains all schema tests for the application'''\nimport pytest\nfrom mixer.backend.django import mixer\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.test import RequestFactory\n\nfrom ...test_utils.utils import mock_resolve_info\n\nimport backend\nfrom .. 
import schema\n\npytestmark = pytest.mark.django_db\n\n\ndef test_coin_type():\n instance = schema.CoinType()\n assert instance\n\n\ndef test_resolve_all_coins():\n '''Test allCoins Query'''\n user_a = mixer.blend('auth.User')\n\n req = RequestFactory().get('/')\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n\n mixer.blend('coins.Coin')\n mixer.blend('coins.Coin')\n mixer.blend('coins.Coin')\n mixer.blend('coins.Coin')\n\n query = schema.Query()\n res = query.resolve_all_coins(resolve_info)\n assert res.count() == 0, 'User not logged in, should return 0 transactions'\n\n req.user = user_a\n res = query.resolve_all_coins(resolve_info)\n assert res.count(\n ) == 4, 'User A is logged in, should return 4 transactions'\n\n\ndef test_update_supported_coins(monkeypatch):\n '''Test the supported coins update mutation '''\n req = RequestFactory().get('/')\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n\n data = {'client_mutation_id': '1'}\n\n mut = schema.CoinRefreshTransactionsMutation()\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 403, 'Should return 403 if user is not logged in'\n\n req.user = mixer.blend('auth.User')\n mut = schema.CoinRefreshTransactionsMutation()\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 403, 'Should return 403 if user is not logged in but not superuser'\n\n monkeypatch.setattr(backend.coins.tasks.async_update_supported_coins,\n 'apply_async', lambda task_id: print(\"mock called\"))\n\n req.user.is_superuser = True\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 200, 'Should return 200 as task was started'\n" }, { "alpha_fraction": 0.5105105042457581, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 18.58823585510254, "blob_id": "21a0b79426b29fd6f4dd5b944b3e342ad4b372b1", "content_id": "6cdbb5d69ece7289b081d8e176d2a903fcc50b06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 333, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/backend/coins/migrations/0002_auto_20180510_1515.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.5 on 2018-05-10 15:15\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coins', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='coin',\n options={'ordering': ('symbol',)},\n ),\n ]\n" }, { "alpha_fraction": 0.690172016620636, "alphanum_fraction": 0.6985257863998413, "avg_line_length": 32.91666793823242, "blob_id": "5c8cc5f4613bc61895370b47d61de16ac8ce0c16", "content_id": "3ccecd4e30963bfeb309c6aaab95cf329f539d55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4076, "license_type": "no_license", "max_line_length": 82, "num_lines": 120, "path": "/backend/transactions/tests/test_schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import pytest\nfrom mixer.backend.django import mixer\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.test import RequestFactory\n\nimport backend.transactions.schema as schema\nfrom backend.transactions.models import Transaction\nfrom backend.test_utils.utils import mock_resolve_info, gen_fake_transaction\n\n# We need to do this so that writing to the DB is possible in our tests.\npytestmark = pytest.mark.django_db\n\n\ndef test_transaction_type():\n instance = schema.TransactionType()\n assert instance\n\n\ndef test_resolve_get_transaction_by_id():\n anonuser = AnonymousUser()\n usera = mixer.blend(\"auth.User\")\n userb = mixer.blend(\"auth.User\")\n\n gen_fake_transaction(owner=usera)\n gen_fake_transaction(owner=usera)\n gen_fake_transaction(owner=usera)\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n 
resolveInfo = mock_resolve_info(req)\n\n query = schema.Query()\n res = query.resolve_get_transaction(resolveInfo, **{\"id\": 1})\n assert res == None, \"User not logged in, should return None\"\n\n query = schema.Query()\n req.user = usera\n res = query.resolve_get_transaction(resolveInfo, **{\"id\": 1})\n assert isinstance(res, Transaction), \"Should return a transaction object\"\n assert res.id == 1, \"Should return transaction with id 1\"\n\n res = query.resolve_get_transaction(resolveInfo, **{\"id\": 2})\n assert isinstance(res, Transaction), \"Should return a transaction object\"\n assert res.id == 2, \"Should return transaction with id 2\"\n\n req.user = userb\n res = query.resolve_get_transaction(resolveInfo, **{\"id\": 2})\n assert res == None, \"User should not have access to another users transaction\"\n\n with pytest.raises(ObjectDoesNotExist) as excinfo:\n res = query.resolve_get_transaction(resolveInfo, **{\"id\": 5})\n\n\ndef test_resolve_all_transactions():\n anonuser = AnonymousUser()\n usera = mixer.blend(\"auth.User\")\n userb = mixer.blend(\"auth.User\")\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolveInfo = mock_resolve_info(req)\n\n gen_fake_transaction(owner=usera)\n gen_fake_transaction(owner=usera)\n\n gen_fake_transaction(owner=userb)\n gen_fake_transaction(owner=userb)\n gen_fake_transaction(owner=userb)\n\n query = schema.Query()\n res = query.resolve_all_transactions(resolveInfo)\n assert res.count() == 0, \"User not logged in, should return 0 transactions\"\n\n req.user = usera\n res = query.resolve_all_transactions(resolveInfo)\n assert res.count(\n ) == 2, \"User A is logged in, should return 2 transactions\"\n\n req.user = userb\n res = query.resolve_all_transactions(resolveInfo)\n assert res.count(\n ) == 3, \"User B is logged in, should return 3 transactions\"\n\n\ndef test_import_csv_data_mutation(mocker):\n \"\"\"\n test if user is authenticated ✓\n test fail at erroneous data input ✓\n test if 
appropriate import function is called ✓\n \"\"\"\n usera = mixer.blend(\"auth.User\")\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n\n mut = schema.ImportTransactionsMutation()\n res = mut.mutate(None, resolve_info, {})\n assert res.status == 403, \"Should return 403 if user is not logged in\"\n\n data = schema.ImportTransactionsMutation.Input()\n data.data = schema.ImportTransactionInput()\n data.data.service_type = \"fakeexchange\"\n data.data.import_mechanism = \"csv\"\n data.data.transactions = []\n\n req.user = usera\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 404, \"Service not found, should return 404\"\n assert res.formErrors == \"Service type {} not found\".format(\n data.data.service_type\n ), \"Service not found, send correct error message\"\n\n mocker.patch(\"backend.transactions.schema.import_data_livecoin\")\n\n data.data.service_type = \"livecoin\"\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 200\n schema.import_data_livecoin.assert_called_once() # pylint: disable=E1101\n" }, { "alpha_fraction": 0.5887611508369446, "alphanum_fraction": 0.6113239526748657, "avg_line_length": 48.978721618652344, "blob_id": "57b22c17affa31fb3a2225312baadfd5906a3903", "content_id": "bdb9724f51890830e42c9d41b9799cd554e81163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2349, "license_type": "no_license", "max_line_length": 153, "num_lines": 47, "path": "/backend/transactions/migrations/0001_initial.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.2 on 2018-03-15 19:24\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('accounts', '0001_initial'),\n 
migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('date', models.DateTimeField()),\n ('spent_currency', models.CharField(max_length=10)),\n ('spent_amount', models.DecimalField(decimal_places=10, max_digits=19)),\n ('acquired_currency', models.CharField(max_length=10)),\n ('acquired_amount', models.DecimalField(decimal_places=10, max_digits=19)),\n ('fee_currency', models.CharField(max_length=10)),\n ('fee_amount', models.DecimalField(decimal_places=10, max_digits=19)),\n ('book_price_eur', models.DecimalField(decimal_places=10, max_digits=19)),\n ('book_price_btc', models.DecimalField(decimal_places=10, max_digits=19)),\n ('book_price_fee_eur', models.DecimalField(decimal_places=10, max_digits=19)),\n ('book_price_fee_btc', models.DecimalField(decimal_places=10, max_digits=19)),\n ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='owner', to=settings.AUTH_USER_MODEL)),\n ('source_account', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='source_account', to='accounts.Account')),\n ('target_account', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='target_account', to='accounts.Account')),\n ],\n ),\n migrations.CreateModel(\n name='TransactionUpdateHistoryEntry',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('date', models.DateTimeField()),\n ('fetched_transactions', models.IntegerField()),\n ('account', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='accounts.Account')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6548410654067993, "alphanum_fraction": 0.6548410654067993, "avg_line_length": 28.413043975830078, "blob_id": "738d9d921ac2dbec53e4b39fbdb346531ca07e6c", "content_id": "6af6827a60a75c6a961d8ea99dbf4be12960269e", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1353, "license_type": "no_license", "max_line_length": 70, "num_lines": 46, "path": "/backend/utils/utils.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains various utility functions\"\"\"\n\nimport time\nimport cryptocompare as cc\nfrom diskcache import FanoutCache\n\n\ndef exchange_can_batch(exchange: str) -> bool:\n # For some exchanges it is impossible to get all trades for\n # an account and we have to fetch each symbol individually.\n # Binance, for example. Cryptopia does not have this problem.\n if exchange == \"binance\":\n return False\n elif exchange == \"bitfinex\":\n return False\n\n return True\n\n\n# use a simple cache mechanism to avoid hammering the API\nCACHE = FanoutCache('/tmp/diskcache/fanoutcache')\n\n\ndef get_name_price(amount: float,\n base: str,\n target: str,\n timestamp: float = time.time()) -> float:\n \"\"\"\n Calculated the price of one name in another name.\n Returns a float with the converted value as a decimal.Decimal\n\n Keyword arguments:\n amount -- amount to convert\n base -- name to convert from\n target -- name to convert to\n date -- historic date as a Unix Timestamp (default: time.time())\n \"\"\"\n key = base + target + str(timestamp)\n request_res = CACHE.get(key, None)\n if request_res is None:\n request_res = cc.get_historical_price(base, target, timestamp)\n CACHE.add(key, request_res)\n\n val = request_res[base][target]\n\n return amount * val\n" }, { "alpha_fraction": 0.6182193756103516, "alphanum_fraction": 0.6194151639938354, "avg_line_length": 36.8559684753418, "blob_id": "8095387d31ce76cf9d0c10697bfb6749c318fcd9", "content_id": "f4b8638673e1cd3e997257d8400a3327213da8f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9199, "license_type": "no_license", "max_line_length": 87, "num_lines": 243, "path": "/backend/transactions/fetchers/coinbase.py", 
"repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"\nContains all functions related to importing Coinbase data\n\nNote: We cannot use Transaction.objects.bulk_create since\ndjango-taggit needs object id before saving.\n\"\"\"\n\nimport json\nimport time\nfrom requests.sessions import Session\nfrom datetime import datetime, timezone\nfrom collections import namedtuple\nfrom dateutil import parser\nfrom django.utils.timezone import now\n\nfrom coinbase.wallet.client import Client, APIObject\n\nfrom backend.transactions.models import Transaction, TransactionUpdateHistoryEntry\n\nfrom backend.accounts.models import Account\nfrom backend.utils.utils import get_name_price\n\nTAG_COINBASE = \"coinbase\"\n\n\ndef process_send(cb_trx, timestamp: int, account: Account) -> Transaction:\n \"\"\"Process all Coinbase send transactions\n\n Arguments:\n cb_trx {APIObject} -- the coinbase transaction to import\n timestamp {float} -- timestamp of last import from coinbase\n account {Account} -- the account this transaction originates from\n\n Returns:\n Transaction -- a Transaction object\n \"\"\"\n\n new_trx = Transaction()\n new_trx.date = cb_trx[\"created_at\"]\n\n # minus on coinbase (source peer)\n new_trx.spent_amount = abs(float(cb_trx[\"amount\"][\"amount\"]))\n new_trx.spent_currency = cb_trx[\"amount\"][\"currency\"]\n\n network = cb_trx[\"network\"]\n\n tag = \"\"\n\n if network[\"status\"] == \"off_blockchain\":\n # could be a refferal bonus from Coinbase\n new_trx.acquired_amount = abs(float(cb_trx[\"amount\"][\"amount\"]))\n new_trx.acquired_currency = cb_trx[\"amount\"][\"currency\"]\n tag = Transaction.TRX_TAG_INCOME\n new_trx.icon = Transaction.TRX_ICON_INCOME\n # a refferal bonus has no fee, so use defaults from model\n else:\n # amount received on target peer (spent amount with network fees deducted)\n new_trx.acquired_amount = abs(\n float(network[\"transaction_amount\"][\"amount\"]))\n new_trx.acquired_currency = 
network[\"transaction_amount\"][\"currency\"]\n\n # network fee for this transaction\n new_trx.fee_amount = abs(float(network[\"transaction_fee\"][\"amount\"]))\n new_trx.fee_currency = network[\"transaction_fee\"][\"currency\"]\n\n new_trx.book_price_fee_eur = get_name_price(\n new_trx.fee_amount, new_trx.fee_currency, \"EUR\", timestamp)\n new_trx.book_price_fee_btc = get_name_price(\n new_trx.fee_amount, new_trx.fee_currency, \"BTC\", timestamp)\n tag = Transaction.TRX_TAG_TRANSFER\n new_trx.icon = Transaction.TRX_ICON_TRANSFER\n\n # calculate book prices\n # number might be negative, make absolute\n new_trx.book_price_eur = abs(float(cb_trx[\"native_amount\"][\"amount\"]))\n new_trx.book_price_btc = get_name_price(\n new_trx.spent_amount, new_trx.spent_currency, \"BTC\", timestamp)\n\n new_trx.owner = account.owner\n new_trx.source_peer = account\n # TODO: get target address and query database for known addresses\n # If it exists, get the parent Peer for this address and set as target\n # new_trx.target_peer = None\n\n new_trx.save()\n new_trx.tags.add(TAG_COINBASE, tag)\n new_trx.save()\n\n\ndef process_buy_sell(cb_trx, timestamp, account: Account) -> Transaction:\n \"\"\"Process all Coinbase buys and sells\n\n Arguments:\n cb_trx {APIObject} -- the coinbase transaction to import\n timestamp {float} -- timestamp of last import from coinbase\n account {Account} -- the account this buy or sell originates from\n\n Raises:\n ValueError -- when resource is not \"buy\" or \"sell\"\n\n Returns:\n Transaction -- a Transaction object\n \"\"\"\n\n new_trx: Transaction = Transaction()\n new_trx.date = cb_trx[\"created_at\"]\n\n tag = \"None\"\n\n if cb_trx[\"resource\"] == \"buy\":\n new_trx.acquired_amount = float(cb_trx[\"amount\"][\"amount\"])\n new_trx.acquired_currency = cb_trx[\"amount\"][\"currency\"]\n\n new_trx.spent_amount = float(cb_trx[\"total\"][\"amount\"])\n new_trx.spent_currency = cb_trx[\"total\"][\"currency\"]\n new_trx.icon = 
Transaction.TRX_ICON_BUY\n tag = Transaction.TRX_TAG_BUY\n elif cb_trx[\"resource\"] == \"sell\":\n new_trx.acquired_amount = float(cb_trx[\"total\"][\"amount\"])\n new_trx.acquired_currency = cb_trx[\"total\"][\"currency\"]\n\n new_trx.spent_amount = float(cb_trx[\"amount\"][\"amount\"])\n new_trx.spent_currency = cb_trx[\"amount\"][\"currency\"]\n new_trx.icon = Transaction.TRX_ICON_SELL\n tag = Transaction.TRX_TAG_SELL\n else:\n raise ValueError(\"Type of transaction must either be buy or sell\")\n\n if new_trx.acquired_currency == \"BTC\":\n new_trx.book_price_btc = new_trx.acquired_amount\n else:\n new_trx.book_price_btc = get_name_price(new_trx.acquired_amount,\n new_trx.acquired_currency,\n \"BTC\", timestamp)\n\n new_trx.book_price_eur = abs(float(cb_trx[\"total\"][\"amount\"]))\n new_trx.book_price_btc = get_name_price(new_trx.book_price_eur, \"EUR\",\n \"BTC\", timestamp)\n\n new_trx.fee_amount = new_trx.book_price_fee_eur = abs(\n float(cb_trx[\"fees\"][0][\"amount\"][\"amount\"]))\n new_trx.fee_currency = cb_trx[\"fees\"][0][\"amount\"][\"currency\"]\n new_trx.book_price_fee_btc = get_name_price(new_trx.book_price_fee_eur,\n \"EUR\", \"BTC\", timestamp)\n\n new_trx.owner = account.owner\n new_trx.source_peer = account\n new_trx.target_peer = account\n new_trx.save()\n new_trx.tags.add(TAG_COINBASE, tag)\n new_trx.save()\n\n\ndef fetch_from_cb(what_to_fetch: str, cb_client: Client,\n cb_account_id: str) -> []:\n \"\"\"Fetch the specified data from Coinbase\n\n buys and sells: Merchant buyouts like FIAT -> BTC etc.\n transfers: Coin transfers from Coinbase to a wallet address\n\n Arguments:\n what_to_fetch {str} -- either \"buys\", \"sells\" or \"transfers\"\n cb_client {Client} -- coinbase client object\n cb_account_id {str} -- coinbase account id to use\n\n Returns:\n [] -- a list with the APIObjects from Coinbase\n \"\"\"\n\n the_list = []\n data = dict()\n next_uri = \"\"\n while next_uri != None:\n if what_to_fetch == \"buys\":\n ret = 
cb_client.get_buys(cb_account_id, **data)\n elif what_to_fetch == \"sells\":\n ret = cb_client.get_sells(cb_account_id, **data)\n elif what_to_fetch == \"transfers\":\n ret = cb_client.get_transactions(cb_account_id, **data)\n\n the_list.extend(ret[\"data\"])\n next_uri = ret.pagination[\"next_uri\"]\n if next_uri != None:\n data[\"starting_after\"] = ret[\"data\"][-1][\"id\"]\n return the_list\n\n\ndef update_coinbase_trx(account: Account):\n \"\"\"Synchronizes all transactions from Coinbase\"\"\"\n last_update_query = TransactionUpdateHistoryEntry.objects.filter(\n account=account).order_by('-date')\n latest_update = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc)\n if last_update_query.count():\n latest_update = last_update_query[:1][0].date\n\n client: Client = Client(account.api_key, account.api_secret)\n cb_accounts = client.get_accounts()\n\n num_imports = 0\n\n for cb_account in cb_accounts[\"data\"]:\n if cb_account[\"type\"] == \"fiat\":\n continue\n\n # Unfortunately, the coinbase API only returns buys and sells\n # without the fee data when fetching through get_transactions.\n # For that reason we still have to use client.get_buys() and client.get_sells()\n # and cannot use the data returned from client.get_transactions()\n cb_transactions = fetch_from_cb(\"transfers\", client, cb_account[\"id\"])\n for cb_trx in cb_transactions:\n if cb_trx[\"type\"] == \"send\":\n date = parser.parse(cb_trx[\"created_at\"])\n if date <= latest_update:\n continue\n timestamp = time.mktime(date.timetuple())\n process_send(cb_trx, timestamp, account)\n num_imports += 1\n time.sleep(1) # sleep to prevent api spam\n\n buy_sell_list = []\n buy_sell_list.extend(fetch_from_cb(\"buys\", client, cb_account[\"id\"]))\n buy_sell_list.extend(fetch_from_cb(\"sells\", client, cb_account[\"id\"]))\n\n for buy_sell in buy_sell_list:\n if buy_sell[\"resource\"] == \"buy\" or buy_sell[\"resource\"] == \"sell\":\n if buy_sell[\"status\"] != \"completed\":\n # Skip everything 
not completed.\n # This could be created or canceled.\n continue\n\n date = parser.parse(buy_sell[\"created_at\"])\n if date <= latest_update:\n continue\n timestamp = time.mktime(date.timetuple())\n process_buy_sell(buy_sell, timestamp, account)\n num_imports += 1\n time.sleep(1) # sleep to prevent api spam\n\n entry: TransactionUpdateHistoryEntry = TransactionUpdateHistoryEntry(\n date=now(), account=account, fetched_transactions=num_imports)\n entry.save()\n\n print(\"Imported {} transactions\".format(num_imports))\n" }, { "alpha_fraction": 0.5190712809562683, "alphanum_fraction": 0.5771144032478333, "avg_line_length": 25.217391967773438, "blob_id": "017b61fb59c50090734180c5cff65ba04192990c", "content_id": "6d9f881ac08c2a4747b351dbe6f36e4524958a11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 65, "num_lines": 23, "path": "/backend/transactions/migrations/0005_auto_20180412_1710.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-12 17:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('transactions', '0004_auto_20180408_1110'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='transaction',\n name='acquired_currency',\n field=models.CharField(default='---', max_length=10),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='spent_currency',\n field=models.CharField(default='---', max_length=10),\n ),\n ]\n" }, { "alpha_fraction": 0.6440389752388, "alphanum_fraction": 0.6590002179145813, "avg_line_length": 37.12975311279297, "blob_id": "89fe78145f3a52020110629d44d2a6623e9f863e", "content_id": "ab66f9f474b4d0f3158044aa7cc106646896c168", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17044, "license_type": "no_license", 
"max_line_length": 111, "num_lines": 447, "path": "/backend/accounts/tests/test_schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import pytest\nimport ccxt\nfrom mixer.backend.django import mixer\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.test import RequestFactory\n\nfrom ...test_utils.utils import mock_resolve_info\n\nfrom backend.accounts.models import Account, CryptoAddress\nfrom .. import schema\n\n# We need to do this so that writing to the DB is possible in our tests.\npytestmark = pytest.mark.django_db\n\n\ndef test_account_type():\n instance = schema.AccountType()\n assert instance\n\n\ndef test_resolve_get_accounts_by_id():\n mixer.blend(\"accounts.Account\")\n mixer.blend(\"accounts.Account\")\n mixer.blend(\"accounts.Account\")\n query = schema.Query()\n res = query.resolve_get_account(None, **{\"id\": 1})\n assert res.id == 1, \"Should return account with id 1\"\n\n res = query.resolve_get_account(None, **{\"id\": 2})\n assert res.id == 2, \"Should return account with id 2\"\n\n with pytest.raises(ObjectDoesNotExist) as excinfo:\n res = query.resolve_get_account(None, **{\"id\": 5})\n\n\ndef test_resolve_get_account_by_name():\n mixer.blend(\"accounts.Account\", name=\"first\")\n mixer.blend(\"accounts.Account\", name=\"second\")\n mixer.blend(\"accounts.Account\", name=\"third\")\n\n query = schema.Query()\n res = query.resolve_get_account(None, **{\"name\": \"first\"})\n assert res.name == \"first\", \"Should return account with name \\\"first\\\"\"\n\n res = query.resolve_get_account(None, **{\"name\": \"third\"})\n assert res.name == \"third\", \"Should return account with name \\\"third\\\"\"\n\n with pytest.raises(ObjectDoesNotExist) as excinfo:\n res = query.resolve_get_account(None, **{\"name\": \"nonexistend\"})\n\n\ndef test_resolve_all_accounts():\n anonuser = AnonymousUser()\n usera = mixer.blend(\"auth.User\")\n userb = 
mixer.blend(\"auth.User\")\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolveInfo = mock_resolve_info(req)\n\n mixer.blend(\"accounts.Account\", owner=usera)\n mixer.blend(\"accounts.Account\", owner=usera)\n\n mixer.blend(\"accounts.Account\", owner=userb)\n mixer.blend(\"accounts.Account\", owner=userb)\n mixer.blend(\"accounts.Account\", owner=userb)\n\n query = schema.Query()\n res = query.resolve_all_accounts(resolveInfo)\n assert res.count() == 0, \"User not logged in, should return 0 accounts\"\n\n req.user = usera\n res = query.resolve_all_accounts(resolveInfo)\n assert res.count() == 2, \"User A is logged in, should return 2 accounts\"\n\n req.user = userb\n res = query.resolve_all_accounts(resolveInfo)\n assert res.count() == 3, \"User B is logged in, should return 3 accounts\"\n\n\ndef test_resolve_get_crypto_addresses():\n # 1 Should not be able to anonymously get addresses\n # 2 Should not be able to get another users addresses\n # 3 Should return 0 if account does not exist\n # 4 Should return 0 if no peer id is passed in\n # 5 Should successfully receive addresses if conditions are met\n user_a = mixer.blend(\"auth.User\")\n user_b = mixer.blend(\"auth.User\")\n account_a: Account = mixer.blend(\"accounts.Account\", owner=user_a)\n account_b: Account = mixer.blend(\"accounts.Account\", owner=user_b)\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n\n mixer.blend(\"accounts.CryptoAddress\", peer=account_a)\n mixer.blend(\"accounts.CryptoAddress\", peer=account_a)\n mixer.blend(\"accounts.CryptoAddress\", peer=account_a)\n\n mixer.blend(\"accounts.CryptoAddress\", peer=account_b)\n mixer.blend(\"accounts.CryptoAddress\", peer=account_b)\n mixer.blend(\"accounts.CryptoAddress\", peer=account_b)\n\n query = schema.Query()\n res = query.resolve_get_crypto_addresses(resolve_info,\n **{\"peer_id\": account_a.id})\n assert res.count() == 0, \"User not logged in, should 
return 0 addresses\"\n\n req.user = user_b\n res = query.resolve_get_crypto_addresses(resolve_info,\n **{\"peer_id\": account_a.id})\n assert res.count() == 0, \"\"\"\n User b requests addresses for account of user a, should return no addresses\"\"\"\n\n req.user = user_a\n res = query.resolve_get_crypto_addresses(resolve_info, **{\"peer_id\": 15})\n assert res.count(\n ) == 0, \"\"\"Non existing peer, should return no addresses\"\"\"\n\n res = query.resolve_get_crypto_addresses(resolve_info, **{})\n assert res.count() == 0, \"No peer ID passed, should return Error\"\n\n res = query.resolve_get_crypto_addresses(resolve_info,\n **{\"peer_id\": account_a.id})\n assert res.count() == 3, \"Valid request should return 3 addresses\"\n\n\ndef test_resolve_supported_services():\n query = schema.Query()\n res = query.resolve_supported_services(None)\n assert len(res) > 0, \"Should return more than one service\"\n\n\ndef test_resolve_supported_symbols():\n query = schema.Query()\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolveInfo = mock_resolve_info(req)\n\n res = query.resolve_supported_symbols(resolveInfo,\n **{\"service\": \"binance\"})\n assert len(res) == 0, \"User not logged in, should return 0 symbols\"\n\n req.user = mixer.blend(\"auth.User\")\n res = query.resolve_supported_symbols(resolveInfo,\n **{\"service\": \"binance\"})\n assert len(res) > 0, \"User logged in, should return at least one symbol\"\n\n\ndef test_create_account_mutation():\n mut = schema.CreateAccountMutation()\n\n data = {\n \"name\": \"test1\",\n \"service_type\": \"binance\",\n \"symbols\": '[\"ETH/BTC\", \"XLM/ETH\"]',\n \"api_key\": \"ateswg\",\n \"api_secret\": \"ssdge\"\n }\n\n req = RequestFactory().get(\"/\")\n # AnonymousUser() is equal to a not logged in user\n req.user = AnonymousUser()\n\n resolveInfo = mock_resolve_info(req)\n\n res = mut.mutate(None, resolveInfo, data)\n assert res.status == 403, \"Should return 403 if user is not logged in\"\n\n 
req.user = mixer.blend(\"auth.User\")\n res = mut.mutate(None, resolveInfo, {})\n assert res.status == 400, \"Should return 400 if there are form errors\"\n assert \"account\" in res.formErrors, \"Should have form error for account in field\"\n\n res = mut.mutate(None, resolveInfo, data)\n assert res.status == 200, 'Should return 200 if user is logged in and submits valid data'\n assert res.account.pk == 1, 'Should create new account'\n\n res = mut.mutate(None, resolveInfo, data)\n assert res.status == 422, 'Should return 422 if account with this name exists'\n\n\ndef test_edit_account_mutation():\n # 1 Should not be able to to edit accounts when unauthenticated (status 403)\n # 2 Should not be able to edit other users accounts (status 403)\n # 3 Should return error message when no id or wrong data type was supplied (status 400)\n # 4 Should return success message when update was successfuly started (status 200)\n mut = schema.EditAccountMutation()\n\n anonuser = AnonymousUser()\n usera = mixer.blend(\"auth.User\")\n userb = mixer.blend(\"auth.User\")\n\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n name_initial = \"test1\"\n name_updated = \"test2\"\n\n account: Account = mixer.blend(\n \"accounts.Account\",\n owner=usera,\n name=name_initial,\n service_type=\"binance\",\n symbols='[\"ETH/BTC\", \"XLM/ETH\"]',\n api_key=\"ateswg\",\n api_secret=\"ssdge\")\n\n data = {\n \"account_id\": account.pk,\n \"name\": name_updated,\n \"api_key\": \"1234\",\n \"api_secret\": \"5678\"\n }\n\n req = RequestFactory().get(\"/\")\n # AnonymousUser() is equal to a not logged in user\n req.user = AnonymousUser()\n\n resolve_info = mock_resolve_info(req)\n\n res = mut.mutate(None, resolve_info, data)\n account: Account = Account.objects.get(pk=account.pk)\n assert account.name == name_initial, \"Should not have edited name\"\n assert res.status == 403, \"Should return 403 if user is not logged in\"\n\n req.user = userb\n 
res = mut.mutate(None, resolve_info, data)\n account: Account = Account.objects.get(pk=account.pk)\n assert account.name == name_initial, \"Should not have edited name\"\n assert res.status == 403, \"Should return 403 if user is trying to modify another users account\"\n\n req.user = usera\n res = mut.mutate(\n None, resolve_info, {\n \"account_id\": 5,\n \"name\": name_updated,\n \"api_key\": \"1234\",\n \"api_secret\": \"5678\"\n })\n assert res.status == 422, \"Should return 422 if account does not exist\"\n\n res = mut.mutate(None, resolve_info, {})\n account: Account = Account.objects.get(pk=account.pk)\n assert account.name == name_initial, \"Should not have edited name\"\n assert res.status == 400, \"Should return 400 if there are form errors\"\n assert \"account\" in res.formErrors, \"Should have form error for account in field\"\n\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 200, 'Should return 200 if user is logged in and submits valid data'\n assert res.account.name == name_updated, 'Name should match'\n assert res.account.api_key == data[\"api_key\"], 'API Key should match'\n assert res.account.api_secret == data[\n \"api_secret\"], 'API secret should match'\n\n\ndef test_create_crypto_address_mutation():\n # 1 Should not be able to to trigger mutation when unauthenticated (status 403)\n # 2 Should return error when account does not exist (status 404)\n # 3 Should return error when account does not belong to the logged in user (status 403)\n # 4 Should return error when coin does not exist (status 404)\n # 5 Should return success message and address info when address was successfully added (status 200)\n # 6 Default value for watch should be false\n\n user_a = mixer.blend(\"auth.User\")\n user_b = mixer.blend(\"auth.User\")\n\n account_a: Account = mixer.blend(\"accounts.Account\", owner=user_a)\n mixer.blend(\"accounts.Account\", owner=user_b)\n\n coin_a = mixer.blend(\"coins.Coin\")\n\n mut = 
schema.CreateCryptoAddressMutation()\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n\n data = {\n \"account_id\": 199, # non existing account id\n \"address\": \"addr_a\",\n \"coin_id\": coin_a.id,\n \"client_mutation_id\": \"test\"\n }\n\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 403, \"\"\"\n Should not be able to to trigger mutation when unauthenticated\"\"\"\n assert res.client_mutation_id == \"test\"\n\n req.user = user_a\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 404, \"Should return error when account does not exist\"\n assert \"account_id\" in res.formErrors, \"\"\"\n Should return an error message containing 'account_id'\"\"\"\n assert res.client_mutation_id == \"test\"\n\n # User B tries to add an address to User A's account\n req.user = user_b\n data[\"account_id\"] = account_a.id\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 403, \"\"\"\n Should return error when account does not belong to the logged in user\"\"\"\n assert res.client_mutation_id == \"test\"\n\n req.user = user_a\n data[\"coin_id\"] = 199\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 404, \"Should return error when coin does not exist\"\n assert \"coin_id\" in res.formErrors, \"\"\"\n Should return an error message containing 'account_id'\"\"\"\n assert res.client_mutation_id == \"test\"\n\n data[\"coin_id\"] = coin_a.id\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 200, \"Should return success message when update was successfully started\"\n assert res.address is not None, \"Address must not be None\"\n assert res.client_mutation_id == \"test\"\n assert not res.address.watch, \"Default watch should be False\"\n\n data[\"watch\"] = True\n res = mut.mutate(None, resolve_info, data)\n assert res.address.watch, \"Watch should be True\"\n\n\ndef test_edit_crypto_address_mutation():\n # 1 Should not be able to to trigger 
mutation when unauthenticated (status 403)\n # 2 Should return error when address does not exist (status 404)\n # 3 Should return error when address does not belong to the logged in user (status 403)\n # 4 Should return error when coin does not exist (status 404)\n # 5 Should return success message and address info when address was\n # successfully edited (status 200)\n # 6 Default value for watch should be false\n\n user_a = mixer.blend(\"auth.User\")\n user_b = mixer.blend(\"auth.User\")\n\n account_a: Account = mixer.blend(\"accounts.Account\", owner=user_a)\n mixer.blend(\"accounts.Account\", owner=user_b)\n\n coin_a = mixer.blend(\"coins.Coin\")\n coin_b = mixer.blend(\"coins.Coin\")\n\n crypto_address_a: CryptoAddress = mixer.blend(\n \"accounts.CryptoAddress\", peer=account_a, coin_id=coin_b.id)\n\n mut = schema.EditCryptoAddressMutation()\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolve_info = mock_resolve_info(req)\n\n data = {\n \"id\": 199, # non existing address\n \"address\": \"changed_addr\",\n \"coin_id\": coin_a.id,\n \"client_mutation_id\": \"test\",\n \"watch\": True\n }\n\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 403, \"\"\"\n Should not be able to to trigger mutation when unauthenticated\"\"\"\n assert res.client_mutation_id == \"test\"\n\n req.user = user_a\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 404, \"Should return error when address object does not exist\"\n assert \"id\" in res.formErrors, \"\"\"\n Should return an error message containing 'id'\"\"\"\n assert res.client_mutation_id == \"test\"\n\n # User B tries to edit an address of User A\n req.user = user_b\n data[\"id\"] = crypto_address_a.id\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 403, \"\"\"\n Should return error when address does not belong to the logged in user\"\"\"\n assert res.client_mutation_id == \"test\"\n\n req.user = user_a\n data[\"coin_id\"] = 199\n res = 
mut.mutate(None, resolve_info, data)\n assert res.status == 404, \"Should return error when coin does not exist\"\n assert \"coin_id\" in res.formErrors, \"\"\"\n Should return an error message containing 'account_id'\"\"\"\n assert res.client_mutation_id == \"test\"\n\n data[\"coin_id\"] = coin_a.id\n res = mut.mutate(None, resolve_info, data)\n assert res.status == 200, \"Should return success message when update was successfully started\"\n assert res.address.address == \"changed_addr\", \"Address must be 'changed_addr'\"\n assert res.client_mutation_id == \"test\"\n assert res.address.coin.id == coin_a.id, \"Coin should be Coin A now\"\n assert res.address.watch, \"Watch should be True\"\n\n\ndef test_refresh_transactions_mutation(monkeypatch):\n # 1 Should not be able to to trigger mutation when unauthenticated (status 403)\n # 2 Should not be able to update other users accounts (status 403)\n # 3 Should return error message when no id or wrong data type was supplied (status 400)\n # 4 Should return success message when update was successfuly started (status 200)\n\n usera = mixer.blend(\"auth.User\")\n userb = mixer.blend(\"auth.User\")\n\n mixer.blend(\"accounts.Account\", owner=usera) # id 1\n mixer.blend(\"accounts.Account\", owner=userb) # id 2\n\n mut = schema.AccountRefreshTransactionsMutation()\n req = RequestFactory().get(\"/\")\n req.user = AnonymousUser()\n resolveInfo = mock_resolve_info(req)\n\n data = {\"account_id\": \"1\"}\n res = mut.mutate(None, resolveInfo, data)\n assert res.status == 403, 'Should not be able to to trigger mutation when unauthenticated (status 403)'\n\n req.user = userb\n res = mut.mutate(None, resolveInfo, data)\n assert res.status == 403, 'Should not be able to update other users accounts (status 403)'\n\n res = mut.mutate(None, resolveInfo, {})\n assert res.status == 400, 'Should return error status when supplied no input at all'\n\n data = {\"account_id\": \"a\"}\n res = mut.mutate(None, resolveInfo, data)\n assert 
res.status == 400, 'Should return error status when supplied incorrect input'\n\n data = {\"account_id\": \"-1\"}\n res = mut.mutate(None, resolveInfo, data)\n assert res.status == 400, 'Should return error status when supplied incorrect input'\n\n # TODO: Find reason why this won't work\n #\n ## This prints True:\n # print(\n # hasattr(backend.transactions.fetchers.generic_exchange,\n # \"update_exchange_trx_generic\"))\n #\n ## but the Lambda is never used\n #monkeypatch.setattr(backend.transactions.fetchers.generic_exchange,\n # \"update_exchange_trx_generic\",\n # new_update_exchange_trx_generic)\n #req.user = usera\n #res = mut.mutate(None, resolveInfo, data)\n #assert res.status == 200, 'Should return success message when update was successfuly started (status 200)'\n" }, { "alpha_fraction": 0.43122807145118713, "alphanum_fraction": 0.5905263423919678, "avg_line_length": 34.63750076293945, "blob_id": "7f55eebe959de74613d9cca8a5967bcc07249871", "content_id": "cb7471541dfd763040ed4df2bd4474fc4dc0e132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2850, "license_type": "no_license", "max_line_length": 71, "num_lines": 80, "path": "/backend/utils/tests/test_utils.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains all tests for the utility functions\"\"\"\n\nimport pytest\nfrom _pytest.monkeypatch import MonkeyPatch\nimport cryptocompare\n\nfrom ..utils import exchange_can_batch, get_name_price\n\n\ndef test_exchange_can_batch():\n assert exchange_can_batch(\"binance\") == False\n assert exchange_can_batch(\"cryptopia\") == True\n\n\npytestmark = pytest.mark.django_db\n\n\ndef new_get_historical_price(base, target, timestamp):\n \"\"\"\n Replaces calls to cryptocomapere.get_historical_price function\n\n Names | Date\t | timestamp |Rate\n -----------|------------|----------- |-----------\n BTC -> ETH | 2017-12-11\t| 1512950400 | 32.91\n BTC -> EUR | 
2017-12-11\t| 1512950400 | 13006.11\n BTC -> EUR | 2018-01-05\t| 1514764800 | 12268.25\n XLM -> BTC | 2018-01-02\t| 1509753600 | 0.00003136\n LTC -> EUR | 2017-11-07\t| 1515110400 | 48.52\n BNB -> BTC | 2017-12-28\t| 1514419200 | 0.0006253\n \"\"\"\n\n if base == \"BTC\" and target == \"ETH\" and timestamp == 1512950400:\n return {\"BTC\": {\"ETH\": 32.91}}\n elif base == \"BTC\" and target == \"EUR\" and timestamp == 1512950400:\n return {\"BTC\": {\"EUR\": 13006.11}}\n elif base == \"BTC\" and target == \"EUR\" and timestamp == 1514764800:\n return {\"BTC\": {\"EUR\": 12268.25}}\n elif base == \"XLM\" and target == \"BTC\" and timestamp == 1509753600:\n return {\"XLM\": {\"BTC\": 0.00003136}}\n elif base == \"LTC\" and target == \"EUR\" and timestamp == 1515110400:\n return {\"LTC\": {\"EUR\": 48.52}}\n elif base == \"BNB\" and target == \"BTC\" and timestamp == 1514419200:\n return {\"BNB\": {\"BTC\": 0.0006253}}\n\n return {} # fail since there is no data for this request\n\n\ndef test_name_converter(monkeypatch: MonkeyPatch):\n \"\"\"\n Tests the conversion of one name to another at a specific date\n\n Amount\t | To | Date\t | Result\n ---------|-----|------------|---------------\n 5\t BTC | ETH | 2017-12-11 | 164.55 ETH\n 1\t BTC | EUR | 2017-12-11 | 13006.11 EUR\n 0.1\t BTC | EUR | 2018-01-05 | 1226.825 EUR\n 1500 XLM | BTC | 2018-01-02 | 0.04704 BTC\n 5\t LTC | EUR | 2017-11-07 | 242.6 EUR\n 300\t BNB | BTC | 2017-12-28 | 0.18759 BTC\n \"\"\"\n monkeypatch.setattr(cryptocompare, \"get_historical_price\",\n new_get_historical_price)\n\n result = get_name_price(5, \"BTC\", \"ETH\", 1512950400)\n assert round(result, 2) == 164.55\n\n result = get_name_price(1, \"BTC\", \"EUR\", 1512950400)\n assert round(result, 2) == 13006.11\n\n result = get_name_price(0.1, \"BTC\", \"EUR\", 1514764800)\n assert round(result, 3) == 1226.825\n\n result = get_name_price(1500, \"XLM\", \"BTC\", 1509753600)\n assert round(result, 5) == 0.04704\n\n result = get_name_price(5, 
\"LTC\", \"EUR\", 1515110400)\n assert round(result, 1) == 242.6\n\n result = get_name_price(300, \"BNB\", \"BTC\", 1514419200)\n assert round(result, 6) == 0.18759" }, { "alpha_fraction": 0.8359788656234741, "alphanum_fraction": 0.8359788656234741, "avg_line_length": 30.5, "blob_id": "a94e078f3ea6049254fcf1bb439a7faa6fa4dafb", "content_id": "cc6401df50aed986bf04f3969c0e0534a17ecaa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 64, "num_lines": 6, "path": "/backend/accounts/admin.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom backend.accounts.models import CryptoAddress, Peer, Account\n\nadmin.site.register(CryptoAddress)\nadmin.site.register(Peer)\nadmin.site.register(Account)\n" }, { "alpha_fraction": 0.6187499761581421, "alphanum_fraction": 0.6298295259475708, "avg_line_length": 38.550559997558594, "blob_id": "aed2dd7fb87561e8b62e9b6f0d6e2cd48824728d", "content_id": "2e755dc3d0fd24586f4dccc5ba7cdca25dfed958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3520, "license_type": "no_license", "max_line_length": 83, "num_lines": 89, "path": "/backend/transactions/tests/test_importer_livecoin.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains all tests for the generic exchange fetcher\"\"\"\nimport random\nimport pytest\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom mixer.backend.django import mixer\nfrom faker import Faker\nimport cryptocompare\n\nimport backend.transactions.schema as schema\nfrom backend.accounts.models import Account\n\nfrom backend.transactions.importers.livecoin import import_data_livecoin\n\npytestmark = pytest.mark.django_db\n\n\ndef make_fake_transaction_data(date=None,\n transaction_type=None,\n transaction_type_raw=None,\n 
spent_currency=None,\n spent_amount=None,\n source_peer=None,\n acquired_currency=None,\n acquired_amount=None,\n target_peer=None,\n fee_currency=None,\n fee_amount=None,\n tags=None):\n \"\"\"Generate a fake transaction data input. Mixer is unable to blend these\"\"\"\n # pylint: disable=E1101\n\n fake = Faker()\n transaction_data = schema.TransactionData()\n transaction_data.date = date or fake.date_time_between(\n start_date=\"-30y\", end_date=\"now\",\n tzinfo=None).strftime(\"%d.%m.%Y %H:%m:%S\")\n transaction_data.transaction_type = transaction_type or \"exchange\"\n transaction_data.transaction_type_raw = transaction_type_raw or \"Buy\"\n transaction_data.spent_currency = \\\n spent_currency or fake.cryptocurrency_code()\n transaction_data.spent_amount = spent_amount or random.uniform(1, 20)\n transaction_data.source_peer = source_peer or 1\n transaction_data.acquired_currency = \\\n acquired_currency or fake.cryptocurrency_code()\n transaction_data.acquired_amount = \\\n acquired_amount or random.uniform(0.001, 10)\n transaction_data.target_peer = target_peer or 1\n transaction_data.fee_currency = fee_currency or transaction_data.spent_currency\n transaction_data.fee_amount = fee_amount or random.uniform(0.000001, 0.001)\n transaction_data.tags = tags or [\"tag1\", \"tag2\"]\n return transaction_data\n\n\ndef new_get_historical_price(base, target, date):\n \"\"\"Fake crypto compare API\"\"\"\n return {base: {target: 10}}\n\n\ndef test_import_csv_livecoin(monkeypatch: MonkeyPatch):\n user = mixer.blend(\"auth.User\")\n livecoin: Account = mixer.blend(\n \"accounts.Account\", owner=user, service_type=\"livecoin\")\n\n monkeypatch.setattr(cryptocompare, \"get_historical_price\",\n new_get_historical_price)\n\n data = schema.ImportTransactionInput()\n data.service_type = \"livecoin\"\n data.import_mechanism = \"csv\"\n data.transactions = [\n make_fake_transaction_data(),\n make_fake_transaction_data(),\n make_fake_transaction_data(\n 
transaction_type_raw=\"Deposit\"), # should be skipped\n make_fake_transaction_data(\n acquired_amount=10, acquired_currency=\"ETH\"),\n make_fake_transaction_data(transaction_type=\"income\"),\n make_fake_transaction_data(transaction_type=\"transfer\"),\n make_fake_transaction_data(\n spent_amount=0,\n spent_currency=\"h\",\n acquired_amount=0.01,\n acquired_currency=\"BTC\",\n transaction_type=\"income\"),\n make_fake_transaction_data(transaction_type=\"unkown type\")\n ]\n\n res = import_data_livecoin(data, user)\n assert len(res) == 7\n" }, { "alpha_fraction": 0.6316964030265808, "alphanum_fraction": 0.6370535492897034, "avg_line_length": 34, "blob_id": "21f7bd696be7b98552606d80b4365e141d279542", "content_id": "51eb5aaccb84b7d767479ee42a693b5c89d3f83b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2240, "license_type": "no_license", "max_line_length": 79, "num_lines": 64, "path": "/backend/coins/schema.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "'''Contains all Graphql code for the coins application'''\n\nimport graphene\nimport celery\n\nfrom graphene_django.types import DjangoObjectType\n\nfrom backend.coins.models import Coin\nfrom backend.coins.tasks import async_update_supported_coins\n\n\nclass CoinType(DjangoObjectType):\n '''The coin GraphQL type'''\n\n class Meta:\n '''The connection between the ype and the model'''\n model = Coin\n\n\nclass Query(object):\n '''Get all coins where user has access rights'''\n all_coins = graphene.List(CoinType)\n\n def resolve_all_coins(self, info):\n '''Returns all available coins'''\n if not info.context.user.is_authenticated:\n return Coin.objects.none()\n return Coin.objects.all()\n\n\nclass CoinRefreshTransactionsMutation(graphene.relay.ClientIDMutation):\n '''GraphQL Mutation for refreshing supported coins'''\n status = graphene.Int()\n formErrors = graphene.String()\n msg = graphene.String()\n\n @classmethod\n def 
mutate(cls, root, info, input) -> \"CoinRefreshTransactionsMutation\":\n '''Runs the celery background task to update the coins'''\n if not info.context.user.is_superuser:\n return CoinRefreshTransactionsMutation(\n status=403, client_mutation_id=input['client_mutation_id'])\n\n if hasattr(celery, \"result\") and celery.result.AsyncResult(\n \"task_update_coins\").status == \"RUNNING\":\n print(\"skipping task\")\n return CoinRefreshTransactionsMutation(\n msg=\"Task is already running\",\n status=202,\n client_mutation_id=input['client_mutation_id'])\n else:\n try:\n print(\"starting task\")\n async_update_supported_coins.apply_async(\n task_id=\"task_update_coins\")\n except async_update_supported_coins.OperationalError as err:\n print(\"Sending task raised: %r\", err)\n return CoinRefreshTransactionsMutation(\n status=500, client_mutation_id=input['client_mutation_id'])\n\n return CoinRefreshTransactionsMutation(\n msg=\"Working\",\n status=200,\n client_mutation_id=input['client_mutation_id'])\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6717171669006348, "avg_line_length": 27.285715103149414, "blob_id": "1e1b7b9d15e1ccc8711d1e5a58bddc2194b9a1d1", "content_id": "6fc06fc3fc2cf59fa38f219bc788dbe8324f842d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "no_license", "max_line_length": 66, "num_lines": 21, "path": "/backend/coins/tests/test_models.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "'''Contains all model tests for this application'''\nimport pytest\nfrom mixer.backend.django import mixer\nfrom backend.coins.models import Coin\n\npytestmark = pytest.mark.django_db\n\n\ndef test_coin_creation():\n '''Test Coin object creation'''\n obj = mixer.blend(\"coins.Coin\")\n assert obj.pk > 0, \"Should create a Coin instance\"\n\n\ndef test_coin_str_func():\n '''Test Coin object string function'''\n name = \"BTC - Bitcoin\"\n 
coin: Coin = mixer.blend(\n \"coins.Coin\", cc_id=50, symbol=\"BTC\", full_name=\"Bitcoin\")\n\n assert coin.__str__() == name, \"Should be the coins's name\"\n" }, { "alpha_fraction": 0.5549715161323547, "alphanum_fraction": 0.5803766846656799, "avg_line_length": 34.671875, "blob_id": "1909e5c0a4b8b54c1cd45e8323557ba8f14ed6e9", "content_id": "ffc091cb5734ffb124909851f8e07f8cf78e0833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2283, "license_type": "no_license", "max_line_length": 162, "num_lines": 64, "path": "/backend/transactions/migrations/0002_auto_20180325_1753.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-01 17:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport taggit.managers\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('taggit', '0002_auto_20150616_2121'),\n ('accounts', '0003_peer_class_type'),\n ('transactions', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='transaction',\n options={'ordering': ('-date',)},\n ),\n migrations.RemoveField(\n model_name='transaction',\n name='source_account',\n ),\n migrations.RemoveField(\n model_name='transaction',\n name='target_account',\n ),\n migrations.AddField(\n model_name='transaction',\n name='source_peer',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, related_name='source_peer', to='accounts.Peer'),\n ),\n migrations.AddField(\n model_name='transaction',\n name='tags',\n field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),\n ),\n migrations.AddField(\n model_name='transaction',\n name='target_peer',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, related_name='target_peer', to='accounts.Peer'),\n ),\n 
migrations.AlterField(\n model_name='transaction',\n name='book_price_fee_btc',\n field=models.DecimalField(decimal_places=10, default=0, max_digits=19),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='book_price_fee_eur',\n field=models.DecimalField(decimal_places=10, default=0, max_digits=19),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='fee_amount',\n field=models.DecimalField(decimal_places=10, default=0, max_digits=19),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='fee_currency',\n field=models.CharField(default='---', max_length=10),\n ),\n ]\n" }, { "alpha_fraction": 0.5465393662452698, "alphanum_fraction": 0.5620524883270264, "avg_line_length": 28.40350914001465, "blob_id": "730cf8589ee480099682465a3e4863ebb34ebf85", "content_id": "ea134f4612b04742231679539f8fe36f825bb2b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1676, "license_type": "no_license", "max_line_length": 75, "num_lines": 57, "path": "/backend/coins/tasks.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains all async tasks necessary for Accounts\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\nimport cryptocompare\n\nfrom backend.celery import app\nfrom backend.coins.models import Coin\n\n\[email protected](bind=True)\ndef async_update_supported_coins(self):\n \"\"\"Starts a celery async task to update supported coins\"\"\"\n self.update_state(state='RUNNING', meta={'current': 0, 'total': 100})\n coins_list = cryptocompare.get_coin_list(False)\n new_coins = 0\n updated = 0\n length = len(coins_list)\n\n print_counter = 0\n for idx, coin_key in enumerate(coins_list):\n item = coins_list.get(coin_key)\n\n try:\n _id = int(item.get(\"Id\"))\n except ValueError:\n continue\n\n try:\n coin: Coin = Coin.objects.get(cc_id=_id)\n updated += 1\n except Coin.DoesNotExist:\n coin = Coin()\n coin.cc_id = _id\n new_coins 
+= 1\n\n coin.img_url = item.get('ImageUrl', '')\n coin.name = item.get('Name', '')\n coin.symbol = item.get('Symbol', '')\n coin.coin_name = item.get('CoinName', '')\n coin.full_name = item.get('FullName', '')\n coin.save()\n\n percent_done = int((idx + 1) / length * 100)\n\n self.update_state(\n state='RUNNING', meta={\n 'current': percent_done,\n 'total': 100\n })\n\n print_counter += 1\n if print_counter is 30:\n print(\"Status: {}%\".format(percent_done))\n print_counter = 0\n\n print(\"new: {} updated: {}\".format(new_coins, updated))\n self.update_state(state='SUCCESS', meta={'current': 100, 'total': 100})\n" }, { "alpha_fraction": 0.676070511341095, "alphanum_fraction": 0.696221649646759, "avg_line_length": 31.016128540039062, "blob_id": "176f046d034a970cbc206ee8c7a574c43b1da303", "content_id": "e5b3f0ccd2d31dbbfab08af6192d959b6e1211de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1985, "license_type": "no_license", "max_line_length": 72, "num_lines": 62, "path": "/backend/transactions/tests/test_models.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import pytest\nfrom django.utils.timezone import now\nfrom mixer.backend.django import mixer\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom backend.accounts.models import Account\nfrom backend.transactions.models import Transaction\nfrom backend.transactions.models import TransactionUpdateHistoryEntry\nfrom backend.test_utils.utils import gen_fake_transaction\n\n# We need to do this so that writing to the DB is possible in our tests.\npytestmark = pytest.mark.django_db\n\n# Great introduction to TDD with Python + Django:\n# https://www.youtube.com/watch?v=41ek3VNx_6Q\n\n\ndef test_transaction_creation():\n obj = gen_fake_transaction()\n assert obj.pk > 0, \"Should create an Transaction instance\"\n\n\ndef test_transaction_str_func():\n name = \"50.0 BTC => 150.01 ETH ==> 300.0 EUR\"\n t = 
gen_fake_transaction(\n spent_amount=50.0000,\n spent_currency=\"BTC\",\n acquired_amount=150.0100,\n acquired_currency=\"ETH\",\n book_price_eur=300)\n\n assert t.__str__() == name, \"Should be the transaction's name\"\n\n\ndef test_transaction_history_entry_creation():\n account: Account = mixer.blend(\"accounts.Account\")\n obj = mixer.blend(\n \"transactions.TransactionUpdateHistoryEntry\", account=account)\n assert obj.pk > 0, \"Should create an Transaction instance\"\n\n\ndef test_transaction_history_entry_str_func():\n account: Account = mixer.blend(\"accounts.Account\")\n\n datea = now()\n dateb = now()\n\n entrya: TransactionUpdateHistoryEntry = mixer.blend(\n \"transactions.TransactionUpdateHistoryEntry\",\n date=datea,\n account=account,\n fetched_transactions=3)\n entryb: TransactionUpdateHistoryEntry = mixer.blend(\n \"transactions.TransactionUpdateHistoryEntry\",\n date=dateb,\n account=account,\n fetched_transactions=6)\n\n namea = \"{} {} {}\".format(1, datea, 3)\n nameb = \"{} {} {}\".format(1, dateb, 6)\n\n assert entrya.__str__() == namea\n assert entryb.__str__() == nameb\n" }, { "alpha_fraction": 0.732891857624054, "alphanum_fraction": 0.7373068332672119, "avg_line_length": 36.75, "blob_id": "6e6015e76732eb7e25e7ed67a364872b122cc413", "content_id": "e4f702be7602d732029eaa78dccf6b175f2dbd50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 86, "num_lines": 24, "path": "/backend/accounts/tasks.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "\"\"\"Contains all async tasks necessary for Accounts\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nfrom backend.celery import app\nfrom backend.accounts.models import Account\n\nfrom backend.transactions.fetchers.generic_exchange import update_exchange_trx_generic\nfrom backend.transactions.fetchers.coinbase import 
update_coinbase_trx\n\n\[email protected](bind=True)\ndef async_update_account_trx(self, account_id):\n \"\"\"Starts a celery async task to update transaction for an account\"\"\"\n\n account: Account = Account.objects.get(pk=account_id)\n print(\"Starting task update transactions for account: \", account.name)\n\n self.update_state(state='RUNNING', meta={'current': 0, 'total': 3})\n if account.service_type == \"coinbase\":\n update_coinbase_trx(account)\n else:\n update_exchange_trx_generic(account)\n self.update_state(state='SUCCESS', meta={'current': 3, 'total': 3})\n" }, { "alpha_fraction": 0.6167800426483154, "alphanum_fraction": 0.6240362524986267, "avg_line_length": 34.564517974853516, "blob_id": "0251e85f96ac5bc5bc7f631f168804fbd116e66e", "content_id": "02d3778cf79f71beeebc39f8c4829ff4d72c356c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2205, "license_type": "no_license", "max_line_length": 79, "num_lines": 62, "path": "/backend/test_utils/utils.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import pytest\n\nimport random\nfrom faker import Faker\nfrom mixer.backend.django import mixer\n\nfrom django.utils import timezone\n\nfrom graphql.execution.base import ResolveInfo\n\nfrom backend.transactions.models import Transaction\n\npytestmark = pytest.mark.django_db\n\n\ndef mock_resolve_info(req) -> ResolveInfo:\n return ResolveInfo(None, None, None, None, None, None, None, None, None,\n req)\n\n\ndef gen_fake_transaction(owner=None,\n date=None,\n spent_currency=None,\n spent_amount=None,\n source_peer=None,\n acquired_currency=None,\n acquired_amount=None,\n target_peer=None,\n fee_currency=None,\n fee_amount=None,\n book_price_btc=None,\n book_price_eur=None,\n tags=None) -> Transaction:\n \"\"\"Generate a fake Transaction. 
Mixer cannot handle this class\"\"\"\n fake = Faker()\n transaction = Transaction()\n transaction.owner = owner or mixer.blend(\"auth.User\")\n transaction.date = date or timezone.make_aware(\n fake.date_time_between(start_date=\"-30y\", end_date=\"now\", tzinfo=None))\n\n transaction.spent_currency = spent_currency or fake.cryptocurrency_code()\n transaction.spent_amount = spent_amount or random.uniform(1, 20)\n transaction.source_peer = source_peer or mixer.blend(\"accounts.Peer\")\n\n transaction.acquired_currency = \\\n acquired_currency or fake.cryptocurrency_code()\n transaction.acquired_amount = acquired_amount or random.uniform(1, 20)\n transaction.target_peer = target_peer or mixer.blend(\"accounts.Peer\")\n\n transaction.fee_currency = fee_currency or fake.cryptocurrency_code()\n transaction.fee_amount = fee_amount or random.uniform(0, 1)\n transaction.book_price_btc = book_price_btc or random.uniform(0, 20)\n transaction.book_price_eur = book_price_eur or random.uniform(0, 50)\n\n transaction.save()\n\n if tags:\n for tag in tags:\n transaction.tags.add(tag)\n transaction.save()\n\n return transaction\n" }, { "alpha_fraction": 0.37911438941955566, "alphanum_fraction": 0.40715867280960083, "avg_line_length": 30.95754623413086, "blob_id": "ffa6b6686a423d98fa0bc6273df22413e666c3b3", "content_id": "6a8924baf18654919ff1df74cbe8090d35c744d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13550, "license_type": "no_license", "max_line_length": 97, "num_lines": 424, "path": "/backend/transactions/tests/test_fetcher_coinbase.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\nimport json\nimport pytest\nfrom django.utils.timezone import now\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom mixer.backend.django import mixer\nimport coinbase\nimport cryptocompare\n\nfrom backend.accounts.models import Account\nfrom 
backend.transactions.models import Transaction\n\nfrom ..fetchers.coinbase import update_coinbase_trx\n\npytestmark = pytest.mark.django_db\n\n\nclass MockAPIObject(dict):\n \"\"\"Mock of coinbase APIObject\"\"\"\n\n def __init__(self, pagination=None, data=None):\n self.__pagination = pagination or {\"next_uri\": None}\n self[\"data\"] = data or []\n super(MockAPIObject, self).__init__()\n\n @property\n def pagination(self):\n \"\"\"Return the pagination data\"\"\"\n return self.__pagination\n\n\ndef new_get_accounts(self):\n \"\"\"Fake coinbase get accounts for user\"\"\"\n return MockAPIObject(data=[{\n \"id\": \"fiat_id\",\n \"type\": \"fiat\"\n }, \n {\n \"id\": \"wallet_id_btc\",\n \"type\": \"wallet\"\n }, \n {\n \"id\": \"wallet_id_ltc\",\n \"type\": \"wallet\"\n }])\n\n\ndef new_get_buys(self, cb_account_id):\n \"\"\"Fake get buys for account\"\"\"\n if cb_account_id == \"wallet_id_btc\":\n return MockAPIObject(data=[\n {\n \"created_at\": \"2017-12-27T15:16:22Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.04,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 300,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 4.4,\n \"currency\": \"EUR\"\n }\n }]\n },\n {\n \"created_at\": \"2017-12-27T15:16:22Z\",\n \"resource\": \"buy\",\n # should be skipped since it was canceled\n \"status\": \"canceled\"\n },\n {\n \"created_at\": \"2018-01-28T13:11:35Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.05,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 350,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 4.50,\n \"currency\": \"EUR\"\n }\n }]\n },\n {\n \"created_at\": \"2018-01-28T13:11:35Z\",\n # should be skipped and not end up in the database (neither sell nor buy)\n # and it's status is canceled\n \"resource\": \"should be skipped\",\n \"status\": \"canceled\",\n \"amount\": {\n \"amount\": 0.05,\n 
\"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 350,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 4.50,\n \"currency\": \"EUR\"\n }\n }]\n }\n ])\n elif cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(\n data=[{\n \"created_at\": \"2018-01-22T12:26:35Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 2.2,\n \"currency\": \"LTC\"\n },\n \"total\": {\n \"amount\": 260,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": \"2018-01-22T11:04:01Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 1.4,\n \"currency\": \"LTC\"\n },\n \"total\": {\n \"amount\": 100,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 3,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()\n\n\ndef new_get_sells(self, cb_account_id):\n \"\"\"Fake get sells for account\"\"\"\n if cb_account_id == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": \"2018-01-25T11:24:52Z\",\n \"resource\": \"sell\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.06,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 800,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 7,\n \"currency\": \"EUR\"\n }\n }]\n }])\n elif cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(\n data=[{\n \"created_at\": \"2018-01-23T07:23:54Z\",\n \"resource\": \"sell\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.3,\n \"currency\": \"LTC\"\n },\n \"total\": {\n \"amount\": 80,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 2,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()\n\n\ndef new_get_transactions(self, cb_account_id):\n \"\"\"Fake get transactions for account\"\"\"\n if cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(data=[{\n \"id\": 
\"12234-6666-8888-0000-1111111111\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"-0.2\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"-46.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-15T15:00:00Z\",\n \"updated_at\": \"2017-12-15T15:00:00Z\",\n \"resource\": \"transaction\",\n \"network\": {\n \"status\": \"confirmed\",\n \"hash\": \"123456789\",\n \"transaction_fee\": {\n \"amount\": \"0.001\",\n \"currency\": \"LTC\"\n },\n \"transaction_amount\": {\n \"amount\": \"0.199\",\n \"currency\": \"LTC\"\n },\n \"confirmations\": 54000\n },\n \"to\": {\n \"resource\": \"litecoin_address\",\n \"address\": \"LcnAddress1\",\n \"currency\": \"LTC\"\n },\n \"details\": {\n \"title\": \"Sent Litecoin\",\n \"subtitle\": \"To Litecoin address\"\n }\n }, \n {\n \"id\": \"aaaaaaaaa-aaaa-aaaaaa-eeee-aaaaaa\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"-0.4\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"-90.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-11T19:00:00Z\",\n \"updated_at\": \"2017-12-11T19:00:00Z\",\n \"resource\": \"transaction\",\n \"instant_exchange\": False,\n \"network\": {\n \"status\": \"confirmed\",\n \"hash\": \"123456789\",\n \"transaction_fee\": {\n \"amount\": \"0.001\",\n \"currency\": \"LTC\"\n },\n \"transaction_amount\": {\n \"amount\": \"0.399\",\n \"currency\": \"LTC\"\n },\n \"confirmations\": 15387\n },\n \"to\": {\n \"resource\": \"litecoin_address\",\n \"address\": \"LcnAddress2\",\n \"currency\": \"LTC\"\n },\n \"details\": {\n \"title\": \"Sent Litecoin\",\n \"subtitle\": \"To Litecoin address\"\n }\n }, \n {\n \"id\": \"aaaaaaaaa-aaaa-aaaaaa-eeee-aaaaaa\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"1.0\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"90.00\",\n \"currency\": 
\"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-11T19:00:00Z\",\n \"updated_at\": \"2017-12-11T19:00:00Z\",\n \"resource\": \"transaction\",\n \"instant_exchange\": False,\n \"network\": {\n \"status\": \"off_blockchain\",\n },\n }])\n else:\n return MockAPIObject()\n\n\ndef new_get_historical_price(base, target, date):\n \"\"\"Fake crypto compare API\"\"\"\n if base == \"BTC\" and target == \"EUR\":\n return {\"BTC\": {\"EUR\": 10000}}\n elif base == \"EUR\" and target == \"BTC\":\n return {\"EUR\": {\"BTC\": 0.00012}}\n elif base == \"LTC\" and target == \"BTC\":\n return {\"LTC\": {\"BTC\": 0.02}}\n elif base == \"LTC\" and target == \"EUR\":\n return {\"LTC\": {\"EUR\": 250}}\n\n\ndef test_refresh_coinbase_trx(monkeypatch: MonkeyPatch):\n \"\"\"Test import coinbase transactions\"\"\"\n user = mixer.blend(\"auth.User\")\n account: Account = mixer.blend(\n \"accounts.Account\", owner=user, service_type=\"coinbase\", api_key=\"123\", api_secret=\"456\")\n\n monkeypatch.setattr(cryptocompare, \"get_historical_price\",\n new_get_historical_price)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_accounts\",\n new_get_accounts)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_transactions\",\n new_get_transactions)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_buys\",\n new_get_buys)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_sells\",\n new_get_sells)\n\n update_coinbase_trx(account)\n transaction = Transaction.objects.filter(target_peer=account)\n assert transaction.count() == 9, \"Should import nine transations\"\n\n\ndef new_get_buys_transaction_history(self, cb_account):\n \"\"\"Fake coinbase get buys transation history\"\"\"\n date: datetime = now()\n if cb_account == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": str(date + timedelta(days=-1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"total\": 
{\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 1,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": str(date + timedelta(days=1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 0.5,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()\n\n\ndef test_update_trx_coinbase_transaction_history(monkeypatch: MonkeyPatch):\n \"\"\" Test, that the update function does not import \"\"\"\n user = mixer.blend(\"auth.User\")\n account: Account = mixer.blend(\n \"accounts.Account\", owner=user, service_type=\"coinbase\", api_key=\"123\", api_secret=\"456\")\n\n date: datetime = now()\n\n monkeypatch.setattr(cryptocompare, \"get_historical_price\",\n new_get_historical_price)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_accounts\",\n new_get_accounts)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_transactions\",\n new_get_transactions)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_buys\",\n new_get_buys_transaction_history)\n monkeypatch.setattr(coinbase.wallet.client.Client, \"get_sells\",\n lambda self, cb_account: MockAPIObject())\n\n mixer.blend(\n \"transactions.TransactionUpdateHistoryEntry\",\n date=date,\n account=account,\n fetched_transactions=3)\n\n update_coinbase_trx(account)\n transaction = Transaction.objects.filter(target_peer=account)\n assert transaction.count(\n ) == 1, \"Should not import transactions older than last update time\"\n" }, { "alpha_fraction": 0.7288590669631958, "alphanum_fraction": 0.7315436005592346, "avg_line_length": 32.8636360168457, "blob_id": "15962b1df2aa6f91a3b2c2805fe735fc2a9df0e3", "content_id": "dcd002963f840197c2792ad1888a1e47af896298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, 
"license_type": "no_license", "max_line_length": 82, "num_lines": 22, "path": "/backend/accounts/tests/test_tasks.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "import pytest\nfrom ...celery import app as celery_app\nfrom ..tasks import async_update_account_trx\nfrom mixer.backend.django import mixer\nfrom backend.accounts.models import Account\n\npytestmark = pytest.mark.django_db\n\n\ndef new_update_exchange_trx_generic(account_id):\n return \"running ...\"\n\n\n# TODO: Write a working test, update_exchange_trx_generic is not patched correctly\n@celery_app.task\ndef test_async_update_exchange_trx_generic(monkeypatch):\n obj: Account = mixer.blend(\"accounts.Account\")\n monkeypatch.setattr(\"backend.transactions.fetchers.generic_exchange\",\n async_update_account_trx)\n assert True\n # assert async_update_exchange_trx_generic.delay(\n # obj.id).get(timeout=10) == \"running\"\n" }, { "alpha_fraction": 0.6016949415206909, "alphanum_fraction": 0.6419491767883301, "avg_line_length": 20.454545974731445, "blob_id": "02faa1750bf1f2623016227d6946cbf3e31c8c90", "content_id": "bbb8a39b47016c83587f7d80d1fe36955d163aa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "no_license", "max_line_length": 62, "num_lines": 22, "path": "/backend/accounts/migrations/0002_auto_generate_default_objects.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.2 on 2018-03-17 07:16\n\nfrom django.db import migrations\n\n\ndef gen_default(apps, schema_editor):\n \"\"\"Generate default models for the accounts application\"\"\"\n Peer = apps.get_model(\"accounts\", \"Peer\")\n peer = Peer()\n peer.name = \"Unknown Peer\"\n peer.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"accounts\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.RunPython(gen_default),\n ]\n" }, { "alpha_fraction": 
0.5684717893600464, "alphanum_fraction": 0.5755599737167358, "avg_line_length": 29.669565200805664, "blob_id": "92838973fff2bf0e5fe1fc71e36b954264dbe843", "content_id": "e6c9a47801684a0a2db6729b64bec26a7d6cc5c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3527, "license_type": "no_license", "max_line_length": 74, "num_lines": 115, "path": "/backend/accounts/models.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "from django.db import models\n\nfrom backend.coins.models import Coin\n\n\nclass Peer(models.Model):\n \"\"\"\n Database model for a peer. A peer is something that can\n send or receive value. Usually it has an address or\n multiple addresses associated with it.\n \"\"\"\n id = models.AutoField(primary_key=True)\n\n owner = models.ForeignKey(\n to='auth.user',\n on_delete=models.PROTECT,\n )\n\n name = models.CharField(max_length=100)\n\n class_type = models.CharField(max_length=50, editable=False)\n\n def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None,\n class_type=\"Peer\"):\n \"\"\"Set class type\"\"\"\n self.class_type = class_type\n super(Peer, self).save(force_insert, force_update, using,\n update_fields)\n\n def __str__(self):\n return \"[{}] {}\".format(self.class_type, self.name)\n\n\nclass CryptoAddress(models.Model):\n \"\"\"A crypto address to identify value flows\"\"\"\n\n class Meta:\n ordering = (\"id\", )\n\n id = models.AutoField(primary_key=True)\n\n # The peers this address belongs to\n peer = models.ForeignKey(Peer, on_delete=models.PROTECT)\n\n coin = models.ForeignKey(Coin, on_delete=models.PROTECT)\n\n address = models.CharField(max_length=256)\n\n address_str = models.CharField(max_length=300, blank=True)\n\n watch = models.BooleanField(default=False)\n\n def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None):\n \"\"\"Calculate the address string before 
save\"\"\"\n self.address_str = \"{}:{}\".format(self.coin.symbol, self.address)\n super(CryptoAddress, self).save(force_insert, force_update, using,\n update_fields)\n\n def __str__(self):\n return self.address_str\n\n\nclass Account(Peer):\n '''\n An Account represents an Exchange like Binance or Cryptopia.\n Transactions from accounts are usually fetched via an API.\n In some cases only csv file import might be available.\n\n To see which account supports which type of import see the\n SERVICE_TYPES tuple.\n '''\n SERVICE_TYPES = (('binance', 'Binance',\n 'api'), ('bitfinex', 'Bitfinex',\n 'api'), ('coinbase', 'Coinbase', 'api'),\n ('cryptopia', 'Cryptopia',\n 'api'), ('ethereum_wallet', 'Ethereum Wallet',\n 'public_address_import'),\n ('kraken', 'Kraken', 'api'), ('livecoin', 'Livecoin',\n 'manual'))\n\n slug = models.SlugField(max_length=50)\n\n service_type = models.CharField(max_length=50)\n\n api_key = models.CharField(max_length=100, blank=True, null=True)\n\n api_secret = models.CharField(max_length=100, blank=True, null=True)\n\n creation_date = models.DateTimeField(auto_now_add=True)\n\n symbols = models.CharField(\n max_length=1000,\n blank=True,\n null=True,\n )\n\n def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None):\n '''\n Save is overridden to set properly call Peers save method with the\n class_type parameter\n '''\n super(Account, self).save(force_insert, force_update, using,\n update_fields, \"Account\")\n" }, { "alpha_fraction": 0.865217387676239, "alphanum_fraction": 0.865217387676239, "avg_line_length": 37.33333206176758, "blob_id": "ea0e33cd1891e72d813200859d83aad747d912de", "content_id": "8e4212ec62c87535e23acd733060927f55ae8634", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 82, "num_lines": 6, "path": "/backend/transactions/admin.py", "repo_name": "fusion44/crypternity-backend", 
"src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom backend.transactions.models import Transaction, TransactionUpdateHistoryEntry\n# Register your models here.\n\nadmin.site.register(Transaction)\nadmin.site.register(TransactionUpdateHistoryEntry)\n" }, { "alpha_fraction": 0.6339622735977173, "alphanum_fraction": 0.6339622735977173, "avg_line_length": 25.5, "blob_id": "6b2698f38182462338c0305b1733ff8ce5a9343c", "content_id": "7c720da2d92eee9992df9642a31e70be59a72a2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 65, "num_lines": 10, "path": "/backend/user_profile/handlers.py", "repo_name": "fusion44/crypternity-backend", "src_encoding": "UTF-8", "text": "from backend.user_profile.serializers import UserSerializer\n\n\ndef jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token,\n 'user': UserSerializer(user, context={\n 'request': request\n }).data\n }\n" } ]
47
vladamatena/avr-voltmeter
https://github.com/vladamatena/avr-voltmeter
4415ddd3bcb901ba3b7386ab02c78feadce097b2
07b559330add61bb2570406bc9eb720f28714880
41b0ad6396d20eba8c42c37976225149140271f3
refs/heads/master
2021-01-20T18:39:52.613316
2016-06-10T07:16:00
2016-06-10T07:16:00
60,801,025
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6430445909500122, "alphanum_fraction": 0.6607611775398254, "avg_line_length": 25.120689392089844, "blob_id": "4b97f3effea880c8da37a080f32e049f01746f19", "content_id": "88f9d536469472b4648b91ae14fa00ac260527e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1524, "license_type": "no_license", "max_line_length": 81, "num_lines": 58, "path": "/process.py", "repo_name": "vladamatena/avr-voltmeter", "src_encoding": "UTF-8", "text": "import sys\n\n# Source datafile (one voltage reading per line as float, 10 readings per second)\nsource = \"data\"\n\n# Load resistance in ohms\nresistance = 5\n\n# Threshold voltage when battery is still usable by desired application\nthreshold = 0.8\n\nif len(sys.argv) == 2:\n source = sys.argv[1]\n print(\"Using datafile \\\"\" + source + \"\\\"\")\nelse:\n print(\"Datafile not provided as firs argument trying \\\"data\\\".\")\n\nprint(\"Reading input voltages...\")\nignored = 0\nvoltages = []\nwith open(source) as file:\n for line in file:\n try:\n voltage = float(line)\n #print(voltage)\n voltages.append(voltage)\n except Exception as e:\n ignored = ignored + 1\n\n#print(voltages)\nprint(str(ignored) + \" lines skipped\")\nprint(str(len(voltages)) + \" lines processed\")\n\n\n\nprint(\"Computing capacity...\")\nusableCapacity = 0\ntotalCapacity = 0;\nusableEnergy = 0;\ntotalEnergy = 0;\n\nfor voltage in voltages:\n current = voltage / resistance\n currentPerHour = current / 3600 / 10\n energyPerHour = currentPerHour * voltage\n\n totalCapacity = totalCapacity + currentPerHour\n totalEnergy = totalEnergy + energyPerHour\n\n if voltage > threshold:\n usableCapacity = usableCapacity + currentPerHour\n usableEnergy = usableEnergy + energyPerHour\n\nprint(\"Results:\")\nprint(\"Total : \" + str(totalCapacity * 1000) + \" mAh\")\nprint(\"Total : \" + str(totalEnergy) + \" Wh\")\nprint(\"Usable: \" + str(usableCapacity * 1000) + \" mAh\")\nprint(\"Usable: \" + 
str(usableEnergy) + \" Wh\")\n \n" }, { "alpha_fraction": 0.5603070259094238, "alphanum_fraction": 0.5745614171028137, "avg_line_length": 15.581818580627441, "blob_id": "adf92b5ec6b9c61871a0cf17a11d3ba6a75422a5", "content_id": "7dfac1a901c6f1918b40a791b049e1977b7d92ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 912, "license_type": "no_license", "max_line_length": 68, "num_lines": 55, "path": "/serial.h", "repo_name": "vladamatena/avr-voltmeter", "src_encoding": "UTF-8", "text": "#ifndef SERIAL_H\n#define SERIAL_H\n\n#include <avr/io.h>\n\n#define SERIAL_MAGIC_SLOWDOWN 27\n#define SERIAL_DELAY (1000000 / SERIAL_BAUD - SERIAL_MAGIC_SLOWDOWN)\n\nstatic void inline serial_hi() {\n SERIAL_PORT |= SERIAL_MASK;\n}\n\nstatic void inline serial_lo() {\n SERIAL_PORT &= ~SERIAL_MASK;\n}\n\nstatic void inline serial_delay() {\n _delay_us(SERIAL_DELAY);\n}\n\nvoid serial_init() {\n // Start mark\n serial_hi();\n}\n\nvoid serial_sendChar(char character) {\n // Start bit\n serial_lo();\n serial_delay();\n\n // Data bits\n for(int i = 0; i < 8; ++i) {\n if((1 << i) & character)\n serial_hi();\n else\n serial_lo();\n\n serial_delay();\n }\n\n // Stop bit\n serial_lo();\n serial_delay();\n\n // Set mark signal\n serial_hi();\n}\n\nint serial_sendString(char *string) {\n while(*string != 0)\n serial_sendChar(*string++);\n}\n\n\n#endif //SERIAL_H\n" }, { "alpha_fraction": 0.6727272868156433, "alphanum_fraction": 0.713131308555603, "avg_line_length": 18.799999237060547, "blob_id": "3a7f683942706844956a4207d6a65998a55c5cd4", "content_id": "66c270e5ba51626e72afc7868ad0f69331359c64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 495, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/Makefile", "repo_name": "vladamatena/avr-voltmeter", "src_encoding": "UTF-8", "text": "program=voltmeter\n\nvoltemeter: main.hex\n\nmain.elf: main.c\n\tavr-gcc 
-Os -std=c99 -mmcu=attiny13 -o main.elf main.c\n\nmain.hex: main.elf\n\tobjcopy -R .eeprom -O ihex main.elf main.hex\n\nclean:\n\trm -f main.bin\n\trm -f main.hex\n\trm -f main.elf\n\nflash: main.hex\n\tavrdude -u -c usbasp -p attiny13 -U flash:w:main.hex:i\n\nlog:\n\tstty -F /dev/ttyUSB1 raw speed 9600 -crtscts cs8 -parenb -cstopb\n\tcat /dev/ttyUSB1\n\nserverlog:\n\tstty -F /dev/ttyUSB0 raw speed 9600 -crtscts cs8 -parenb -cstopb\n\tcat /dev/ttyUSB0\n" }, { "alpha_fraction": 0.6242038011550903, "alphanum_fraction": 0.668789803981781, "avg_line_length": 14.699999809265137, "blob_id": "1dfdb095531f4fbe61284731eebbbb644e405fd2", "content_id": "9011ba9bd3c121590adf09af1969accace2ab40f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 157, "license_type": "no_license", "max_line_length": 65, "num_lines": 10, "path": "/logger.sh", "repo_name": "vladamatena/avr-voltmeter", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nmv data data.`date +%s`\n\nwhile true; do\n\n\tstty -F /dev/ttyUSB0 raw speed 9600 -crtscts cs8 -parenb -cstopb\n\tcat /dev/ttyUSB0 | tee -a data\n\ndone;\n" }, { "alpha_fraction": 0.5150115489959717, "alphanum_fraction": 0.5605410933494568, "avg_line_length": 22.496124267578125, "blob_id": "f94e9a58346b7efd78e7f3129b58b1c1b5bd859d", "content_id": "b25dbc9130655b871eab2ebd637574fdc35e2150", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3031, "license_type": "no_license", "max_line_length": 68, "num_lines": 129, "path": "/main.c", "repo_name": "vladamatena/avr-voltmeter", "src_encoding": "UTF-8", "text": "#include <avr/io.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define F_CPU 1200000UL\n#include <util/delay.h>\n\n#define SERIAL_PORT PORTB\n#define SERIAL_MASK 0b00010000\n#define SERIAL_BAUD 9600\n#include \"serial.h\"\n\n//#define MAX_VOL 4950UL\n#define MAX_VOL 4848UL\n#define MAX_VAL 1023UL\n\n#define LIION_DISABLED 0\n#define 
LIION_ENABLED 1\n#define LIION_LOW 2\n\n#define BATT_DEF 0\n#define BATT_LION 1\n\n#define STATE_UNDF 0\n#define STATE_LO 1\n#define STATE_HI 2\n\n\n#define LIION_LED_MASK 0b00000010\n#define LIION_DISCHARGE_MASK 0b00000001\n\n//#define MAGIC_TIME_BALANCE 28423\n#define MAGIC_TIME_BALANCE 28100\n\nuint16_t adc_read() {\n // Start conversion\n ADCSRA |= _BV(ADSC);\n\n // Loop until conversion is complete\n while(!bit_is_set(ADCSRA,ADIF));\n\n // Clear ADIF by writing a 1 (this sets the value to 0)\n ADCSRA |= _BV(ADIF);\n\n return(ADC);\n}\n\nvoid adc_init() {\n // Enable ADC and set 128 prescale\n ADCSRA = _BV(ADEN) | _BV(ADPS2) | _BV(ADPS1) | _BV(ADPS0);\n\n // Select ADC3 as source\n ADMUX = 0b00000011;\n}\n\nint main(void) {\n // Set Port B I/O direction\n DDRB = SERIAL_MASK | LIION_LED_MASK | LIION_DISCHARGE_MASK;\n\n serial_init();\n\n char buf1[5];\n char buf2[5];\n\n adc_init();\n\n int batt = BATT_DEF;\n int state = STATE_UNDF;\n\n while(1) {\n // Sample the voltage value\n const int samples = 42;\n uint16_t val = 0;\n for(int i = 0; i < samples; i++)\n val += adc_read(0);\n\n // Get voltage and it's decimal part\n uint16_t voltage = MAX_VOL * val / MAX_VAL / 1000 / samples;\n int decimals = MAX_VOL * val / MAX_VAL / samples % 1000;\n\n // Detect lion battery\n if(voltage > 3 && state != STATE_LO) {\n batt = BATT_LION;\n state = STATE_HI;\n }\n\n // Detect def battery\n if(voltage > 1 && state != STATE_LO && batt == BATT_DEF)\n state = STATE_HI;\n\n // Handle li-ion battery discharge\n if(batt == BATT_LION) {\n if((voltage == 3 && decimals < 300) || voltage < 3)\n state = STATE_LO;\n }\n\n // Handle def battery discharge\n if(batt == BATT_DEF) {\n if(voltage < 1 && decimals < 900)\n state = STATE_LO;\n }\n\n // Control discharge gate\n if(state == STATE_HI) {\n // Start discharge\n PORTB |= LIION_DISCHARGE_MASK;\n PORTB |= LIION_LED_MASK;\n } else {\n // Stop discharge\n PORTB &= ~LIION_DISCHARGE_MASK;\n PORTB &= ~LIION_LED_MASK;\n }\n\n // Send voltage 
over serial line\n itoa(voltage, buf1, 10);\n itoa(decimals, buf2, 10);\n serial_sendString(buf1);\n serial_sendChar('.');\n if(decimals < 10)\n serial_sendChar('0');\n if(decimals < 100)\n serial_sendChar('0');\n serial_sendString(buf2);\n serial_sendChar('\\n');\n\n // Slow down to 10 measurements per second\n _delay_us(MAGIC_TIME_BALANCE);\n }\n}\n" } ]
5
scko823/rental-car-thanksgiving-2015
https://github.com/scko823/rental-car-thanksgiving-2015
ef975acaf96995cbdc3f97f7e271a5ff929adf55
35424e2bb50b6d35bd874db32c3cea3936b0403c
658a55450fbec055366498027e128d8e704909c5
refs/heads/master
2016-09-05T16:43:25.680321
2015-10-10T18:14:31
2015-10-10T18:14:31
39,974,720
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6940242052078247, "alphanum_fraction": 0.7197428345680237, "avg_line_length": 32.871795654296875, "blob_id": "118152412bc57b1c8afc6ed9b060d6b3b4258aab", "content_id": "39a54f2dfcde93eeafb51ac2e8745f83d941ed98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2644, "license_type": "no_license", "max_line_length": 115, "num_lines": 78, "path": "/costco-rental-car-Thanksgiving-week.py", "repo_name": "scko823/rental-car-thanksgiving-2015", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[99]:\n\nfrom selenium import webdriver\nimport urllib\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.keys import Keys\nimport time as time\nimport datetime as dt\nfrom lxml import html\n# import dependencies\n\n\n# In[100]:\n\n#use selenium to key in the rental car parameters\nbrowser = webdriver.Firefox() #I only tested in firefox\nbrowser.get('http://costcotravel.com/Rental-Cars')\nbrowser.implicitly_wait(5)#wait for webpage download\nbrowser.find_element_by_xpath('//div/input[@id=\"pickupAirportTextWidget\"]').send_keys('PHX')\nbrowser.implicitly_wait(5) #wait for the airport suggestion box to show\nbrowser.find_element_by_xpath('//li[@class=\"sayt-result\"]').click() \n#click the airport suggestion box \n\nbrowser.find_element_by_xpath('//input[@id=\"pickupDateWidget\"]').send_keys('11/21/2015')\nbrowser.find_element_by_xpath('//input[@id=\"dropoffDateWidget\"]').send_keys('11/29/2015',Keys.RETURN)\n\nbrowser.find_element_by_xpath('//select[@id=\"pickupTimeWidget\"]/option[@value=\"11:00 AM\"]').click()\nbrowser.find_element_by_xpath('//select[@id=\"dropoffTimeWidget\"]/option[@value=\"05:00 PM\"]').click()\nbrowser.implicitly_wait(5) #wait for the clicks to be completed\nbrowser.find_element_by_link_text('SEARCH').click()\n#click the search box\n\ntime.sleep(8) #wait for firefox to download and render the page\nn = browser.page_source #grab the html 
source code\n\n\n# In[14]:\n\n\nsoup = BeautifulSoup(n) #we use bs here\n\n\n# In[137]:\n\n#grab all the tags of interest (from the main table, the cells that display the car class)\n\n#exclude the first row about vendor and put into a list\nall_tags = [i for i in soup.findAll('th',{\"class\":\"tar\"}) if i.text.startswith('Tax')==False]\n\nmasterlist = []\n#go to the entir row, grab all the text of that rows, and clean it up (replace, split....) \nfor i in range(len(all_tags)):\n masterlist.append(str(all_tags[i].findParent().text).replace('Not Available','$NA').replace(',','').split('$'))\n#create dataframe from masterlist\ndf = pd.DataFrame(masterlist)\n#clean up the name of the columns\ndf.columns= [['car class', 'Alamo','Avis', 'Budget','Enterprise']]\n#calc the days away from first date of rental 11/21/2015 at 11AM\ndf['days away']= abs((dt.datetime.now()-dt.datetime(2015,11,21,11)).days)\n#setup the index. Will do groupby later?\ndf = df.set_index(['days away','car class'])\n\n\n# In[138]:\n\n#save current df\ndf.to_csv('data/%s.csv' %dt.datetime.now().strftime(\"%m-%d-%H-%M%p\"))\n\n\n# In[144]:\n\n#open up master df, then append it with current df\nmaster= pd.read_csv('data/master.csv',index_col=[0,1])\nmaster = pd.concat([master, df])\nmaster.to_csv('data/master.csv')\n\n" } ]
1
iankreisberguc/metodos_g_12
https://github.com/iankreisberguc/metodos_g_12
d11f43f5a5a2ecad414bba1bc17c64107ea97fb1
48ea58b75e4b4833c3d31c56dac737d218aba77e
886698e7d1bfc7c97af732f67f9f8df19a24b211
refs/heads/main
2023-06-10T17:05:47.372047
2021-06-26T19:59:20
2021-06-26T19:59:20
354,155,590
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6279357075691223, "alphanum_fraction": 0.6407086849212646, "avg_line_length": 33.92086410522461, "blob_id": "223e637b1c28c7243f89e4ac30a2b88581b3a125", "content_id": "bd65d38500c1555a99606f6fb370c1cd79d1dc41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4861, "license_type": "no_license", "max_line_length": 110, "num_lines": 139, "path": "/Gradiente y Newton/steepest.py", "repo_name": "iankreisberguc/metodos_g_12", "src_encoding": "UTF-8", "text": "__author__ = \"Moises Saavedra Caceres\"\n__email__ = \"[email protected]\"\n\n\n# Modulos nativos de python\nimport numpy as np\nimport time\nimport scipy.optimize\nimport numdifftools as nd\n\n# Modulo creado por usuario\nfrom parametros import generar_datos, gradient_f, func_objetivo\n\n# Se crea un decorador (googlear) del tipo timer para testear el tiempo\n# de ejecucion del programa\ndef timer(funcion):\n def inner(*args, **kwargs):\n\n inicio = time.time()\n resultado = funcion(*args, **kwargs)\n final = round(time.time() - inicio, 3)\n print(\"\\nTiempo de ejecucion total: {}[s]\".format(final))\n\n return resultado\n return inner\n\n# Se define la evaluacion de valores dentro de cada iteracion\n# de la rutina del gradiente\ndef subrutina(x, A, b, Grad):\n \"\"\"\n Esta funcion va creando el paso de cada iteracion. Ocupando la teoría\n estudiada. 
Retorna el valor de la funcion, su gradiente y su hessiano segun\n la iteracion estudiada.\n \"\"\"\n # Funcion a optimizar, gradiente y hessiano\n valor_FO = func_objetivo(x, A, b)\n\n #gradiente = Grad(x, A, b) \n gradiente = gradient_f(x, A, b)\n\n return valor_FO, gradiente\n\ndef funcion_enunciado(lambda_, x, A, b, direccion_descenso):\n \"\"\"\n Funcion original evaluada en: x + lambda*direccion_descenso\n \"\"\"\n # Se actualiza el valor de x\n x = x + lambda_ * direccion_descenso\n return func_objetivo(x, A, b)\n\n@timer\ndef gradiente(x0, A, b, epsilon, iteracion_maxima):\n \"\"\"\n Esta funcion es una aplicacion del metodo del gradiente, la que\n va a ir devolviendo valor objetivo, gradiente actual.\n\n Su entrada posee:\n - Q : matriz cuadrada que constituye la funcion definida\n - c : vector asociado que constituye la funcion definida\n - x0 : punto inicial de prueba\n - epsilon : error/ tolerancia deseada\n - iteracion_maxima : numero maximo de iteraciones\n\n Su retorno (salida) es:\n - valor : valor de la funcion evaluada en x en la iteracion actual\n - x : solucion en la que se alcanza el valor objetivo\n - R : matriz con la informacion de cada iteracion. 
Es una fila por iteracion\n y esta constituida por:\n - Numero de iteracion\n - valor\n - norma del gradiente\n - paso (lambda)\n \"\"\"\n # 1º paso del algoritmo: Se definen los parametros iniciales\n iteracion = 0\n stop = False\n x = x0\n\n Grad = nd.Gradient(func_objetivo)\n\n # Se prepara el output del codigo para en cada iteracion\n # entregar la informacion correspondiente\n print(\"\\n\\n********* METODO DE GRADIENTE **********\\n\")\n print(\"ITERACION VALOR OBJ NORMA LAMBDA\")\n\n # Se inicia el ciclo para las iteraciones maximas seteadas por el usuario\n while (stop == False) and (iteracion <= iteracion_maxima):\n\n # 2º paso del algoritmo: Se obtiene la informacion para determinar\n # el valor de la direccion de descenso\n valor, gradiente = subrutina(x, A, b, Grad)\n direccion_descenso = -gradiente\n\n # 3º paso del algoritmo: Se analiza el criterio de parada\n norma = np.linalg.norm(gradiente, 2)\n if norma <= epsilon:\n stop = True\n\n else:\n # 4º paso del algoritmo: Se busca el peso (lambda) optimo\n # Se resuelve el subproblema de lambda\n lambda_ = scipy.optimize.fminbound(funcion_enunciado, 0, 1e-3, args=(x, A, b, direccion_descenso))\n # La rutina del gradiente muestra en pantalla para cada iteracion:\n # nº de iteracion, valor de la funcion evaluada en el x de la iteracion,\n # la norma del gradiente y el valor de peso de lambda\n retorno_en_pantalla = [iteracion, valor, norma, lambda_]\n print(\"%12.6f %12.6f %12.6f %12.6f\" % (retorno_en_pantalla[0], retorno_en_pantalla[1], \n retorno_en_pantalla[2], retorno_en_pantalla[3]))\n\n\n # 5º paso del algoritmo: Se actualiza el valor de x para la siguiente\n # iteracion del algoritmo\n x = x + lambda_*direccion_descenso\n iteracion += 1\n\n #print(\"\\nSOLUCION:\\n\", x)\n return retorno_en_pantalla\n\nif __name__ == '__main__':\n print(\"\\n\\n ---------------- GRADIENTE ----------------\\n\")\n # Testeo de Newton, primero se generan datos para la funcion\n m = 300\n n = 50\n 
np.random.seed(220399)\n A, b = generar_datos(m, n)\n\n # Se ocupa el vector de \"unos\" como punto de inicio\n # (notar el salto que pega) de la iteracion 1 a la 2 el valor objetivo\n # -- Queda a tu eleccion que vector ingresar como solucion para la iteracion 1 --\n #x0 = np.random.uniform(-3, 3, size=n)\n x0 = np.zeros(n)\n print(\"Valor de la FO:\", func_objetivo(x0, A, b), \"\\n\")\n\n # Error asociado 10% este caso\n epsilon = 0.1\n\n # Maximo de iteraciones (para que no quede un loop infinito)\n iteracion_maxima = 3000\n gradiente(x0, A, b, epsilon, 100)\n" }, { "alpha_fraction": 0.5432196855545044, "alphanum_fraction": 0.5725614428520203, "avg_line_length": 27.636363983154297, "blob_id": "6cdfa814870999ae1ecfde50f032715127adeed2", "content_id": "e65bd7023d74c5620e933196a9d564d1b7a8dbb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1262, "license_type": "no_license", "max_line_length": 85, "num_lines": 44, "path": "/Gradiente y Newton/parametros.py", "repo_name": "iankreisberguc/metodos_g_12", "src_encoding": "UTF-8", "text": "__author__ = \"Moises Saavedra Caceres\"\n__email__ = \"[email protected]\"\n\n# Modificado por J. Vera el 21-03-2021. 
Correcciones a la generación\n\n\n# Modulos nativos de python\nimport numpy as np\n\n# Constante para a\nconstant = 100\n\n# Calcula el error entre Ax y b\nresiduo = lambda x, A, b: np.dot(A, x) - b.T\n\n# Calcula la funcion de penalizacion\npenalizacion = lambda x, a: np.piecewise(x, [abs(x) < a, abs(x) >= a], \n [lambda x: - a**2 * np.log(1 - (x/a)**2),\n lambda x: np.inf])\n\n# Calcula la funcion objetivo\nfunc_objetivo = lambda x, A, b, a=constant: np.sum(penalizacion(residuo(x, A, b), a))\n\n# Calcula el gradiente de la FO analiticamente\ndef gradient_f(x, A, b):\n a = constant\n aux_vector = a**2 * 2 * residuo(x, A, b) / (a**2 - residuo(x, A, b)**2)\n return np.dot(aux_vector, A)\n\ndef generar_datos(m, n):\n \"\"\"\n Recibe parametros m y n, con m > n, retorna una matriz A de mxn con valores\n entre -10 y 10 y un vector b de mx1 con valores entre -10 y 10\n \"\"\"\n assert (m > n), \"m debe ser mayor a n\"\n A = np.random.uniform(-10, 10, size=(m, n))\n b = np.random.uniform(-10, 10, size=m)\n return A, b\n\n\nif __name__ == '__main__':\n A, c = generar_datos(3, 5)\n print(A)\n print(c)\n\n" }, { "alpha_fraction": 0.6036001443862915, "alphanum_fraction": 0.636920690536499, "avg_line_length": 28.314607620239258, "blob_id": "840103432cea17efa535911366e48edfa14ad781", "content_id": "4aeb822e8e8f77960a1bd17fc873bfbf634dcba6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2615, "license_type": "no_license", "max_line_length": 97, "num_lines": 89, "path": "/Tarea 4/Definición de datos.py", "repo_name": "iankreisberguc/metodos_g_12", "src_encoding": "UTF-8", "text": "from gurobipy import GRB, Model, quicksum\nimport numpy as np\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 7 18:40:48 2021\n\n@author: jrver\n\"\"\"\n\n# Acá se definen los datos, con la notacuión de enunciado\nm = 12 # plantas de proceso\nn = 140 # parcelas\nhor = 12\nM = range(m)\nN = range(n)\nT = 
range(hor)\n\nnp.random.seed(71099)\n\nc = np.random.randint(150,400,size=(n,hor)) # costo por kilo cosechado\nf = np.random.randint(200000,300000,size=(n,hor)) # costo fijo por cosechar una parcela\np = np.random.randint(800,1000,size=m) # precio dependiendo de la planta en donde se proceso\ne = np.random.randint(10,15,size=(m,hor)) # costo de procesarlo\nB = np.random.randint(n*10,n*30,size=hor) # Mano de obra disponible\nK = np.random.randint(n*700,n*1000,size=m) # Kilos que puede recibir un planta\nalpha = np.random.randint(7,10,size=(n,hor))/50 # requerimiento de mano de obra\nL = np.random.randint(1500,2000,size=n) # disponibilidad de fruta en cada parcela\n\n\n\n\n# Generación del modelo\nmodel = Model(\"Cosechas\")\n\n# Crear y rellenar diccionarios de variables manufactura b_t,p , almacenada s_t,p , vendida u_t,p\nX_jt = {} # kilos cosechados en la parcela j el dia t\nY_jt = {} # vale 1 si se cosecha la parcela j en el día t, 0 en caso contrario\nZ_it = {} # kilos procesados en la planta i en el periodo t\n\nfor t in T:\n for j in N:\n X_jt[j, t] = model.addVar(vtype=GRB.CONTINUOUS, lb=0, name=\"X_{}_{}\".format(j, t))\n Y_jt[j, t] = model.addVar(vtype=GRB.BINARY, name=\"Y_{}_{}\".format(j, t))\n for i in M:\n Z_it[i, t] = model.addVar(vtype=GRB.CONTINUOUS, lb=0, name=\"Z_{}_{}\".format(i, t))\n\n\n# Llama a update para agregar las variables al modelo\nmodel.update()\n \n# Restriccion 1\n\nfor t in T:\n model.addConstr(quicksum(alpha[j][t] * X_jt[j, t] for j in N) <= B[t])\n\n# Restriccion 2\nfor t in T:\n for j in N:\n model.addConstr(X_jt[j, t] <= L[j]* Y_jt[j, t])\n\n# Restriccion 3\nfor j in N:\n model.addConstr(quicksum(Y_jt[j, t] for t in T) <= 1 )\n\n# Restriccion 4\nfor t in T:\n model.addConstr(quicksum(X_jt[j, t] for j in N) == quicksum(Z_it[i, t] for i in M))\n\n# Restriccion 5\nfor i in M:\n model.addConstr(quicksum(Z_it[i, t] for t in T) <= K[i])\n\n# Funcion objetivo\n\nobj= quicksum(quicksum(p[i]*Z_it[i, t] for i in M) for t in T) -\\\n 
quicksum(\n (\n quicksum(c[j][t]*X_jt[j, t] for j in N) +\\\n quicksum(f[j][t]*Y_jt[j, t] for j in N) + \\\n quicksum(e[i][t]*Z_it[i, t] for i in M)\n ) for t in T\n )\n\nmodel.setObjective(obj, GRB.MAXIMIZE)\n\nmodel.optimize()\n\n# Mostrar los valores de las soluciones\nmodel.printAttr(\"X\")\n\n\n" }, { "alpha_fraction": 0.643216073513031, "alphanum_fraction": 0.6834170818328857, "avg_line_length": 26.045454025268555, "blob_id": "bb8cc6cecb9a12c23744b094045abd5014c33168", "content_id": "aa3c6bdace098d07867977c635fe97377d839de9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/Gradiente y Newton/test.py", "repo_name": "iankreisberguc/metodos_g_12", "src_encoding": "UTF-8", "text": "import numpy as np\nimport numdifftools as nd\nfrom scipy.optimize import fminbound\nfrom parametros import generar_datos, func_objetivo, gradient_f\nfrom numdifftools import Gradient\n\nndGrad = Gradient(func_objetivo)\nfunc_lineal = lambda l, x, A, b, dir: func_objetivo(x + l*dir, A, b) \nnp.random.seed(220399)\n\nm = 300\nn = 50\nA, b = generar_datos(m, n)\nx0 = np.zeros(n)\n#x0 = np.random.randint(-3, 3, size=n)\nprint(\"\\n\")\nprint(func_objetivo(x0, A, b))\ndir = -gradient_f(x0, A, b)\nl = fminbound(func_lineal, 0, 1e-4, args=(x0, A, b, dir))\n#l = 1e-6\nprint(l)\nprint(func_objetivo(x0 + l * dir, A, b))\n\n\n" }, { "alpha_fraction": 0.4488832950592041, "alphanum_fraction": 0.603334367275238, "avg_line_length": 32.463157653808594, "blob_id": "d5ec3958f4de0014767cd4cf0a6aad43025f186d", "content_id": "51bc469f635be5d627e4a08ab3bfe98ab2bed9f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3179, "license_type": "no_license", "max_line_length": 113, "num_lines": 95, "path": "/Gradiente y Newton/Test_bfgs.py", "repo_name": "iankreisberguc/metodos_g_12", "src_encoding": "UTF-8", 
"text": "__author__ = \"Moises Saavedra Caceres\"\n__email__ = \"[email protected]\"\n\n\n# Modulos nativos de python\nimport numpy as np\nimport time\nimport scipy.optimize\n\n# Modulo creado por nosotros (parametros.py)\nfrom parametros import generar_datos\n\n#### Creacion de la funcion objetivo ####\n\n# Constante para a\nconstant = 250\n\n# Calcula el error entre Ax y b\nresiduo = lambda x, A, b: np.dot(A, x) - b\n\n# Calcula la funcion de penalizacion\npenalizacion = lambda x, a: np.piecewise(x, [abs(x) < a, abs(x) >= a], \n [lambda x: - a**2 * np.log(1 - (x/a)**2),\n lambda x: np.inf])\n\n# Calcula la funcion objetivo\nfunc_objetivo = lambda x, A, b, a=constant: np.sum(penalizacion(residuo(x, A, b), a))\n\ndef timer(funcion):\n \"\"\"\n Se crea un decorador (googlear) del tipo timer para testear el tiempo\n de ejecucion del programa\n \"\"\"\n def inner(*args, **kwargs):\n\n inicio = time.time()\n resultado = funcion(*args, **kwargs)\n final = round(time.time() - inicio, 3)\n print(\"\\nTiempo de ejecucion total: {}[s]\".format(final))\n\n return resultado\n return inner\n\n\ndef BFGS(x0, A, b, tol):\n t0 = time.time()\n res = scipy.optimize.minimize(func_objetivo, x0, (A, b), method='BFGS', options={'gtol': tol, 'disp': True})\n t1 = time.time() - t0\n print(\"\\nTiempo de demora de BFGS:\", t1, \"\\n\\n\")\n print(res.message)\n print()\n #x_opt = res.x\n #print(\"Solucion BFGS:\", x_opt, sep=\"\\n\")\n\n\nif __name__ == '__main__':\n print(\"\\n\\n ---------------- BFGS ----------------\\n\")\n # Testeo de BFGS, primero se generan datos para la funcion\n m = 300\n n = 50\n \n# np.random.seed(1610)\n np.random.seed(220399)\n \n A, b = generar_datos(m, n)\n\n # Se ocupa el vector de \"unos\" como punto de inicio\n # (notar el salto que pega) de la iteracion 1 a la 2 el valor objetivo\n # -- Queda a tu eleccion que vector ingresar como solucion para la iteracion 1 --\n x0 = np.zeros(n)\n print(\"Valor de la FO:\", func_objetivo(x0, A, b), \"\\n\")\n\n # 
Error asociado 10% este caso\n #epsilon = 0.1\n\n # Maximo de iteraciones (para que no quede un loop infinito)\n #iteracion_maxima = 5\n t0 = time.time()\n res = scipy.optimize.minimize(func_objetivo, x0, (A, b), method='BFGS', options={'gtol': 1e-6, 'disp': True})\n t1 = time.time() - t0\n print(\"\\nTiempo de demora:\", t1, \"\\n\\n\")\n print(res.message)\n print()\n x_opt = res.x\n print(np.linalg.norm(residuo(x_opt, A, b)))\n x_opt2 = [-0.00090309, 0.00118256, -0.00100717, -0.00056781, -0.00205902, 0.00368465,\n -0.0022257, 0.00025377, -0.00182315, 0.00117174, 0.00123833, 0.00172285,\n 0.00424657, -0.00023223, 0.0037284, 0.00054259, -0.00204912, 0.00091248,\n 0.00335923, -0.00336994, 0.00051575, -0.00194586, 0.00196556, -0.00144571,\n 0.00215869, 0.00218757, -0.00121628, -0.00055436, -0.00313988, 0.00229273,\n -0.00149506, 0.00287618, -0.00169493, -0.00168946, -0.00079053, 0.00655203,\n 0.0007888, -0.0018132, -0.00192264, 0.0014942, 0.00222382, 0.00028745,\n 0.00208139, -0.00229559, -0.00244832, -0.00284838, 0.00101864, -0.00155116,\n -0.00158425, -0.00217585]\n print(res.x)\n" }, { "alpha_fraction": 0.5541601181030273, "alphanum_fraction": 0.5949764251708984, "avg_line_length": 22.629629135131836, "blob_id": "d20fbaee8ad136338724b6073f8a2460b4aff5a2", "content_id": "b25849e23cd9b90a996dada8b25bd586f04d3870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 637, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/Gradiente y Newton/main.py", "repo_name": "iankreisberguc/metodos_g_12", "src_encoding": "UTF-8", "text": "from newton import newton\nfrom steepest import gradiente\nfrom parametros import generar_datos, func_objetivo\nimport numpy as np\nfrom Test_bfgs import BFGS\n\nm = 300\nn = 50\nnp.random.seed(220399)\nA, b = generar_datos(m, n)\nx0 = np.zeros(n)\n\nepsilon = 0.01\niters_newton = 100\niters_grad = 1000\n\nprint(\"\\n\\nCORRIENDO MAIN\")\nprint(\"Valor de la 
FO:\", func_objetivo(x0, A, b))\n\nprint(\"\\n ---------------- NEWTON ----------------\")\nnewton(x0, A, b, epsilon, iters_newton)\n\nprint(\"\\n ---------------- GRADIENTE ----------------\")\ngradiente(x0, A, b, epsilon, iters_grad)\n\nprint(\"\\n ---------------- BFGS ----------------\")\nBFGS(x0, A, b, epsilon)" } ]
6
msjha-vedi1995/sudoku-solver-with-image-processing
https://github.com/msjha-vedi1995/sudoku-solver-with-image-processing
e3ff6450192de7fc86be7a0fd415305766240e0d
ab054aa8c355ce9f3c153c16755f0d24ff9c4acf
37f789ee27f6a1af3cc8d63b0ab1d8939746a324
refs/heads/main
2023-02-17T11:18:38.130038
2021-01-20T03:40:33
2021-01-20T03:40:33
331,183,530
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6312236189842224, "alphanum_fraction": 0.6573839783668518, "avg_line_length": 30.945945739746094, "blob_id": "70f52ef1316b940553817c903362f1d426670e21", "content_id": "5aad298042c8987ff81d1e09843dc2edd9dd6503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 114, "num_lines": 37, "path": "/videocam.py", "repo_name": "msjha-vedi1995/sudoku-solver-with-image-processing", "src_encoding": "UTF-8", "text": "\nimport cv2\nfrom matplotlib import pyplot as plt\n\nfrom opencv_part import get_sudo_grid, get_sudoku, solve_sudoku, create_sudoku_img, change_perspective_to_original\n\n# cap = cv2.VideoCapture(0)\n# images = []\n# while 1:\n# ret, frame = cap.read()\n# try:\n# crp_img, orgnl, pts1, pts2 = get_sudo_grid(frame,900)\n# images.append(crp_img)\n# if crp_img.shape[0] == 900:\n# cv2.imshow('frame',crp_img)\n# break\n# except:\n# if cv2.waitKey(1) & 0xFF == ord('q'):\n# break\n# \n# cap.release()\n# cv2.destroyAllWindows()\n\nfolder = 'images/' \nimg = cv2.imread(\"cropped.jpg\",0)\norgnl = cv2.imread(\"original.jpg\",0)\nimg = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\nsd_img, unsolved_sd_lst = get_sudoku(img, 900)\ncv2.imwrite(folder + \"sd_img.jpg\",sd_img)\nprint(\"Numbers are extracted\")\n\nsolved_sd_lst, unsolved_sd_img = solve_sudoku(unsolved_sd_lst, sd_img.shape)\ncv2.imwrite(folder + \"unsolved_sd_img.jpg\",unsolved_sd_img)\nprint(\"Unsolved Sudoku image ready\")\n\nsolved_sd_img = create_sudoku_img(img, solved_sd_lst, unsolved_sd_lst, False)\ncv2.imwrite(folder + \"solved_sd_img.jpg\",solved_sd_img)\nprint(\"Solved sudoku image ready\")\n\n\n" }, { "alpha_fraction": 0.7358133792877197, "alphanum_fraction": 0.7509457468986511, "avg_line_length": 27.836362838745117, "blob_id": "bf59706274afd57104c0c4e1659cad6a6d4644cd", "content_id": "a80ce06316d3c222ad2364408088b4f32b9f7661", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1586, "license_type": "no_license", "max_line_length": 114, "num_lines": 55, "path": "/main.py", "repo_name": "msjha-vedi1995/sudoku-solver-with-image-processing", "src_encoding": "UTF-8", "text": "import cv2\nfrom matplotlib import pyplot as plt\n\nfrom opencv_part import get_sudo_grid, get_sudoku, solve_sudoku, create_sudoku_img, change_perspective_to_original\n\n'''\nget_sudoku_grid:- \nInput: Img array, Size\nOutput: cropped_img, original, pts1, pts2\n\nget_sudoku \nInput: Cropped_img, size \nOutput: sudoku_image_with_eroded_digits, unsolved_sudoku_list\n\nsolve_sudoku \nInput: sudoku_unsolved, shape\nOutput: sudoku_solved_list, sudoku_unsolved_image\n\ncreate_sudoku_img\nInput: sudoku_image_original, sudoku_solved, sudoku_unsolved, with_lines:bool\nOutput: solved_sudoku_image\n\nchange_perspective_to_original\nInput: pts2, pts1, sudoku_image, original\noutput: Final_Image\n'''\nfolder = 'output/'\nname = 'sudoku_images/sudoku5.jpg'\nimg = cv2.imread(name,1)\n\ncrp_img, orgnl, pts1, pts2 = get_sudo_grid(img,900)\n\ncv2.imwrite(folder + \"crpzimg.jpg\",crp_img)\ncv2.imwrite(folder + \"orgnl.jpg\",orgnl)\nprint(\"Image is cropped\")\n\nsd_img, unsolved_sd_lst = get_sudoku(crp_img, 900)\n\ncv2.imwrite(folder + \"sd_img.jpg\",sd_img)\nprint(\"Numbers are extracted\")\n\nsolved_sd_lst, unsolved_sd_img = solve_sudoku(unsolved_sd_lst, sd_img.shape)\ncv2.imwrite(folder + \"unsolved_sd_img.jpg\",unsolved_sd_img)\nprint(\"Unsolved Sudoku image ready\")\n\nsolved_sd_img = create_sudoku_img(crp_img, solved_sd_lst, unsolved_sd_lst, False)\ncv2.imwrite(folder + \"solved_sd_img.jpg\",solved_sd_img)\nprint(\"Solved sudoku image ready\")\n\nfinal = change_perspective_to_original(pts2, pts1, solved_sd_img, orgnl)\ncv2.imwrite(folder + \"final.jpg\",final)\nprint(\"Perspective changed to original image\")\n\nplt.imshow(final)\nplt.show()\n" }, { "alpha_fraction": 0.3806818127632141, "alphanum_fraction": 0.4130973219871521, 
"avg_line_length": 37.779659271240234, "blob_id": "710220490547e61cf64c24986388ac21dc81c390", "content_id": "d0fda587b64a39be1537943c2c2e6c8beeb2ebbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13728, "license_type": "no_license", "max_line_length": 119, "num_lines": 354, "path": "/opencv_part.py", "repo_name": "msjha-vedi1995/sudoku-solver-with-image-processing", "src_encoding": "UTF-8", "text": "from typing import List, Any, Union\n\nimport cv2\nfrom imutils import contours as cnt_sort\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom prediction import predict\nSIZE = 9\nmatrix=[[]]\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\ndef get_sudo_grid(name,size):\n #img = cv2.imread(name,0)\n img = name\n original = img.copy()\n #img = cv2.medianBlur(img,5)\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n greymain = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n \n th2 = cv2.adaptiveThreshold(greymain,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n cv2.THRESH_BINARY_INV,39,10)\n \n \n #contours,heirarchy = cv2.findContours(th2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n major = cv2.__version__.split('.')[0]\n if major == '3':\n ret, contours, hierarchy = cv2.findContours(th2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours, hierarchy = cv2.findContours(th2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n maxarea = 0\n cnt = contours[0]\n for i in contours:\n if cv2.contourArea(i)>maxarea:\n cnt = i\n maxarea = cv2.contourArea(i)\n blank = 
np.zeros(img.shape,np.uint8)\n image = cv2.drawContours(blank,[cnt],-1,(255,255,255),2)\n edges = cv2.Canny(image,40,150,apertureSize = 3)\n lines = cv2.HoughLines(edges,1,np.pi/180,100)\n createhor = []\n createver = []\n created = []\n anglediff=10\n rhodiff=10\n flag=0\n count = 2\n \n for line in lines:\n for (rho,theta) in line:\n flag=0\n for (rho1,theta1) in created:\n if abs(rho-rho1)<rhodiff and abs(theta-theta1)<anglediff:\n flag=1\n \n if flag==0:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n d = np.linalg.norm(np.array((x1,y1,0))-np.array((x2,y2,0)))\n cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)\n m=abs(1/np.tan(theta))\n if m<1:\n createhor.append((rho,theta))\n else:\n createver.append((rho,theta))\n created.append((rho,theta))\n \n points=[]\n for (rho,theta) in createhor:\n for (rho1,theta1) in createver:\n if (rho,theta)!=(rho1,theta1):\n a=[[np.cos(theta),np.sin(theta)],[np.cos(theta1),np.sin(theta1)]]\n b=[rho,rho1]\n cor=np.linalg.solve(a,b)\n if list(cor) not in points:\n points.append(list(cor))\n \n \n points.sort()\n if (points[0][1]>points[1][1]):\n points[0],points[1]=points[1],points[0]\n if (points[-1][1]<points[-2][1]):\n points[-1],points[-2]=points[-2],points[-1]\n \n points[1],points[2]=points[2],points[1]\n for i in points:\n images = cv2.circle(image,(int(i[0]),int(i[1])),4,(0,0,255),-1)\n pts1 = np.float32(points)\n pts2 = np.float32([[0,0],[size,0],[0,size],[size,size]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n \n warped2 = cv2.warpPerspective(blank,M,(size,size))\n img = cv2.warpPerspective(original,M,(size,size))\n return [img, 
original,pts1,pts2]\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\ndef get_sudoku(img ,size=900):\n img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n thresh = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n cv2.THRESH_BINARY_INV,39,10)\n thresh1 = thresh.copy()\n kernel = np.ones((1,1),np.uint8)\n thresh = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel)\n thresh = cv2.dilate(thresh,kernel,iterations=3)\n kernel = np.ones((1,10),np.uint8)\n thresh = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel)\n kernel = np.ones((10,1),np.uint8)\n thresh = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel)\n \n #contours,heirarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n thresh = cv2.bitwise_not(thresh)\n #contours,heirarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n major = cv2.__version__.split('.')[0]\n if major == '3':\n ret, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n blank = np.zeros(img.shape,np.uint8)\n finalContours = []\n for cnt in contours:\n epsilon = 0.04*cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n approx = cv2.convexHull(cnt)\n area = cv2.contourArea(approx)\n if area <= 9000:\n finalContours.append(approx)\n sudoku_rows,_ = cnt_sort.sort_contours(finalContours,method=\"left-to-right\")\n kernel = np.ones((3,3),np.uint8)\n thresh1 = cv2.erode(thresh1,kernel,iterations=1)\n blank_base = 
blank.copy()\n for c in sudoku_rows:\n blank = cv2.drawContours(blank,[c],-1,(255),-1)\n blank_base = cv2.drawContours(blank_base,[c],-1,(255),-1)\n blank = cv2.bitwise_and(thresh1,blank,mask=blank)\n \n kernel = np.ones((5,1),np.uint8)\n blank = cv2.erode(blank,kernel,iterations=1)\n kernel = np.ones((6,6),np.uint8)\n blank = cv2.morphologyEx(blank,cv2.MORPH_CLOSE,kernel)\n kernel = np.ones((1,5),np.uint8)\n blank = cv2.erode(blank,kernel,iterations=1)\n kernel = np.ones((9,9),np.uint8)\n blank = cv2.morphologyEx(blank,cv2.MORPH_CLOSE,kernel)\n kernel = np.ones((6,6),np.uint8)\n blank = cv2.dilate(blank,kernel,iterations=1)\n factor = blank.shape[0]//9\n sudoku_unsolved = []\n for i in range(9):\n for j in range(9):\n part = blank[i*factor:(i+1)*factor, j*factor:(j+1)*factor ]\n part = cv2.resize(part,(28,28))\n cv2.imwrite(\"images/{}_{}.jpg\".format(i,j),part)\n num,_ = predict(part)\n sudoku_unsolved.append(str(num))\n for i in range(10):\n cv2.line(blank,(0,factor*i),(blank.shape[1],factor*i),(255),2,2)\n cv2.line(blank,(factor*i,0),(factor*i,blank.shape[0]),(255),2,2)\n matrix=[row[:] for row in sudoku_unsolved]\n return [blank, sudoku_unsolved]\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\n\ndef number_unassigned(row, col):\n num_unassign = 0\n for i in range(0,SIZE):\n for j in range (0,SIZE):\n #cell is unassigned\n if matrix[i][j] == 0:\n row = i\n col = j\n num_unassign = 1\n a = [row, col, num_unassign]\n return a\n a = [-1, -1, num_unassign]\n return a\n#function to check if we can put a\n#value in a paticular 
cell or not\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\n\ndef is_safe(n, r, c):\n #checking in row\n for i in range(0,SIZE):\n #there is a cell with same value\n if matrix[r][i] == n:\n return False\n #checking in column\n for i in range(0,SIZE):\n #there is a cell with same value\n if matrix[i][c] == n:\n return False\n row_start = (r//3)*3\n col_start = (c//3)*3\n #checking submatrix\n for i in range(row_start,row_start+3):\n for j in range(col_start,col_start+3):\n if matrix[i][j]==n:\n return False\n return True\n\n#function to check if we can put a\n#value in a paticular cell or not\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\n\ndef solve_sudoku():\n row = 0\n col = 0\n #if all cells are assigned then the sudoku is already solved\n #pass by reference because number_unassigned will change the values of row and col\n a = number_unassigned(row, col)\n if a[2] == 0:\n return True\n row = a[0]\n col = a[1]\n #number between 1 to 9\n for i in range(1,10):\n #if we can assign i to the cell or not\n #the cell is matrix[row][col]\n if is_safe(i, row, col):\n matrix[row][col] = i\n 
#backtracking\n if solve_sudoku():\n return True\n #f we can't proceed with this solution\n #reassign the cell\n matrix[row][col]=0\n return False\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\n\ndef solve_sudoku(sudoku_unsolved,shape):\n sudoku_image = np.zeros(shape,np.uint8)\n y=-1\n x=0\n sudoku_solved = [row[:] for row in matrix]\n factor = shape[0]//9\n for num in sudoku_unsolved:\n if (x%9)==0:\n x=0\n y+=1\n textX = int( factor*x+factor/2 )\n textY = int( factor*y+factor/2 )\n font = cv2.FONT_HERSHEY_SIMPLEX\n if num!='0':\n cv2.putText(sudoku_image,str(num),(textX,textY),font,1,(255,255,255),6)\n x+=1\n \n for i in range(10):\n cv2.line(sudoku_image,(0,factor*i),(shape[1],factor*i),(255),2,2)\n cv2.line(sudoku_image,(factor*i,0),(factor*i,shape[0]),(255),2,2)\n \n return sudoku_solved,sudoku_image\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\n\ndef create_sudoku_img(sudoku_image,sudoku,sudoku_unsolved,with_lines = True):\n x=0\n y=-1\n sudoku_image = np.zeros(sudoku_image.shape,np.uint8)\n factor = sudoku_image.shape[0]//9\n for num in range(len(sudoku)):\n if (x%9)==0:\n 
x=0\n y+=1\n textX = int( factor*x+factor/2 )\n textY = int( factor*y+factor/2 + factor//4)\n font = cv2.FONT_HERSHEY_SIMPLEX\n if sudoku_unsolved[num] == '0':\n cv2.putText(sudoku_image,sudoku[num],(textX,textY),font,1.75,(0,255,255),4)\n x+=1\n if with_lines:\n for i in range(10):\n cv2.line(sudoku_image,(0,factor*i),(sudoku_image.shape[1],factor*i),(0),2,2)\n cv2.line(sudoku_image,(factor*i,0),(factor*i,sudoku_image.shape[0]),(0),2,2)\n return sudoku_image\n\n\n\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n#======================================================================================================================\n\n\n\n\ndef change_perspective_to_original(pts2,pts1,sudoku_image,original):\n M = cv2.getPerspectiveTransform(pts2,pts1)\n \n img = cv2.warpPerspective(sudoku_image,M,(original.shape[1],original.shape[0]))\n img = cv2.bitwise_not(img)\n img = cv2.bitwise_and(img,original)\n return img\n" }, { "alpha_fraction": 0.729468584060669, "alphanum_fraction": 0.750402569770813, "avg_line_length": 35.47058868408203, "blob_id": "0c43743d570099b347a0344e4474f725df82ea2f", "content_id": "3fb69e9d2cff544a9444bf47cffcc2db7bfbae31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 101, "num_lines": 17, "path": "/prediction.py", "repo_name": "msjha-vedi1995/sudoku-solver-with-image-processing", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport scipy.ndimage\nfrom skimage.feature import hog\nfrom skimage import data, color, exposure\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors 
import KNeighborsClassifier\nimport joblib\n\nknn = joblib.load('models/knn_model.pkl')\ndef feature_extraction(image):\n return hog(color.rgb2gray(image), orientations=8, pixels_per_cell=(4, 4), cells_per_block=(7, 7))\ndef predict(img):\n df = feature_extraction(img)\n predict = knn.predict(df.reshape(1,-1))[0]\n predict_proba = knn.predict_proba(df.reshape(1,-1))\n return predict, predict_proba[0][predict]\n\n" }, { "alpha_fraction": 0.7953216433525085, "alphanum_fraction": 0.7953216433525085, "avg_line_length": 84.5, "blob_id": "31dbc979f0cf92b179500f5c6d7d9ba127c5f9ea", "content_id": "106ffaceee14e6b8c778e02abc9d4840da0c5c72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 171, "license_type": "no_license", "max_line_length": 132, "num_lines": 2, "path": "/README.md", "repo_name": "msjha-vedi1995/sudoku-solver-with-image-processing", "src_encoding": "UTF-8", "text": "# sudoku-solver-with-image-processing\nfrom a given picture, video inout of a sudoku, it detects the edges, the grid and the numbers and then uses backtracking to solve it\n" } ]
5
BXBots/AutoFilter-BXBot
https://github.com/BXBots/AutoFilter-BXBot
5ed7278329171fd1a044a641f3c9c8eb8409b4c4
084e006b31ba21f39e92e7921ee44c042dae3043
070a8f9b46b5ee205066ab4f979525c80b202e56
refs/heads/main
2023-06-04T04:43:38.528941
2021-06-24T08:04:05
2021-06-24T08:04:05
378,854,154
1
0
MIT
2021-06-21T08:06:34
2021-06-10T12:36:53
2021-06-10T12:36:51
null
[ { "alpha_fraction": 0.6078431606292725, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 18.34482765197754, "blob_id": "daccad8eca09dde5a544748b005527e78bb8d1c9", "content_id": "a07d241900ba5590bd803aeca142802ebdea8094", "detected_licenses": [ "MIT", "LicenseRef-scancode-free-unknown" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "permissive", "max_line_length": 84, "num_lines": 29, "path": "/script.py", "repo_name": "BXBots/AutoFilter-BXBot", "src_encoding": "UTF-8", "text": "class script(object):\n\n\n START_MSG = \"\"\"Hai {}\nഞാൻ Mallu Movies ഗ്രൂപ്പിൽ വർക്ക് ചെയ്യുന്ന ഒരു അടിപൊളി AutoFilter ബോട്ടാണ്. \n\n`You Can't Use Me In Your Groups`\n\n**Please Join Our Movie GROUP 🔻**\n🎖@MalluRockerzzHD\n\"\"\"\n\n\n HELP_MSG = \"\"\"Hai \n Just Add Me Your Groups 🤩. Iam a Best Assistant Filter Bot For You.. \n @MalluRockerzzHD\n\"\"\"\n\n\n ABOUT_MSG = \"\"\"\n\n🛡️<b> Movie Channel:</b> <a href='https://t.me/MalluRockerzzHD'>🔰CLICK TO JOIN🔰</a> \n🛡️<b> Movie Group :</b> <a href='https://t.me/MalluMovies_Grp'>🔰CLICK TO JOIN🔰</a> \n\n\n\n@MalluRockerzzHD\n\n\"\"\"\n" } ]
1
ermidebebe/Twitter-Data-Analysis
https://github.com/ermidebebe/Twitter-Data-Analysis
a03e50996b993cbd6a248d88199adecce7db2725
d130d8f58dafbb64e0f5d2a4793ec61a1bad2aa8
efe8e53c0d327ffe549b768f6d240a354bd3c281
refs/heads/main
2023-06-01T00:57:59.174080
2021-06-22T19:24:46
2021-06-22T19:24:46
378,941,072
0
0
null
2021-06-21T13:32:04
2021-06-22T07:54:53
2021-06-22T19:24:46
Python
[ { "alpha_fraction": 0.5935580134391785, "alphanum_fraction": 0.5947202444076538, "avg_line_length": 35.06586837768555, "blob_id": "6140f167e4b36093bacd15f2728626d93f4d5856", "content_id": "51286caddf5be4740414ad9ed6faa9b1639ff9b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6023, "license_type": "no_license", "max_line_length": 140, "num_lines": 167, "path": "/extract_dataframe.py", "repo_name": "ermidebebe/Twitter-Data-Analysis", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nfrom textblob import TextBlob\nimport sys\n\nsys.path.insert(0, '\\\\')\n\n\ndef read_json(json_file: str) -> list:\n \"\"\"\n json file reader to open and read json files into a list\n Args:\n -----\n json_file: str - path of a json file\n \n Returns\n -------\n length of the json file and a list of json\n \"\"\"\n\n tweets_data = []\n for tweets in open(json_file, 'r'):\n tweets_data.append(json.loads(tweets))\n\n return len(tweets_data), tweets_data\n\n\nclass TweetDfExtractor:\n \"\"\"\n this function will parse tweets json into a pandas dataframe\n \n Return\n ------\n dataframe\n \"\"\"\n\n def __init__(self, tweets_list):\n\n self.tweets_list = tweets_list\n\n # an example function\n def find_statuses_count(self) -> list:\n statuses_count = [x['statuses_count'] for x in self.tweets_list]\n return list(statuses_count)\n\n def find_full_text(self) -> list:\n text=[]\n for x in self.tweets_list:\n if 'retweeted_status' in x:\n if 'extended_tweet' in x['retweeted_status']:\n text.append(x['retweeted_status']['extended_tweet']['full_text'])\n else:\n text.append(x['retweeted_status']['text'])\n else:\n text.append(x['text'])\n return text\n\n def find_sentiments(self, text) -> list:\n polarity = []\n subjectivity = []\n for i in text:\n blob = TextBlob(i)\n sentiment = blob.sentiment\n polarity.append(sentiment.polarity)\n subjectivity.append(sentiment.subjectivity)\n return [polarity, subjectivity]\n\n 
def find_created_time(self) -> list:\n\n return [x['created_at'] for x in self.tweets_list]\n\n def find_source(self) -> list:\n source = [x['source'] for x in self.tweets_list]\n\n return source\n\n def find_screen_name(self) -> list:\n screen_name = [x['user']['screen_name'] for x in self.tweets_list]\n return screen_name\n\n def find_followers_count(self) -> list:\n followers_count = [x['user']['followers_count'] for x in self.tweets_list]\n return followers_count\n\n def find_friends_count(self) -> list:\n friends_count = [x['user']['friends_count'] for x in self.tweets_list]\n return friends_count\n\n def is_sensitive(self) -> list:\n is_sensitive = [x['possibly_sensitive'] if 'possibly_sensitive' in x else 'None' for x in self.tweets_list]\n return is_sensitive\n\n def find_favourite_count(self) -> list:\n return [x['retweeted_status']['favorite_count'] if 'retweeted_status'in x else x['favorite_count'] for x in self.tweets_list]\n\n def find_retweet_count(self) -> list:\n retweet_count = [x['retweeted_status']['retweet_count'] if 'retweeted_status'in x else x['retweet_count'] for x in self.tweets_list]\n return retweet_count\n\n def find_hashtags(self) -> list:\n hashtags=[]\n for x in self.tweets_list:\n if len(x['entities']['hashtags'])>0:\n hashtags.append('\\n'.join([i['text'] for i in x['entities']['hashtags']]))\n else:\n hashtags.append('')\n return hashtags\n\n def find_mentions(self) -> list:\n mensions=[]\n for x in self.tweets_list:\n if len(x['entities']['user_mentions'])>0:\n mensions.append('\\n'.join([i['screen_name'] for i in x['entities']['user_mentions'] ]))\n else:\n mensions.append('')\n return mensions\n def find_location(self) -> list:\n location = [x['user']['location'] if 'user' in x and 'location' in x['user'] else '' for x in self.tweets_list]\n return location\n\n def find_lang(self):\n return [x['lang'] for x in self.tweets_list]\n\n def get_tweet_df(self, save=False) -> pd.DataFrame:\n \"\"\"required column to be generated you 
should be creative and add more features\"\"\"\n\n columns = ['created_at', 'source', 'original_text', 'polarity', 'subjectivity', 'lang', 'favorite_count',\n 'retweet_count',\n 'original_author', 'followers_count', 'friends_count', 'possibly_sensitive', 'hashtags',\n 'user_mentions', 'place']\n\n created_at = self.find_created_time()\n source = self.find_source()\n text = self.find_full_text()\n polarity, subjectivity = self.find_sentiments(text)\n lang = self.find_lang()\n fav_count = self.find_favourite_count()\n retweet_count = self.find_retweet_count()\n screen_name = self.find_screen_name()\n follower_count = self.find_followers_count()\n friends_count = self.find_friends_count()\n sensitivity = self.is_sensitive()\n hashtags = self.find_hashtags()\n mentions = self.find_mentions()\n location = self.find_location()\n data = zip(created_at, source, text, polarity, subjectivity, lang, fav_count, retweet_count, screen_name,\n follower_count, friends_count, sensitivity, hashtags, mentions, location)\n df = pd.DataFrame(data=data, columns=columns)\n\n if save:\n df.to_csv('processed_tweet_data.csv', index=False)\n print('File Successfully Saved.!!!')\n\n return df\n\n\nif __name__ == \"__main__\":\n # required column to be generated you should be creative and add more features\n columns = ['created_at', 'source', 'original_text', 'clean_text', 'sentiment', 'polarity', 'subjectivity', 'lang',\n 'favorite_count', 'retweet_count',\n 'original_author', 'screen_count', 'followers_count', 'friends_count', 'possibly_sensitive', 'hashtags',\n 'user_mentions', 'place', 'place_coord_boundaries']\n _, tweet_list = read_json(\"data/covid19.json\")\n print(tweet_list[0:5])\n tweet = TweetDfExtractor(tweet_list)\n tweet_df = tweet.get_tweet_df()\n # use all defined functions to generate a dataframe with the specified columns above\n" } ]
1
hmudimi/Junit_xml_allure_testreport
https://github.com/hmudimi/Junit_xml_allure_testreport
3efc078ad56d2e4559ae7b73ffeae13a42ce80f4
8fd20e1153c1531b0a077a234685c265adda6945
d820a7abffd69b06e6c95f72efaddc555cf679e9
refs/heads/master
2020-09-09T07:43:05.623204
2019-11-06T18:12:19
2019-11-06T18:12:19
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7279293537139893, "alphanum_fraction": 0.7560192346572876, "avg_line_length": 52.956520080566406, "blob_id": "364085d22406fa97cbd1dccd91d445c1e778e9aa", "content_id": "829b07ed7e9833b7af5136cca0972227e33f4686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1246, "license_type": "no_license", "max_line_length": 172, "num_lines": 23, "path": "/README.md", "repo_name": "hmudimi/Junit_xml_allure_testreport", "src_encoding": "UTF-8", "text": "# Junit_xml_allure_testreport\nCreating the junit xml files using the python junitparser . and generate the test report using the allure \n\n## Some links for more in depth learning and installation\n### installation in windows\n* [scoop installation in windows](https://www.onmsft.com/how-to/how-to-install-the-scoop-package-manager-in-windows-10) A website for instalation process description.\n* [JDK installation](https://www.oracle.com/technetwork/java/javase/downloads/jdk13-downloads-5672538.html) A website for instalation of JDK.\n* [JAVA_HOME setup](https://stackoverflow.com/questions/2619584/how-to-set-java-home-on-windows-7/17142065#17142065) Information to set up Java_home in environmental setup.\n\nThe Powershell Flow commands\n==================\n- Set-ExecutionPolicy RemoteSigned -scope CurrentUser\n- iex (new-object net.webclient).downloadstring('https://get.scoop.sh')\n- scoop install allure\n- allure --version\n- allure serve </Folder path of xml files>\n\nCreating Junit xml test suite files\n------------\nUsing the python packages we can create the xml files\n- Pip install below packages\n - pip install junitparser {{https://pypi.org/project/junitparser/}}\n - pip install junit-xml {{https://pypi.org/project/junit-xml/}}\n \n" }, { "alpha_fraction": 0.7042514085769653, "alphanum_fraction": 0.7264325618743896, "avg_line_length": 26.578947067260742, "blob_id": "c3d96b0ada2219022d3dcd2b9d92b9242205b469", "content_id": 
"e9951e55790a22cc7929cbe4010bdb2d1655df70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/junit_test_file_create.py", "repo_name": "hmudimi/Junit_xml_allure_testreport", "src_encoding": "UTF-8", "text": "from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error\r\n\r\n# Create cases\r\ncase1 = TestCase('case1')\r\ncase1.result = Skipped()\r\ncase2 = TestCase('case2')\r\ncase2.result = Error('Example error message', 'the_error_type')\r\n\r\n# Create suite and add cases\r\nsuite = TestSuite('suite1')\r\nsuite.add_property('build', '55')\r\nsuite.add_testcase(case1)\r\nsuite.add_testcase(case2)\r\nsuite.remove_testcase(case2)\r\n\r\n# Add suite to JunitXml\r\nxml = JUnitXml()\r\nxml.add_testsuite(suite)\r\nxml.write('C:/Users/RAG/Desktop/venky-python/junit.xml')" }, { "alpha_fraction": 0.692052960395813, "alphanum_fraction": 0.7152317762374878, "avg_line_length": 48.33333206176758, "blob_id": "477ba46ef0b60278ac8b352c52adc5b5ffc60a99", "content_id": "85ad8408fbd39421924a4947de1a5b3b915d11d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 92, "num_lines": 6, "path": "/junit_xml.py", "repo_name": "hmudimi/Junit_xml_allure_testreport", "src_encoding": "UTF-8", "text": "from junit_xml import TestSuite, TestCase\r\n\r\ntest_cases = [TestCase('Test1', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]\r\nts = TestSuite(\"my test suite\", test_cases)\r\n# pretty printing is on by default but can be disabled using prettyprint=False\r\nprint(TestSuite.to_xml_string([ts]))\r\n" } ]
3
ToddTurnbull/reload
https://github.com/ToddTurnbull/reload
8f2746cfb88c380d1d93af01bbae502995033cc1
d2e82c76c1d7c19103c79012a28c2ca2a242a386
b4fed0da317dd37d64c964929ecee27448e6a9d9
refs/heads/master
2021-11-23T12:50:32.240030
2021-11-05T14:08:09
2021-11-05T14:08:09
57,924,958
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5927889943122864, "alphanum_fraction": 0.6831035614013672, "avg_line_length": 75.45945739746094, "blob_id": "ca164035b119419f698e55ffcc29e41f38cdcb1b", "content_id": "b9092d6ac5ca755fe044c27dcc05b751215fff10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 5658, "license_type": "no_license", "max_line_length": 90, "num_lines": 74, "path": "/sql/init_scripts/03-exec-copy.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "SET search_path TO tempdb;\n\\copy data from '../unload/172.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes from '../unload/179.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes_cat from '../unload/180.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes_tree from '../unload/181.dat' csv quote '''' encoding 'WIN1252';\n\\copy city from '../unload/187.dat' csv quote '''' encoding 'WIN1252';\n\\copy pub from '../unload/188.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes_related from '../unload/189.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes_reject from '../unload/191.dat' csv quote '''' encoding 'WIN1252';\n\\copy tlkpAddressType from '../unload/198.dat' csv quote '''' encoding 'WIN1252';\n\\copy tblAddress from '../unload/199.dat' csv quote '''' encoding 'WIN1252';\n\\copy tlkpAccessibility from '../unload/200.dat' csv quote '''' encoding 'WIN1252';\n\\copy trelAddressAccessibility from '../unload/201.dat' csv quote '''' encoding 'WIN1252';\n\\copy tlkpCommType from '../unload/202.dat' csv quote '''' encoding 'WIN1252';\n\\copy tblComm from '../unload/203.dat' csv quote '''' encoding 'WIN1252';\n\\copy tblContact from '../unload/204.dat' csv quote '''' encoding 'WIN1252';\n\\copy tblService from '../unload/205.dat' csv quote '''' encoding 'WIN1252';\n\\copy tlkpLanguage from '../unload/206.dat' csv quote '''' encoding 'WIN1252';\n\\copy trelServiceLanguage from '../unload/207.dat' csv quote '''' encoding 'WIN1252';\n\\copy 
tlkpArea from '../unload/208.dat' csv quote '''' encoding 'WIN1252';\n\\copy trelServiceArea from '../unload/209.dat' csv quote '''' encoding 'WIN1252';\n\\copy tblOrgName from '../unload/210.dat' csv quote '''' encoding 'WIN1252';\n\\copy tlkpOrgNameType from '../unload/211.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_names from '../unload/218.dat' csv quote '''' encoding 'WIN1252';\n\\copy org from '../unload/220.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_comm_rel from '../unload/225.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_address_rel from '../unload/226.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_contact_rel from '../unload/227.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_rel_del from '../unload/228.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_service_rel from '../unload/229.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_del from '../unload/230.dat' csv quote '''' encoding 'WIN1252';\n\\copy pub_org from '../unload/231.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes_original from '../unload/232.dat' csv quote '''' encoding 'WIN1252';\n\\copy thes_rel from '../unload/242.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_thes from '../unload/245.dat' csv quote '''' encoding 'WIN1252';\n\\copy pub_entry from '../unload/247.dat' csv quote '''' encoding 'WIN1252';\n\\copy area from '../unload/248.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxonomy from '../unload/252.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxRel from '../unload/254.dat' csv quote '''' encoding 'WIN1252';\n\\copy locations from '../unload/272.dat' csv quote '''' encoding 'WIN1252';\n\\copy pubGroupName from '../unload/294.dat' csv quote '''' encoding 'WIN1252';\n\\copy pubGroup from '../unload/295.dat' csv quote '''' encoding 'WIN1252';\n\\copy orgNotes from '../unload/297.dat' csv quote '''' encoding 'WIN1252';\n\\copy orgNoteTypes from '../unload/298.dat' csv quote '''' encoding 'WIN1252';\n\\copy pubThes from 
'../unload/299.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxGroups from '../unload/341.dat' csv quote '''' encoding 'WIN1252';\n\\copy temptaxgroup from '../unload/342.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxChanges from '../unload/343.dat' csv quote '''' encoding 'WIN1252';\n\\copy orgUpdated from '../unload/350.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxLink from '../unload/356.dat' csv quote '''' encoding 'WIN1252';\n\\copy orgTaxLink from '../unload/357.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxLinkNote from '../unload/358.dat' csv quote '''' encoding 'WIN1252';\n\\copy cioc from '../unload/361.dat' csv quote '''' encoding 'WIN1252';\n\\copy ciocExport from '../unload/362.dat' csv quote '''' encoding 'WIN1252';\n\\copy taxRelTemp from '../unload/364.dat' csv quote '''' encoding 'WIN1252';\n\\copy tempTaxNames from '../unload/374.dat' csv quote '''' encoding 'WIN1252';\n\\copy tempTaxAlso from '../unload/375.dat' csv quote '''' encoding 'WIN1252';\n\\copy tempTaxOld from '../unload/376.dat' csv quote '''' encoding 'WIN1252';\n\\copy tempTaxDetails from '../unload/377.dat' csv quote '''' encoding 'WIN1252';\n\\copy pubTax from '../unload/381.dat' csv quote '''' encoding 'WIN1252';\n\\copy ic_agencies from '../unload/382.dat' csv quote '''' encoding 'WIN1252';\n\\copy ic_agency_sites from '../unload/383.dat' csv quote '''' encoding 'WIN1252';\n\\copy ic_site_services from '../unload/384.dat' csv quote '''' encoding 'WIN1252';\n\\copy pub_tree from '../unload/386.dat' csv quote '''' encoding 'WIN1252';\n\\copy site from '../unload/387.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_tree from '../unload/390.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_site from '../unload/391.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_site_name from '../unload/392.dat' csv quote '''' encoding 'WIN1252';\n\\copy org_thes_pub from '../unload/394.dat' csv quote '''' encoding 'WIN1252';\n\\copy tempTaxActive from '../unload/395.dat' 
csv quote '''' encoding 'WIN1252';\n\\copy tempCCAC from '../unload/396.dat' csv quote '''' encoding 'WIN1252';\n\\copy contact_comm from '../unload/398.dat' csv quote '''' encoding 'WIN1252';\n\\copy external from '../unload/399.dat' csv quote '''' encoding 'WIN1252';\n\\copy external_data from '../unload/400.dat' csv quote '''' encoding 'WIN1252';\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.79347825050354, "avg_line_length": 22, "blob_id": "9fcc0e1d730034a56b16bcbf9b0f67d26ac68137", "content_id": "16117604dea60d00bd1d4d7156b8a63f761fe19e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 92, "license_type": "no_license", "max_line_length": 54, "num_lines": 4, "path": "/Dockerfile", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "FROM postgres\n\nRUN apt-get update -y\nRUN apt-get -y install python3 postgresql-plpython3-14\n" }, { "alpha_fraction": 0.5934959053993225, "alphanum_fraction": 0.6016260385513306, "avg_line_length": 17.923076629638672, "blob_id": "117001d68ae87ab7a4b4a7163c06d906c672bdcd", "content_id": "98acaaa22f0018fe5a27410cf8dc5e79a829e7ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 246, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/docker-compose.yml", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "version: '3.7'\n\nservices:\n\n db:\n build: .\n restart: always\n environment:\n - POSTGRES_USER=postgres\n - POSTGRES_PASSWORD=postgres\n volumes:\n - ./unload:/unload/\n - ./sql/init_scripts:/docker-entrypoint-initdb.d/\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 23, "blob_id": "09e6af9c46d8084e4fe74f0dc7bd3aa915fc6f27", "content_id": "7fbddce45f3541e1f330e85fc2e69dbdf97ae6da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 25, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/tests/__init__.py", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "from .context import db\n\n" }, { "alpha_fraction": 0.7326388955116272, "alphanum_fraction": 0.7465277910232544, "avg_line_length": 29.3157901763916, "blob_id": "aa6de331196221de714fb47ecb7063964a665eb7", "content_id": "4c2de56de00c102eecd2e479f9a031e8f23536ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 576, "license_type": "no_license", "max_line_length": 99, "num_lines": 19, "path": "/readme.md", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "# Convert a database from Sybase Adaptive Server Anywhere 6 to PostgreSQL 9\n\n## Running the PostgreSQL database with Docker\n\n1. Ensure that you have Docker Compose > 1.25\n2. Place your .dat files (the data to be copied into Postgres) in the `./unload` directory\n3. Run `docker-compose up`\n\nOnce the database has been initialized, you can access the data through the command-line like this:\n\n```\ndocker-compose exec db psql -U postgres\n```\n\nBefore you run any queries on the database, make sure to set the right schema in `psql`:\n\n```\npostgres=# SET search_path TO tempdb;\n```\n" }, { "alpha_fraction": 0.6626870632171631, "alphanum_fraction": 0.6751760840415955, "avg_line_length": 27.759492874145508, "blob_id": "2f9a30555b98c60c38dec3c19d416cbce9bf58ac", "content_id": "9650ae0a6677080c993cb37937cc279eea551826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36352, "license_type": "no_license", "max_line_length": 99, "num_lines": 1264, "path": "/db/models/__init__.py", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "from sqlalchemy import Boolean\nfrom sqlalchemy import CheckConstraint\nfrom sqlalchemy import Column\nfrom sqlalchemy import Date\nfrom sqlalchemy 
import DateTime\nfrom sqlalchemy import DECIMAL\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import func\nfrom sqlalchemy import Index\nfrom sqlalchemy import Integer\nfrom sqlalchemy import PrimaryKeyConstraint\nfrom sqlalchemy import Sequence\nfrom sqlalchemy import String\nfrom sqlalchemy import Text\nfrom sqlalchemy import UniqueConstraint\n\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom sqlalchemy.orm import relationship\n\nfrom functools import partial\ndef name_table(table, schema=None):\n \"\"\"Return a schema qualified table name\"\"\"\n return \"{}.{}\".format(schema, table) if schema else table\n\nwith_schema = partial(name_table, schema=\"tempdb\") # to do: use config.schema\n \nBase = declarative_base()\nBase.metadata.schema = \"tempdb\" # to do: use config.schema\nBase.metadata.naming_convention = {\n \"ix\": \"%(table_name)s_%(column_0_name)s_index\",\n \"uq\": \"%(table_name)s_%(column_0_name)s_key\",\n # \"ck\": \"%(table_name)s_%(column_0_name)s_check\",\n \"fk\": \"%(table_name)s_%(column_0_name)s_%(referred_table_name)s_%(referred_column_0_name)s_fkey\",\n \"pk\": \"%(table_name)s_pkey\"\n}\n\nclass Data(Base):\n __tablename__ = \"data\"\n\n # Columns\n recordnumber = Column(String(5), nullable=False, unique=True)\n internalmemo = Column(Text)\n comments = Column(Text)\n recnum = Column(\n String(7),\n CheckConstraint(\n \"left(RecNum, 3) = 'WRN'\",\n name = \"data_recnum_check\"\n ),\n nullable=False,\n unique=True\n )\n org1 = Column(String(100))\n org2 = Column(String(70))\n org3 = Column(String(70))\n org4 = Column(String(70))\n org5 = Column(String(70))\n altorg = Column(Text)\n formerorg = Column(Text)\n xref = Column(Text)\n streetbuilding = Column(String(90))\n streetaddress = Column(String(90))\n streetcity = Column(String(40))\n mailcareof = Column(String(60))\n building = Column(String(90))\n address = Column(String(90))\n city = Column(String(40))\n province = Column(String(25))\n postalcode = 
Column(String(7))\n accessibility = Column(Text)\n location = Column(String(60))\n intersection = Column(String(60))\n officephone = Column(Text)\n fax = Column(Text)\n email = Column(Text)\n www = Column(String(255))\n afterhoursphone = Column(Text)\n crisisphone = Column(Text)\n tddphone = Column(Text)\n data = Column(String(30))\n description = Column(Text)\n pubdescription = Column(Text)\n generalinfo = Column(Text)\n bnd = Column(Text)\n otherresource = Column(Text)\n fees = Column(Text)\n hours = Column(Text)\n dates = Column(Text)\n areaserved = Column(Text)\n eligibility = Column(Text)\n application = Column(Text)\n languages = Column(Text)\n contact1 = Column(String(60))\n contact1title = Column(String(120))\n contact1org = Column(String(90))\n contact1phone = Column(Text)\n contact2 = Column(String(60))\n contact2title = Column(String(120))\n printedmaterial = Column(Text)\n contact2org = Column(String(90))\n contact2phone = Column(Text)\n contact3 = Column(String(60))\n contact3title = Column(String(120))\n contact3org = Column(String(90))\n contact3phone = Column(Text)\n contact4 = Column(String(60))\n contact4title = Column(String(120))\n contact4org = Column(String(90))\n contact4phone = Column(Text)\n dateestablished = Column(String(60))\n elections = Column(String(120))\n funding = Column(Text)\n ddcode = Column(String(10))\n levelofservice = Column(String(60))\n subject = Column(Text)\n usedfor = Column(Text)\n blue = Column(Text)\n seealso = Column(Text)\n localsubjects = Column(Text)\n typeofrecord = Column(String(2))\n qualitylevel = Column(String(20))\n tobedeleted = Column(String(20))\n distribution = Column(Text)\n pub = Column(Text)\n sourceofinfo = Column(String(60))\n sourcetitle = Column(String(60))\n sourceorg = Column(String(60))\n sourcebuilding = Column(String(30))\n sourceaddress = Column(String(60))\n sourcecity = Column(String(30))\n sourceprovince = Column(String(2))\n sourcepostalcode = Column(String(7))\n sourcephone = 
Column(Text)\n collectedby = Column(String(40))\n datecollected = Column(String(10))\n createdby = Column(String(40))\n updatedby = Column(String(40))\n updatedate = Column(String(10))\n updateschedule = Column(String(10))\n historyofupdate = Column(String(10))\n lastmodified = Column(Text)\n org1_sort = Column(String(100))\n id = Column(Integer, primary_key=True)\n org_name_id = Column(Integer, nullable=False)\n\n# delete?\nclass Thes(Base):\n __tablename__ = \"thes\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n term = Column(String(60), nullable=False, index=True)\n note = Column(Text, nullable=False)\n action = Column(String(6))\n cat_id = Column(Integer, ForeignKey(\"thes_cat.id\"))\n sort = Column(String(6))\n\n# delete?\nclass ThesCat(Base):\n __tablename__ = \"thes_cat\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n category = Column(String(30), nullable=False)\n\n# delete?\nclass ThesTree(Base):\n __tablename__ = \"thes_tree\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n term = Column(Text, nullable=False)\n parent_id = Column(Integer, ForeignKey(\"thes.id\"))\n cat_id = Column(Integer, nullable=False)\n\n# delete?\nclass City(Base):\n __tablename__ = \"city\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n city = Column(String(20), nullable=False)\n\nclass Pub(Base):\n __tablename__ = \"pub\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n code = Column(String(20), nullable=False, unique=True)\n title = Column(String(50), nullable=False, index=True)\n isdefault = Column(Boolean, nullable=False, default=False)\n lastupdated = Column(DateTime)\n note = Column(Text)\n\n # Relationships\n taxonomy = relationship( # many-to-many\n \"TaxLinkNote\",\n secondary = with_schema(\"pubtax\")\n )\n\n# delete?\nclass ThesRelated(Base):\n __tablename__ = \"thes_related\"\n __table_args__ = (\n PrimaryKeyConstraint(\"thes_id\", \"related_id\"),\n )\n\n # Columns\n thes_id = Column(Integer, ForeignKey(\"thes.id\"), 
nullable=False)\n related_id = Column(Integer, ForeignKey(\"thes.id\"), nullable=False)\n\n# delete?\nclass ThesReject(Base):\n __tablename__ = \"thes_reject\"\n # SQLAlchemy needs a primary key\n __table_args__ = (\n PrimaryKeyConstraint(\"thes_id\", \"accept_id\"),\n )\n\n # Columns\n thes_id = Column(Integer, ForeignKey(\"thes.id\"), nullable=False)\n accept_id = Column(Integer, ForeignKey(\"thes.id\"), nullable=False)\n\nclass AddressType(Base):\n __tablename__ = \"tlkpaddresstype\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(50), nullable=False)\n\nclass Address(Base):\n __tablename__ = \"tbladdress\"\n __table_args__ = (\n CheckConstraint(\"\"\"\n (utm_x is null and utm_y is null)\n or\n (utm_x is not null and utm_y is not null)\n or\n (latitude is null and longitude is null)\n or\n (latitude is not null and longitude is not null)\n \"\"\",\n name = \"tbladdress_check\"\n ),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n addresstypeid = Column(Integer, ForeignKey(\"tlkpaddresstype.id\"), nullable=False)\n incareof = Column(String(60))\n building = Column(String(50))\n address = Column(String(50))\n city = Column(String(50), nullable=False)\n province = Column(String(2), default=\"ON\")\n postalcode = Column(\n String(7),\n CheckConstraint(\n \"postalcode ~* '[a-z][0-9][a-z] [0-9][a-z][0-9]'\",\n name = \"tbladdress_postalcode_check\"\n )\n )\n intersection = Column(String(255))\n unit = Column(String(10))\n unitvalue = Column(String(10))\n streetnumber = Column(String(10))\n streetsuffix = Column(String(10))\n streetdirection = Column(String(2))\n unitextra = Column(String(25))\n deliverynumber = Column(String(10))\n deliverystation = Column(String(30))\n deliverymode = Column(String(20))\n busroute = Column(String(50))\n utm_x = Column(Integer)\n utm_y = Column(Integer)\n ismappable = Column(Boolean)\n latitude = Column(DECIMAL(11,9))\n longitude = Column(DECIMAL(11,9))\n\n # Relationships\n type = 
relationship(\"AddressType\") # many-to-one\n access = relationship(\n \"Accessibility\",\n secondary= with_schema(\"treladdressaccessibility\"),\n uselist=False # one-to-one\n )\n org = relationship(\n \"Org\",\n secondary = with_schema(\"org_address_rel\"),\n uselist = False # Org-to-Address is one-to-many\n )\n\nclass Accessibility(Base):\n __tablename__ = \"tlkpaccessibility\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(100), nullable=False)\n\nclass AddressAccessibility(Base):\n __tablename__ = \"treladdressaccessibility\"\n\n # Columns\n addressid = Column(Integer, ForeignKey(\"tbladdress.id\"), primary_key=True)\n accessibilityid = Column(Integer, ForeignKey(\"tlkpaccessibility.id\"), nullable=False)\n\nclass CommType(Base):\n __tablename__ = \"tlkpcommtype\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(50), nullable=False, unique=True)\n\nclass Comm(Base):\n __tablename__ = \"tblcomm\"\n __table_args__ = (\n CheckConstraint(\"\"\"\n (commtypeid in (1, 2, 3, 5, 6) and value ~* '[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]')\n or\n (commtypeid = 2 and value = '911')\n or\n (commtypeid = 4 and value like '_%@%.%')\n or\n (commtypeid = 7 and value like '%.__%')\n or\n commtypeid > 7\n \"\"\",\n name = \"tblcomm_check\"\n ),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n commtypeid = Column(Integer, ForeignKey(\"tlkpcommtype.id\"), nullable=False)\n value = Column(String(255), nullable=False, index=True)\n comment = Column(Text)\n\n # Relationships\n type = relationship(\"CommType\") # many-to-one\n org = relationship(\n \"Org\",\n secondary = with_schema(\"org_comm_rel\"),\n uselist = False # Org-to-Comm is one-to-many\n )\n\nclass Contact(Base):\n __tablename__ = \"tblcontact\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(60))\n title = Column(String(120))\n org = Column(String(90))\n comm = Column(Text)\n contacttype = Column(Integer, default=0, 
index=True)\n\n # Relationships\n org = relationship(\n \"Org\",\n secondary = with_schema(\"org_contact_rel\"),\n uselist = False # Org-to-Contact is one-to-many\n )\n comms = relationship(\n \"Comm\",\n secondary = with_schema(\"contact_comm\")\n )\n\nclass Service(Base):\n __tablename__ = \"tblservice\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n description = Column(Text)\n eligibility = Column(Text)\n info = Column(Text)\n fees = Column(Text)\n hours = Column(Text)\n dates = Column(Text)\n application = Column(Text)\n updated = Column(DateTime)\n ciocdescription = Column(Text)\n cioceligibility = Column(Text)\n ciocapplication = Column(Text)\n\n # Relationships\n language = relationship(\n \"Language\",\n secondary = with_schema(\"trelservicelanguage\"),\n uselist = False # one-to-one\n )\n area = relationship(\n \"Area\",\n secondary = with_schema(\"trelservicearea\"),\n uselist = False # one-to-one\n )\n org = relationship(\n \"Org\",\n secondary = with_schema(\"org_service_rel\"),\n uselist = False # Org-to-Service is one-to-one\n )\n\nclass Language(Base):\n __tablename__ = \"tlkplanguage\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(Text, nullable=False)\n\nclass ServiceLanguage(Base):\n __tablename__ = \"trelservicelanguage\"\n __table_args__ = (\n PrimaryKeyConstraint(\"serviceid\", \"languageid\"),\n )\n\n # Columns\n serviceid = Column(Integer, ForeignKey(\"tblservice.id\"), nullable=False)\n languageid = Column(Integer, ForeignKey(\"tlkplanguage.id\"), nullable=False)\n\nclass Area(Base): # see also Areas for area\n __tablename__ = \"tlkparea\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(Text, nullable=False)\n\nclass ServiceArea(Base):\n __tablename__ = \"trelservicearea\"\n __table_args__ = (\n PrimaryKeyConstraint(\"serviceid\", \"areaid\"),\n )\n\n # Columns\n serviceid = Column(Integer, ForeignKey(\"tblservice.id\"), nullable=False)\n areaid = Column(Integer, 
ForeignKey(\"tlkparea.id\"), nullable=False)\n\nclass OrgName(Base):\n __tablename__ = \"tblorgname\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n orgnametypeid = Column(Integer, ForeignKey(\"tlkporgnametype.id\"), nullable=False)\n name = Column(String(100), nullable=False, index=True)\n parentid = Column(Integer, ForeignKey(\"tblorgname.id\"))\n level = Column(Integer)\n sort = Column(String(100), index=True)\n sort_key = Column(String(100), index=True)\n added = Column(DateTime, default=func.now())\n\n # Relationships\n type = relationship(\"OrgNameType\") # many-to-one\n org = relationship(\n \"Org\",\n secondary = with_schema(\"org_names\"),\n back_populates = \"names\",\n uselist = False # Org-to-Orgname is one-to-many\n )\n\nclass OrgNameType(Base):\n __tablename__ = \"tlkporgnametype\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n type = Column(String(20), nullable=False)\n\nclass OrgNames(Base):\n __tablename__ = \"org_names\"\n __table_args__ = (\n UniqueConstraint(\"org_id\", \"org_name_id\"),\n Index(\"org_names_org_name_id_org_id_index\", \"org_name_id\", \"org_id\")\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False, index=True)\n org_name_id = Column(Integer, ForeignKey(\"tblorgname.id\"), nullable=False, index=True)\n added = Column(DateTime, default=func.now())\n \n # Relationships\n name = relationship(\"OrgName\") # many-to-one\n\nclass Org(Base):\n __tablename__ = \"org\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_name_id = Column(Integer, ForeignKey(\"tblorgname.id\"), nullable=False)\n update_note = Column(Text)\n cic_id = Column(String(7), nullable=False, unique=True)\n updated = Column(DateTime, default=func.now())\n service_level = Column(String(60), nullable=False)\n created = Column(DateTime, nullable=False, default=func.now(), index=True)\n isactive = Column(Boolean, nullable=False, default=True, index=True)\n iscomplete = 
Column(Boolean, nullable=False, default=False, index=True)\n modified = Column(DateTime)\n established = Column(\n String(4),\n CheckConstraint(\n \"established ~* '[1-2][0-9][0-9][0-9]'\",\n name = \"org_established_check\"\n )\n )\n bn = Column(\n String(15),\n CheckConstraint(\n \"bn ~* '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]RR[0-9][0-9][0-9][0-9]'\",\n name = \"org_bn_check\"\n )\n )\n deleted = Column(DateTime)\n\n # Relationships\n names = relationship( # official names, one-to-many\n \"OrgName\",\n secondary = with_schema(\"org_names\"),\n back_populates = \"org\",\n primaryjoin = \"and_(Org.id == OrgNames.org_id, OrgName.orgnametypeid == 1)\",\n order_by = \"OrgName.level\"\n )\n alt_names = relationship( # one-to-many\n \"OrgName\",\n secondary = \"tempdb.org_names\",\n back_populates = \"org\",\n primaryjoin = \"and_(Org.id == OrgNames.org_id, OrgName.orgnametypeid != 1)\"\n )\n comms = relationship( # one-to-many\n \"Comm\",\n secondary = with_schema(\"org_comm_rel\"),\n back_populates = \"org\"\n )\n addresses = relationship( # one-to-many\n \"Address\",\n secondary = with_schema(\"org_address_rel\"),\n back_populates = \"org\"\n )\n contacts = relationship( # one-to-many\n \"Contact\",\n secondary = with_schema(\"org_contact_rel\"),\n back_populates = \"org\"\n )\n service = relationship(\n \"Service\",\n secondary = with_schema(\"org_service_rel\"),\n uselist = False # Org-to-Service is one-to-one\n )\n # http://docs.sqlalchemy.org/en/rel_1_0/orm/basic_relationships.html#association-object\n pubs = relationship(\"PubOrg\") # one-to-many\n thes_all = relationship( # many-to-many\n \"Thesaurus\",\n secondary = with_schema(\"org_thes\"),\n secondaryjoin = \"OrgThes.thes_id == Thesaurus.id\"\n )\n thes_official = relationship( # many-to-many\n \"Thesaurus\",\n secondary = with_schema(\"org_thes\"),\n secondaryjoin = \"and_(OrgThes.thes_id == Thesaurus.id, OrgThes.thes_id == OrgThes.official_id)\"\n )\n notes = relationship(\"OrgNotes\") # 
one-to-many\n updates = relationship(\"OrgUpdated\") # one-to-many\n taxonomy_links = relationship(\n \"TaxLinkNote\",\n secondary = with_schema(\"orgtaxlink\")\n )\n ic_agency = relationship( # one-to-one\n \"ICAgency\",\n uselist = False,\n back_populates = \"org\"\n )\n ic_site = relationship( # one-to-one\n \"ICSite\",\n uselist = False,\n back_populates = \"org\"\n )\n ic_service = relationship( # one-to-one\n \"ICService\",\n uselist = False,\n back_populates = \"org\"\n )\n sites = relationship(\"OrgSite\") # one-to-many\n\nclass OrgComm(Base):\n __tablename__ = \"org_comm_rel\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n comm_id = Column(Integer, ForeignKey(\"tblcomm.id\"), nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n note = Column(Text)\n\nclass OrgAddress(Base):\n __tablename__ = \"org_address_rel\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n address_id = Column(Integer, ForeignKey(\"tbladdress.id\"), nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n note = Column(String(100))\n label = Column(String(50))\n\nclass OrgContact(Base):\n __tablename__ = \"org_contact_rel\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n contact_id = Column(Integer, ForeignKey(\"tblcontact.id\"), nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n note = Column(Text)\n\nclass OrgRelatedDeletions(Base):\n __tablename__ = \"org_rel_del\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, nullable=False)\n rel_id = Column(Integer, nullable=False)\n added = Column(DateTime, nullable=False)\n note = Column(Text)\n deleted = Column(DateTime, nullable=False)\n table_id = Column(Integer, nullable=False)\n\nclass 
OrgService(Base):\n __tablename__ = \"org_service_rel\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n service_id = Column(Integer, ForeignKey(\"tblservice.id\"), nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n note = Column(Text)\n\nclass OrgDeletions(Base):\n __tablename__ = \"org_del\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_name_id = Column(Integer, nullable=False)\n update_note = Column(Text)\n cic_id = Column(String(7), nullable=False, unique=True)\n updated = Column(DateTime)\n service_level = Column(String(60))\n\nclass PubOrg(Base):\n __tablename__ = \"pub_org\"\n __table_args__ = (\n UniqueConstraint(\"pub_id\", \"org_id\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n pub_id = Column(Integer, ForeignKey(\"pub.id\"), nullable=False)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n org_contact_id = Column(\n Integer,\n # SQLAlchemy defaults to \"on delete set null\"?\n ForeignKey(\"org_contact_rel.id\", ondelete=\"set null\")\n )\n deleted = Column(DateTime)\n isactive = Column(Boolean, nullable=False, default=True)\n xml = Column(Text)\n\n # Relationships\n contact = relationship(\n \"Contact\",\n secondary = with_schema(\"org_contact_rel\"),\n uselist = False # PubOrg-to-Contact is one-to-one\n )\n # http://docs.sqlalchemy.org/en/rel_1_0/orm/basic_relationships.html#association-object\n pub = relationship(\"Pub\") # many-to-one\n\nclass Thesaurus(Base):\n __tablename__ = \"thes_original\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n de = Column(String(100), nullable = False, unique=True)\n use = Column(String(100))\n woo = Column(String(1))\n eq = Column(String(100))\n uf = Column(Text)\n sn = Column(Text)\n bt = Column(String(100))\n nt = Column(Text)\n rt = Column(String(150))\n ca = Column(String(50))\n input = 
Column(String(50))\n act = Column(String(10), nullable=False)\n msg = Column(String(50))\n cr = Column(String(50))\n up = Column(String(50))\n sort = Column(String(100))\n comments = Column(Text)\n\n # Relationships\n relations = relationship( # one-to-many\n \"ThesRel\",\n primaryjoin = \"Thesaurus.id == ThesRel.thes_id\"\n )\n used_fors = relationship( # one-to-many\n \"ThesRel\",\n primaryjoin = \"and_(Thesaurus.id == ThesRel.thes_id, ThesRel.rel_type == 'uf')\"\n )\n see_alsos = relationship( # one-to-many\n \"ThesRel\",\n primaryjoin = \"and_(Thesaurus.id == ThesRel.thes_id, ThesRel.rel_type == 'rt')\"\n )\n broader_terms = relationship( # one-to-many but not often\n \"ThesRel\",\n primaryjoin = \"and_(Thesaurus.id == ThesRel.thes_id, ThesRel.rel_type == 'bt')\"\n )\n\nclass ThesRel(Base):\n __tablename__ = \"thes_rel\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n thes_id = Column(Integer, ForeignKey(\"thes_original.id\"), nullable=False)\n rel_id = Column(Integer, ForeignKey(\"thes_original.id\"), nullable=False)\n rel_type = Column(String(2), nullable=False, index=True)\n ca = Column(Integer, ForeignKey(\"thes_cat.id\"))\n sort_key = Column(String(100))\n comments = Column(Text)\n\n # Relationships\n related = relationship( # many-to-one\n \"Thesaurus\",\n primaryjoin = \"ThesRel.rel_id == Thesaurus.id\"\n )\n\nclass OrgThes(Base):\n __tablename__ = \"org_thes\"\n __table_args__ = (\n UniqueConstraint(\"org_id\", \"thes_id\", \"official_id\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n thes_id = Column(Integer, ForeignKey(\"thes_original.id\"), nullable=False)\n official_id = Column(Integer, ForeignKey(\"thes_original.id\"), nullable=False)\n\nclass PubEntry(Base):\n __tablename__ = \"pub_entry\"\n __table_args__ = (\n UniqueConstraint(\"pub_org_id\", \"pub_year\"),\n Index(\"pub_entry_pub_year_entry_index\", \"pub_year\", \"entry\")\n )\n\n # Columns\n id = 
Column(Integer, primary_key=True)\n pub_org_id = Column(Integer, ForeignKey(\"pub_org.id\"), nullable=False)\n entry = Column(Integer, nullable=False)\n pub_year = Column(\n Integer,\n CheckConstraint(\n \"pub_year > 2000\",\n name = \"pub_entry_pub_year_check\"\n ),\n nullable = False\n )\n\nclass Areas(Base): # see also Area for tlkparea\n __tablename__ = \"area\"\n __table_args__ = (\n UniqueConstraint(\"name\", \"locatedin\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(255), nullable=False)\n locatedin = Column(Integer, ForeignKey(\"area.id\"))\n alt = String(255)\n\n # Relationships\n surrounds = relationship(\"Areas\") # one-to-many\n surrounded_by = relationship(\"Areas\", remote_side=[id]) # many-to-one\n\nclass Taxonomy(Base):\n __tablename__ = \"taxonomy\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(100), nullable=False, index=True)\n code = Column(String(19), unique=True)\n ispreferred = Column(Boolean, nullable=False)\n definition = Column(Text)\n created = Column(Date)\n modified = Column(Date, index=True)\n parentid = Column(Integer, ForeignKey(\"taxonomy.id\"))\n cicmodified = Column(DateTime)\n\n # Relationships\n relations = relationship( # one-to-many\n \"TaxRel\",\n primaryjoin = \"Taxonomy.id == TaxRel.taxid\"\n )\n\nclass TaxRel(Base):\n __tablename__ = \"taxrel\"\n __table_args__ = (\n UniqueConstraint(\"taxid\", \"relid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n taxid = Column(Integer, ForeignKey(\"taxonomy.id\"), nullable=False)\n relid = Column(Integer, ForeignKey(\"taxonomy.id\"), nullable=False)\n reltype = Column(String(2), nullable=False)\n\n # Relationships\n related = relationship( # one-to-one\n \"Taxonomy\",\n primaryjoin = \"TaxRel.relid == Taxonomy.id\"\n )\n\nclass Locations(Base): # same as tempdb.area/Areas?\n __tablename__ = \"locations\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n officialname = Column(String(100), 
nullable=False)\n locatedin = Column(Integer, ForeignKey(\"locations.id\"))\n sortas = Column(String(100))\n displayas = Column(String(100))\n\nclass PubGroupName(Base):\n __tablename__ = \"pubgroupname\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n groupname = Column(String(50), nullable=False)\n\nclass PubGroup(Base):\n __tablename__ = \"pubgroup\"\n __table_args__ = (\n UniqueConstraint(\"pubid\", \"groupid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n pubid = Column(Integer, ForeignKey(\"pub.id\"), nullable=False)\n groupid = Column(Integer, ForeignKey(\"pubgroupname.id\"), nullable=False)\n\nclass OrgNotes(Base):\n __tablename__ = \"orgnotes\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n orgid = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n notetype = Column(Integer, ForeignKey(\"orgnotetypes.id\"), nullable=False)\n note = Column(Text, nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n modified = Column(DateTime)\n isactive = Column(Boolean, nullable=False, default=True)\n ispublic = Column(Boolean, nullable=False, default=True)\n alertdate = Column(Date)\n\n # Relationships\n type = relationship(\"OrgNoteTypes\") # many-to-one\n\nclass OrgNoteTypes(Base):\n __tablename__ = \"orgnotetypes\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n value = Column(String(30), nullable=False)\n\nclass PubThes(Base):\n __tablename__ = \"pubthes\"\n __table_args__ = (\n UniqueConstraint(\"pubid\", \"thesid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n pubid = Column(Integer, ForeignKey(\"pub.id\"), nullable=False)\n thesid = Column(Integer, ForeignKey(\"thes_original.id\"), nullable=False)\n isactive = Column(Boolean, nullable=False, default=True)\n\nclass TaxGroups(Base):\n __tablename__ = \"taxgroups\"\n __table_args__ = (\n UniqueConstraint(\"taxgroup\", \"taxid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n taxgroup = Column(Integer, 
nullable=False)\n taxid = Column(Integer, ForeignKey(\"taxonomy.id\"), nullable=False)\n isactive = Column(Boolean, nullable=False)\n haschildren = Column(Boolean, nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n islocal = Column(Boolean, nullable=False, default=False)\n modified = Column(DateTime)\n\nclass TempTaxGroup(Base):\n __tablename__ = \"temptaxgroup\"\n # SQLAlchemy needs a primary key\n __table_args__ = (\n PrimaryKeyConstraint(\"groupid\", \"taxcode\"),\n )\n\n # Columns\n groupid = Column(Integer, nullable=False)\n taxcode = Column(String(13), nullable=False)\n\nclass TaxChanges(Base):\n __tablename__ = \"taxchanges\"\n # SQLAlchemy needs a primary key\n __table_args__ = (\n PrimaryKeyConstraint(\"changetype\", \"oldcode\", \"newcode\"),\n )\n\n # Columns\n changetype = Column(Integer, nullable=False)\n oldcode = Column(String(13), nullable=False)\n newcode = Column(String(13), nullable=False)\n oldname = Column(String(60), nullable=False)\n newname = Column(String(60), nullable=False)\n dateus = Column(String(10), nullable=False)\n\nclass OrgUpdated(Base):\n __tablename__ = \"orgupdated\"\n __table_args__ = (\n UniqueConstraint(\"orgid\", \"updated\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n orgid = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n updated = Column(DateTime, nullable=False)\n\nclass TaxLink(Base):\n __tablename__ = \"taxlink\"\n __table_args__ = (\n UniqueConstraint(\"linkid\", \"taxid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n linkid = Column(Integer, ForeignKey(\"taxlinknote.id\"), nullable=False)\n taxid = Column(Integer, ForeignKey(\"taxonomy.id\"), nullable=False)\n\nclass OrgTaxLink(Base):\n __tablename__ = \"orgtaxlink\"\n __table_args__ = (\n UniqueConstraint(\"orgid\", \"linkid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n orgid = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n linkid = Column(Integer, 
ForeignKey(\"taxlinknote.id\"), nullable=False)\n added = Column(DateTime, default=func.now())\n\nclass TaxLinkNote(Base):\n __tablename__ = \"taxlinknote\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n note = Column(Text, nullable=False)\n\n # Relationships\n taxonomy = relationship( # many-to-many\n \"Taxonomy\",\n secondary = with_schema(\"taxlink\")\n )\n\nclass Cioc(Base):\n __tablename__ = \"cioc\"\n __table_args__ = (\n UniqueConstraint(\"xid\", \"ptype\", \"pid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n pid = Column(Integer, ForeignKey(\"pub.id\"), nullable=False)\n ptype = Column(Integer, nullable=False)\n xid = Column(Integer, ForeignKey(\"ciocexport.id\"), nullable=False)\n\nclass CiocExport(Base):\n __tablename__ = \"ciocexport\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n updated = Column(DateTime)\n notes = Column(Text, nullable=False)\n\nclass TaxRelTemp(Base):\n __tablename__ = \"taxreltemp\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n taxcode = Column(String(19), nullable=False)\n relcode = Column(String(19), nullable=False)\n reltype = Column(String(2), nullable=False)\n\nclass TempTaxNames(Base):\n __tablename__ = \"temptaxnames\"\n # SQLAlchemy needs a primary key\n __table_args__ = (\n PrimaryKeyConstraint(\"code\", \"name\"),\n )\n\n # Columns\n code = Column(String(19), nullable=False, index=True)\n name = Column(String(100), nullable=False)\n ispreferred = Column(Boolean, nullable=False)\n release = Column(Text)\n\nclass TempTaxAlso(Base):\n __tablename__ = \"temptaxalso\"\n # SQLAlchemy needs a primary key\n __table_args__ = (\n PrimaryKeyConstraint(\"code\", \"see\"),\n )\n\n # Columns\n code = Column(String(19), nullable=False, index=True)\n see = Column(String(19), nullable=False, index=True)\n release = Column(Text)\n\nclass TempTaxOld(Base):\n __tablename__ = \"temptaxold\"\n # SQLAlchemy needs a primary key\n __table_args__ = (\n PrimaryKeyConstraint(\"code\", \"old\"),\n 
)\n\n # Columns\n code = Column(String(19), nullable=False, index=True)\n old = Column(String(19), nullable=False, index=True)\n release = Column(Text)\n\nclass TempTaxDetails(Base):\n __tablename__ = \"temptaxdetails\"\n\n # Columns\n code = Column(String(19), primary_key=True) # SQLAlchemy needs a primary key\n definition = Column(Text, nullable=False)\n created = Column(Date, nullable=False)\n modified = Column(Date, nullable=False)\n release = Column(Text)\n\nclass PubTax(Base):\n __tablename__ = \"pubtax\"\n __table_args__ = (\n UniqueConstraint(\"pubid\", \"taxid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n pubid = Column(Integer, ForeignKey(\"pub.id\"), nullable=False)\n taxid = Column(Integer, ForeignKey(\"taxlinknote.id\"), nullable=False)\n added = Column(DateTime, nullable=False, default=func.now())\n\nclass ICAgency(Base):\n __tablename__ = \"ic_agencies\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n orgid = Column(Integer, ForeignKey(\"org.id\"), nullable=False, unique=True)\n cnd = Column(String(8))\n name_1 = Column(String(100))\n name_level_1 = Column(Integer)\n name_2 = Column(String(100))\n name_level_2 = Column(Integer)\n\n # Relationships\n org = relationship( # one-to-one\n \"Org\",\n back_populates = \"ic_agency\"\n )\n sites = relationship( # one-to-many\n \"ICSite\",\n back_populates = \"agency\"\n )\n\nclass ICSite(Base):\n __tablename__ = \"ic_agency_sites\"\n __table_args__ = (\n UniqueConstraint(\"agencyid\", \"siteid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n agencyid = Column(Integer, ForeignKey(\"ic_agencies.id\"), nullable=False)\n siteid = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n cnd = Column(String(8))\n site_name = Column(String(200), nullable=False) # added nullable=False\n site_name_level = Column(Integer)\n site_name_other = Column(String(3))\n\n # Relationships\n agency = relationship( # many-to-one\n \"ICAgency\",\n back_populates = \"sites\"\n )\n 
services = relationship( # one-to-many\n \"ICService\",\n back_populates = \"site\"\n )\n org = relationship( # one-to-one\n \"Org\",\n back_populates = \"ic_site\"\n )\n\nclass ICService(Base):\n __tablename__ = \"ic_site_services\"\n __table_args__ = (\n UniqueConstraint(\"siteid\", \"serviceid\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n siteid = Column(Integer, ForeignKey(\"ic_agency_sites.id\"), nullable=False)\n serviceid = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n service_name_1 = Column(String(200))\n service_name_2 = Column(String(200))\n\n # Relationships\n site = relationship( # many-to-one\n \"ICSite\",\n back_populates = \"services\"\n )\n org = relationship( # one-to-one\n \"Org\",\n back_populates = \"ic_service\"\n )\n\nclass PubTree(Base):\n __tablename__ = \"pub_tree\"\n __table_args__ = (\n PrimaryKeyConstraint(\"id\", \"parent\"),\n )\n\n # Columns\n id = Column(Integer, nullable=False, index=True)\n parent = Column(Integer, nullable=False, index=True) # why not a foreign key?\n pub = Column(Integer, ForeignKey(\"pub.id\"), nullable=False, index=True) # rename to pub_id\n note = Column(Text)\n depth = Column(Integer, nullable=False)\n\n # Relationships\n publication = relationship(\"Pub\") # many-to-one, rename to pub\n\nclass Site(Base):\n __tablename__ = \"site\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_address_id = Column(Integer, ForeignKey(\"org_address_rel.id\"), nullable=False, unique=True)\n context_id = Column(Integer, nullable=False, default=1)\n code = Column(String(20))\n\n # Relationships\n address = relationship(\n \"Address\",\n secondary = with_schema(\"org_address_rel\"),\n uselist = False # one-to-one: org_address_id is unique\n )\n\nclass OrgTree(Base):\n __tablename__ = \"org_tree\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n super_id = Column(Integer, ForeignKey(\"org_tree.id\"), 
nullable=False)\n\nclass OrgSite(Base):\n __tablename__ = \"org_site\"\n __table_args__ = (\n UniqueConstraint(\"org_id\", \"site_id\", \"label\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_id = Column(Integer, ForeignKey(\"org.id\"), nullable=False)\n site_id = Column(Integer, ForeignKey(\"site.id\"), nullable=False)\n name = Column(String(100))\n note = Column(Text)\n label = Column(String(100))\n type = Column(Integer, nullable=False, default=3)\n\n # Relationships\n site = relationship(\"Site\") # many-to-one\n org = relationship(\"Org\") # many-to-one\n org_name = relationship(\n \"OrgNames\", # org_names\n secondary = with_schema(\"org_site_name\"),\n uselist = False # one-to-one\n )\n\nclass OrgSiteName(Base):\n __tablename__ = \"org_site_name\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_site_id = Column(Integer, ForeignKey(\"org_site.id\"), nullable=False)\n org_names_id = Column(Integer, ForeignKey(\"org_names.id\"), nullable=False)\n\nclass OrgThesPub(Base):\n __tablename__ = \"org_thes_pub\"\n __table_args__ = (\n UniqueConstraint(\"org_thes_id\", \"pub_id\"),\n )\n\n # Columns\n id = Column(Integer, primary_key=True)\n org_thes_id = Column(Integer, ForeignKey(\"org_thes.id\"), nullable=False)\n pub_id = Column(Integer, ForeignKey(\"pub.id\"), nullable=False)\n is_active = Column(Boolean, nullable=False, default=True)\n\nclass TempTaxActive(Base):\n __tablename__ = \"temptaxactive\"\n\n # Columns\n code = Column(String(25), primary_key=True) # SQLAlchemy needs a primary key\n\nclass TempCCAC(Base):\n __tablename__ = \"tempccac\"\n\n # Columns\n ext = Column(String(10), primary_key=True) # SQLAlchemy needs a primary key\n # Foreign key added for SQLAlchemy\n id = Column(String(10), ForeignKey(\"org.cic_id\"), nullable=False)\n name = Column(String(200), nullable=False)\n \n # Relationships\n org = relationship(\"Org\")\n\nclass ContactComm(Base):\n __tablename__ = \"contact_comm\"\n\n # Columns\n id = 
Column(Integer, primary_key=True)\n contact_id = Column(Integer, ForeignKey(\"tblcontact.id\"), nullable=False)\n comm_id = Column(Integer, ForeignKey(\"tblcomm.id\"), nullable=False)\n type = Column(Integer)\n note = Column(String(50))\n added = Column(DateTime, nullable=False, default=func.now())\n\nclass External(Base):\n __tablename__ = \"external\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n name = Column(String(50), nullable=False)\n field = Column(String(50), nullable=False)\n cic = Column(String(50), nullable=False)\n note = Column(Text, nullable=False)\n\nclass ExternalData(Base):\n __tablename__ = \"external_data\"\n\n # Columns\n id = Column(Integer, primary_key=True)\n external_type = Column(Integer, ForeignKey(\"external.id\"), nullable=False)\n cic_id = Column(Integer, nullable=False)\n data = Column(Text, nullable=False)\n external_id = Column(String(50), nullable=False)\n" }, { "alpha_fraction": 0.7330290079116821, "alphanum_fraction": 0.7331762909889221, "avg_line_length": 18.914955139160156, "blob_id": "e16883dd18a746c6b6b858a2d1a7f58146a353ca", "content_id": "5be9c2abb75af41038419e13ca7dbc79346b0b3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 6791, "license_type": "no_license", "max_line_length": 53, "num_lines": 341, "path": "/sql/init_scripts/04-alter-table.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "SET search_path TO tempdb;\n-- skipping WebTemplate\n-- skipping WebData\n\nalter table thes\n add foreign key (cat_id)\n references thes_cat (id);\n\nalter table thes_tree\n add foreign key (parent_id)\n references thes (id);\n\nalter table thes_related\n add foreign key (related_id)\n references thes (id);\n\nalter table thes_related\n add foreign key (thes_id)\n references thes (id);\n\n-- skipping blue_entry\n\nalter table thes_reject\n add foreign key (thes_id)\n references thes (id);\n\nalter table thes_reject\n add foreign key 
(accept_id)\n references thes (id);\n\n-- skipping xref\n\nalter table tblAddress\n add foreign key (AddressTypeID)\n references tlkpAddressType (ID);\n\nalter table trelAddressAccessibility\n add foreign key (AccessibilityID)\n references tlkpAccessibility (ID);\n\nalter table trelAddressAccessibility\n add foreign key (AddressID)\n references tblAddress (ID);\n\nalter table tblComm\n add foreign key (CommTypeID)\n references tlkpCommType (ID);\n\nalter table trelServiceLanguage\n add foreign key (ServiceID)\n references tblService (ID);\n\nalter table trelServiceLanguage\n add foreign key (LanguageID)\n references tlkpLanguage (ID);\n\nalter table trelServiceArea\n add foreign key (AreaID)\n references tlkpArea (ID);\n\nalter table trelServiceArea\n add foreign key (ServiceID)\n references tblService (ID);\n\nalter table tblOrgName\n add foreign key (OrgNameTypeID)\n references tlkpOrgNameType (ID);\n\nalter table tblOrgName\n add foreign key (ParentID)\n references tblOrgName (ID);\n\nalter table org_names\n add foreign key (org_name_id)\n references tblOrgName (ID);\n\nalter table org_names\n add foreign key (org_id)\n references org (id);\n\nalter table org\n add foreign key (org_name_id)\n references tblOrgName (ID);\n\n-- skipping settle_thes\n-- skipping settle_org\n\nalter table org_comm_rel\n add foreign key (org_id)\n references org (id);\n\nalter table org_comm_rel\n add foreign key (comm_id)\n references tblComm (ID);\n\nalter table org_address_rel\n add foreign key (org_id)\n references org (id);\n\nalter table org_address_rel\n add foreign key (address_id)\n references tblAddress (ID);\n\nalter table org_contact_rel\n add foreign key (org_id)\n references org (id);\n\nalter table org_contact_rel\n add foreign key (contact_id)\n references tblContact (ID);\n\nalter table org_service_rel\n add foreign key (org_id)\n references org (id);\n\nalter table org_service_rel\n add foreign key (service_id)\n references tblService (ID);\n\nalter table pub_org\n 
add foreign key (pub_id)\n references pub (id);\n\nalter table pub_org\n add foreign key (org_id)\n references org (id);\n\nalter table pub_org\n add foreign key (org_contact_id)\n references org_contact_rel (id) on delete set null;\n\n-- skipping res\n-- skipping res_order\n-- skipping org_res_rel\n-- skipping temp_name_2\n\nalter table thes_rel\n add foreign key (rel_id)\n references thes_original (id);\n\nalter table thes_rel\n add foreign key (ca)\n references thes_cat (id);\n\nalter table thes_rel\n add foreign key (thes_id)\n references thes_original (id);\n\n-- skipping temp_insert_rel\n\nalter table org_thes\n add foreign key (org_id)\n references org (id);\n\nalter table org_thes\n add foreign key (thes_id)\n references thes_original (id);\n\nalter table org_thes\n add foreign key (official_id)\n references thes_original (id);\n\nalter table pub_entry\n add foreign key (pub_org_id)\n references pub_org (id);\n\nalter table area\n add foreign key (locatedIn)\n references area (id);\n\n-- skipping org_parent_child\n-- skipping parent_child_hours\n\nalter table taxonomy\n add foreign key (parentId)\n references taxonomy (id);\n\nalter table taxRel\n add foreign key (taxID)\n references taxonomy (id);\n\nalter table taxRel\n add foreign key (relID)\n references taxonomy (id);\n\n-- skipping cura\n-- skipping curaTargets\n-- skipping curaCategory\n-- skipping curaCategories\n-- skipping tblBroadcast\n\nalter table locations\n add foreign key (locatedIn)\n references locations (id);\n\n-- skipping log_enq\n-- skipping staff\n\nalter table pubGroup\n add foreign key (groupId)\n references pubGroupName (id);\n\nalter table pubGroup\n add foreign key (pubId)\n references pub (id);\n\nalter table orgNotes\n add foreign key (orgId)\n references org (id);\n\nalter table orgNotes\n add foreign key (noteType)\n references orgNoteTypes (id);\n\nalter table pubThes\n add foreign key (pubId)\n references pub (id);\n\nalter table pubThes\n add foreign key (thesId)\n 
references thes_original (id);\n\nalter table taxGroups\n add foreign key (taxID)\n references taxonomy (id);\n\nalter table orgUpdated\n add foreign key (orgid)\n references org (id);\n\n-- skipping postalCodes\n\nalter table taxLink\n add foreign key (taxId)\n references taxonomy (id);\n\nalter table taxLink\n add foreign key (linkId)\n references taxLinkNote (id)\n on delete cascade;\n\nalter table orgTaxLink\n add foreign key (orgId)\n references org (id);\n\nalter table orgTaxLink\n add foreign key (linkId)\n references taxLinkNote (id)\n on delete cascade;\n\nalter table cioc\n add foreign key (pid)\n references pub (id);\n\nalter table cioc\n add foreign key (xid)\n references ciocExport (id);\n\nalter table pubTax\n add foreign key (taxId)\n references taxLinkNote (id)\n on delete cascade;\n\nalter table pubTax\n add foreign key (pubId)\n references pub (id);\n\nalter table ic_agencies\n add foreign key (orgid)\n references org (id);\n\nalter table ic_agency_sites\n add foreign key (agencyid)\n references ic_agencies (id);\n\nalter table ic_agency_sites\n add foreign key (siteid)\n references org (id);\n\nalter table ic_site_services\n add foreign key (siteid)\n references ic_agency_sites (id);\n\nalter table ic_site_services\n add foreign key (serviceid)\n references org (id);\n\nalter table pub_tree\n add foreign key (pub)\n references pub (id);\n\nalter table site\n add foreign key (org_address_id)\n references org_address_rel (id)\n on delete cascade;\n\nalter table org_tree\n add foreign key (org_id)\n references org (id);\n\nalter table org_tree\n add foreign key (super_id)\n references org_tree (id);\n\nalter table org_site\n add foreign key (org_id)\n references org (id);\n\nalter table org_site\n add foreign key (site_id)\n references site (id);\n\nalter table org_site_name\n add foreign key (org_site_id)\n references org_site (id);\n\nalter table org_site_name\n add foreign key (org_names_id)\n references org_names (id);\n\nalter table 
org_thes_pub\n add foreign key (org_thes_id)\n references org_thes (id)\n on delete cascade;\n\nalter table org_thes_pub\n add foreign key (pub_id)\n references pub (id)\n on delete cascade;\n\nalter table contact_comm\n add foreign key (comm_id)\n references tblComm (ID);\n\nalter table contact_comm\n add foreign key (contact_id)\n references tblContact (ID);\n\nalter table external_data\n add foreign key (external_type)\n references external (id);\n" }, { "alpha_fraction": 0.7010983228683472, "alphanum_fraction": 0.7251046299934387, "avg_line_length": 21.843488693237305, "blob_id": "89d10f41d4d1bc316d466a0f58e955a0e10793ea", "content_id": "ff00f55d0510b5bdc7ecd4438428b674fbbff968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 19120, "license_type": "no_license", "max_line_length": 105, "num_lines": 837, "path": "/sql/init_scripts/02-create-table.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "SET search_path TO tempdb;\n\ncreate table Data (\n RecordNumber varchar(5) not null unique,\n InternalMemo multiocc null,\n Comments text null,\n RecNum varchar(7) not null check(left(RecNum, 3) = 'WRN') unique,\n Org1 varchar(100) null,\n Org2 varchar(70) null,\n Org3 varchar(70) null,\n Org4 varchar(70) null,\n Org5 varchar(70) null,\n AltOrg multiocc null,\n FormerOrg multiocc null,\n XRef multiocc null,\n StreetBuilding varchar(90) null,\n StreetAddress varchar(90) null,\n StreetCity varchar(40) null,\n MailCareOf varchar(60) null,\n Building varchar(90) null,\n Address varchar(90) null,\n City varchar(40) null,\n Province varchar(25) null,\n PostalCode varchar(7) null,\n Accessibility multiocc null,\n Location varchar(60) null,\n Intersection varchar(60) null,\n OfficePhone multiocc null,\n Fax multiocc null,\n EMail multiocc null,\n WWW varchar(255) null,\n AfterHoursPhone multiocc null,\n CrisisPhone multiocc null,\n TDDPhone multiocc null,\n Data varchar(30) null,\n Description 
multiocc null,\n PubDescription text null,\n GeneralInfo text null,\n BND multiocc null,\n OtherResource text null,\n Fees text null,\n Hours text null,\n Dates text null,\n AreaServed multiocc null,\n Eligibility text null,\n Application multiocc null,\n Languages multiocc null,\n Contact1 varchar(60) null,\n Contact1Title varchar(120) null,\n Contact1Org varchar(90) null,\n Contact1Phone multiocc null,\n Contact2 varchar(60) null,\n Contact2Title varchar(120) null,\n PrintedMaterial multiocc null,\n Contact2Org varchar(90) null,\n Contact2Phone multiocc null,\n Contact3 varchar(60) null,\n Contact3Title varchar(120) null,\n Contact3Org varchar(90) null,\n Contact3Phone multiocc null,\n Contact4 varchar(60) null,\n Contact4Title varchar(120) null,\n Contact4Org varchar(90) null,\n Contact4Phone multiocc null,\n DateEstablished varchar(60) null,\n Elections varchar(120) null,\n Funding multiocc null,\n DDCode varchar(10) null,\n LevelOfService varchar(60) null,\n Subject multiocc null,\n UsedFor multiocc null,\n Blue multiocc null,\n SeeAlso multiocc null,\n LocalSubjects multiocc null,\n TypeOfRecord varchar(2) null,\n QualityLevel varchar(20) null,\n ToBeDeleted varchar(20) null,\n Distribution multiocc null,\n Pub multiocc null,\n SourceOfInfo varchar(60) null,\n SourceTitle varchar(60) null,\n SourceOrg varchar(60) null,\n SourceBuilding varchar(30) null,\n SourceAddress varchar(60) null,\n SourceCity varchar(30) null,\n SourceProvince varchar(2) null,\n SourcePostalCode varchar(7) null,\n SourcePhone multiocc null,\n CollectedBy varchar(40) null,\n DateCollected varchar(10) null,\n CreatedBy varchar(40) null,\n UpdatedBy varchar(40) null,\n UpdateDate varchar(10) null,\n UpdateSchedule varchar(10) null,\n HistoryOfUpdate varchar(10) null,\n LastModified multiocc null,\n org1_sort varchar(100) null,\n id serial primary key,\n org_name_id integer not null\n);\n\n-- skipping WebConnection\n-- skipping WebDocumentType\n-- skipping WebSynchronize\n-- skipping 
WebTemplate\n-- skipping WebData\n-- skipping WebVersion\n\ncreate table thes (\n id serial primary key,\n term varchar(60) not null,\n note text not null,\n action varchar(6) null,\n cat_id integer null,\n sort varchar(6) null\n);\n\ncreate table thes_cat (\n id serial primary key,\n category varchar(30) not null\n);\n\ncreate table thes_tree (\n id serial primary key,\n term text not null,\n parent_id integer null,\n cat_id integer not null\n);\n\n-- skipping thes_data\n-- skipping StaffBook\n-- skipping staff_calendar\n-- skipping staff_group\n-- skipping staff_hours\n\ncreate table city (\n id serial primary key,\n city varchar(20) not null\n);\n\ncreate table pub (\n id serial primary key,\n code varchar(20) not null unique,\n title varchar(50) not null,\n isdefault boolean not null default false,\n lastUpdated timestamp null,\n note text null\n);\n\ncreate table thes_related (\n thes_id integer not null,\n related_id integer not null,\n primary key (thes_id, related_id)\n);\n\n-- skipping blue_entry\n\ncreate table thes_reject (\n thes_id integer not null,\n accept_id integer not null\n);\n\n-- skipping thes_blue_entry\n-- skipping thes_blue\n-- skipping old_blue_entry\n-- skipping thes_blue_related\n-- skipping xref\n-- skipping defunct\n\ncreate table tlkpAddressType (\n ID serial primary key,\n Name varchar(50) not null\n);\n\ncreate table tblAddress (\n ID serial primary key,\n AddressTypeID integer not null,\n InCareOf varchar(60) null,\n Building varchar(50) null,\n Address varchar(50) null,\n City varchar(50) not null,\n Province varchar(2) null default 'ON',\n PostalCode varchar(7) null check(postalcode ~* '[a-z][0-9][a-z] [0-9][a-z][0-9]'),\n Intersection varchar(255) null,\n unit varchar(10) null,\n unitValue varchar(10) null,\n streetNumber varchar(10) null,\n streetSuffix varchar(10) null,\n streetDirection varchar(2) null,\n unitExtra varchar(25) null,\n deliveryNumber varchar(10) null,\n deliveryStation varchar(30) null,\n deliveryMode 
varchar(20) null,\n busRoute varchar(50) null,\n utm_x integer null,\n utm_y integer null,\n ismappable boolean null,\n latitude decimal(11,9) null,\n longitude decimal(11,9) null,\n check(\n (utm_x is null and utm_y is null)\n or\n (utm_x is not null and utm_y is not null)\n or\n (latitude is null and longitude is null)\n or\n (latitude is not null and longitude is not null)\n )\n);\n\ncreate table tlkpAccessibility (\n ID serial primary key,\n Name varchar(100) not null\n);\n\ncreate table trelAddressAccessibility (\n AddressID serial primary key,\n AccessibilityID integer not null\n);\n\ncreate table tlkpCommType (\n ID serial primary key,\n Name varchar(50) not null unique\n);\n\ncreate table tblComm (\n ID serial primary key,\n CommTypeID integer not null,\n Value varchar(255) not null,\n Comment text null,\n check(\n (commtypeid in (1, 2, 3, 5, 6) and value ~* '[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]')\n or\n (commtypeid = 2 and value = '911')\n or\n (commtypeid = 4 and value like '_%@%.%')\n or\n (commtypeid = 7 and value like '%.__%')\n or\n commtypeid > 7\n )\n);\n\ncreate table tblContact (\n ID serial primary key,\n Name varchar(60) null,\n Title varchar(120) null,\n Org varchar(90) null,\n Comm text null,\n ContactType integer null default 0\n);\n\ncreate table tblService (\n ID serial primary key,\n Description text null,\n Eligibility text null,\n Info text null,\n Fees text null,\n Hours text null,\n Dates text null,\n Application text null,\n updated timestamp null,\n ciocDescription text null,\n ciocEligibility text null,\n ciocApplication text null\n);\n\ncreate table tlkpLanguage (\n ID serial primary key,\n Name text not null\n);\n\ncreate table trelServiceLanguage (\n ServiceID integer not null,\n LanguageID integer not null,\n primary key (ServiceID, LanguageID)\n);\n\ncreate table tlkpArea (\n ID serial primary key,\n Name text not null\n);\n\ncreate table trelServiceArea (\n ServiceID integer not null,\n AreaID integer not null,\n primary key 
(ServiceID, AreaID)\n);\n\n-- skipping Level integer null COMPUTE ((length(sort_key)-1)/5): no COMPUTE\n-- skipping Name varchar(100) not null check(length(name) > 0): bad data\ncreate table tblOrgName (\n ID serial primary key,\n OrgNameTypeID integer not null,\n Name varchar(100) not null,\n ParentID integer null,\n Level integer null,\n Sort varchar(100) null,\n sort_key varchar(100) null,\n added timestamp null default current_timestamp\n);\n\ncreate table tlkpOrgNameType (\n ID serial primary key,\n Type varchar(20) not null\n);\n\n-- skipping meta_word\n-- skipping meta_column\n-- skipping org_notes\n-- skipping meta_index\n-- skipping meta_group\n-- skipping meta_column_group\n\ncreate table org_names (\n id serial primary key,\n org_id integer not null,\n org_name_id integer not null,\n added timestamp null default current_timestamp,\n unique(org_id, org_name_id)\n);\n\n-- skipping meta_index_thes\ncreate table org (\n id serial primary key,\n org_name_id integer not null,\n update_note text null,\n cic_id varchar(7) not null unique,\n updated timestamp null default current_timestamp,\n service_level varchar(60) not null,\n created timestamp not null default current_timestamp,\n isactive boolean not null default true,\n iscomplete boolean not null default false,\n modified timestamp null,\n established varchar(4) null check(established ~* '[1-2][0-9][0-9][0-9]'),\n bn varchar(15) null check(bn ~* '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]RR[0-9][0-9][0-9][0-9]'),\n deleted timestamp null\n);\n\n-- skipping org_mod\n-- skipping org_meta\n-- skipping settle_thes\n-- skipping settle_org\n\ncreate table org_comm_rel (\n id serial primary key,\n org_id integer not null,\n comm_id integer not null,\n added timestamp not null default current_timestamp,\n note text null\n);\n\ncreate table org_address_rel (\n id serial primary key,\n org_id integer not null,\n address_id integer not null,\n added timestamp not null default current_timestamp,\n note varchar(100) 
null,\n label varchar(50) null\n);\n\ncreate table org_contact_rel (\n id serial primary key,\n org_id integer not null,\n contact_id integer not null,\n added timestamp not null default current_timestamp,\n note text null\n);\n\ncreate table org_rel_del (\n id serial primary key,\n org_id integer not null,\n rel_id integer not null,\n added timestamp not null,\n note text null,\n deleted timestamp not null,\n table_id integer not null\n);\n\ncreate table org_service_rel (\n id serial primary key,\n org_id integer not null,\n service_id integer not null,\n added timestamp not null default current_timestamp,\n note text null\n);\n\ncreate table org_del (\n id serial primary key,\n org_name_id integer not null,\n update_note text null,\n cic_id varchar(7) not null unique,\n updated timestamp null,\n service_level varchar(60) null\n);\n\ncreate table pub_org (\n id serial primary key,\n pub_id integer not null,\n org_id integer not null,\n added timestamp not null default current_timestamp,\n org_contact_id integer null,\n deleted timestamp null,\n isActive boolean not null default true,\n xml text null,\n unique(pub_id, org_id)\n);\n\ncreate table thes_original (\n id serial primary key,\n de varchar(100) not null unique,\n use varchar(100) null,\n woo varchar(1) null,\n eq varchar(100) null,\n uf text null,\n sn text null,\n bt varchar(100) null,\n nt text null,\n rt varchar(150) null,\n ca varchar(50) null,\n input varchar(50) null,\n act varchar(10) not null,\n msg varchar(50) null,\n cr varchar(50) null,\n up varchar(50) null,\n sort varchar(100) null,\n comments text null\n);\n\n-- skipping res_type\n-- skipping res_loc\n-- skipping res\n-- skipping res_order\n-- skipping org_res_rel\n-- skipping pub_list\n-- skipping temp_name_1\n-- skipping temp_name_2\n-- skipping temp_name_3\n\ncreate table thes_rel (\n id serial primary key,\n thes_id integer not null,\n rel_id integer not null,\n rel_type varchar(2) not null,\n ca integer null,\n sort_key varchar(100) 
null,\n comments text null\n);\n\n-- skipping temp_insert\n-- skipping temp_insert_rel\n\ncreate table org_thes (\n id serial primary key,\n org_id integer not null,\n thes_id integer not null,\n official_id integer not null,\n unique(org_id, thes_id, official_id)\n);\n\n-- skipping temp_thes\n\ncreate table pub_entry (\n id serial primary key,\n pub_org_id integer not null,\n entry integer not null,\n pub_year integer not null check(pub_year between 2000 and 2050),\n unique(pub_org_id, pub_year)\n);\n\ncreate table area (\n id serial primary key,\n name varchar(255) not null,\n locatedIn integer null,\n alt varchar(255) null,\n unique(name, locatedin)\n);\n\n-- skipping org_parent_child\n-- skipping parent_child_hours\n-- skipping taxonomy_original\n\ncreate table taxonomy (\n id serial primary key,\n name varchar(100) not null,\n code varchar(19) null unique,\n isPreferred boolean not null,\n definition text null,\n created date null,\n modified date null,\n parentId integer null,\n cicModified timestamp null\n);\n\n-- skipping taxonomy_original\n-- skipping taxtree\n\ncreate table taxRel (\n id serial primary key,\n taxID integer not null,\n relID integer not null,\n relType varchar(2) not null,\n unique(taxid, relid)\n);\n\n-- skipping org_tax\n-- skipping tempSuffix\n-- skipping addressSuffix\n-- skipping addressUnit\n-- skipping cura\n-- skipping curaTarget\n-- skipping curaTargets\n-- skipping curaCategory\n-- skipping curaCategories\n-- skipping curaCatchment\n-- skipping curaAvailability\n-- skipping tblProject\n-- skipping trelBroadcastProject\n-- skipping tlkpBroadcastType\n-- skipping tblBroadcast\n-- skipping trelBroadcastRec\n-- skipping tlkpBroadcastStatus\n\ncreate table locations (\n id serial primary key,\n officialName varchar(100) not null,\n locatedIn integer null,\n sortAs varchar(100) null,\n displayAs varchar(100) null\n);\n\n-- skipping log_level\n-- skipping log_source\n-- skipping log_contact\n-- skipping log_age\n-- skipping log_enq\n-- 
skipping log_area_call\n-- skipping log_area_need\n-- skipping log_enq_mode\n-- skipping staff\n-- skipping DocInfo\n-- skipping NAMESDATA\n-- skipping PAGESDATA\n-- skipping SortHeadings\n-- skipping SUBJECTDATA\n-- skipping subjects\n-- skipping TESTXML\n-- skipping XMLContent\n-- skipping log_result\n-- skipping log_need\n-- skipping log_resource\n-- skipping tempCIT\n\ncreate table pubGroupName (\n id serial primary key,\n groupName varchar(50) not null\n);\n\ncreate table pubGroup (\n id serial primary key,\n pubId integer not null,\n groupId integer not null,\n unique(pubId, groupId)\n);\n\n-- skipping tempGeo\n\ncreate table orgNotes (\n id serial primary key,\n orgId integer not null,\n noteType integer not null,\n note text not null,\n added timestamp not null default current_timestamp,\n modified timestamp null,\n isactive boolean not null default true,\n ispublic boolean not null default true,\n alertDate date null\n);\n\ncreate table orgNoteTypes (\n id serial primary key,\n value varchar(30) not null\n);\n\ncreate table pubThes (\n id serial primary key,\n pubId integer not null,\n thesId integer not null,\n isactive boolean not null default true,\n unique(pubid, thesid)\n);\n\n-- skipping tempUTM\n-- skipping orgmod\n-- skipping orgmodcolumns\n-- skipping og\n\ncreate table taxGroups (\n id serial primary key,\n taxGroup integer not null,\n taxID integer not null,\n isActive boolean not null,\n hasChildren boolean not null,\n added timestamp null default current_timestamp,\n isLocal boolean not null default false,\n modified timestamp null,\n unique(taxgroup, taxid)\n);\n\ncreate table temptaxgroup (\n groupid integer not null,\n taxcode varchar(13) not null\n);\n\ncreate table taxChanges (\n changeType integer not null,\n oldCode varchar(13) not null,\n newCode varchar(13) not null,\n oldName varchar(60) not null,\n newName varchar(60) not null,\n dateUS varchar(10) not null\n);\n\n-- skipping tempContactComm\n-- skipping taxgroup\n\ncreate table 
orgUpdated (\n id serial primary key,\n orgid integer not null,\n updated timestamp not null,\n unique(orgid, updated)\n);\n\n-- skipping temp211\n-- skipping postalCodes\n-- skipping sqlXml\n\ncreate table taxLink (\n id serial primary key,\n linkId integer not null,\n taxId integer not null,\n unique(linkId, taxId)\n);\n\ncreate table orgTaxLink (\n id serial primary key,\n orgId integer not null,\n linkId integer not null,\n added timestamp null default current_timestamp,\n unique(orgId, linkId)\n);\n\ncreate table taxLinkNote (\n id serial primary key,\n note text not null\n);\n\n-- skipping taxStartTemp\n-- skipping taxTemp\n\ncreate table cioc (\n id serial primary key,\n pid integer not null,\n ptype integer not null,\n xid integer not null,\n unique(xid, ptype, pid)\n);\n\ncreate table ciocExport (\n id serial primary key,\n updated timestamp null,\n notes text not null\n);\n\n-- skipping tempNO\n\ncreate table taxRelTemp (\n id serial primary key,\n taxCode varchar(19) not null,\n relCode varchar(19) not null,\n relType varchar(2) not null\n);\n\n-- skipping taxTempOldCode\n-- skipping taxonomy_copy\n-- skipping funding\n\ncreate table tempTaxNames (\n code varchar(19) not null,\n name varchar(100) not null,\n isPreferred boolean not null,\n release text null\n);\n\ncreate table tempTaxAlso (\n code varchar(19) not null,\n see varchar(19) not null,\n release text null\n);\n\ncreate table tempTaxOld (\n code varchar(19) not null,\n old varchar(19) not null,\n release text null\n);\n\ncreate table tempTaxDetails (\n code varchar(19) not null,\n definition text not null,\n created date not null,\n modified date not null,\n release text null\n);\n\n-- skipping isql\n-- skipping org_location\n-- skipping org_locations\n\ncreate table pubTax (\n id serial primary key,\n pubId integer not null,\n taxId integer not null,\n added timestamp not null default current_timestamp,\n unique(pubid, taxid)\n);\n\ncreate table ic_agencies (\n id serial primary key,\n orgid 
integer not null unique,\n CND varchar(8) null,\n name_1 varchar(100) null,\n name_level_1 integer null,\n name_2 varchar(100) null,\n name_level_2 integer null\n);\n\ncreate table ic_agency_sites (\n id serial primary key,\n agencyid integer not null,\n siteid integer not null,\n CND varchar(8) null,\n site_name varchar(200) null, -- changed from null\n site_name_level integer null,\n site_name_other varchar(3) null,\n unique(agencyid, siteid)\n);\n\ncreate table ic_site_services (\n id serial primary key,\n siteid integer not null,\n serviceid integer not null,\n service_name_1 varchar(200) null,\n service_name_2 varchar(200) null,\n unique(siteid, serviceid)\n);\n\ncreate table pub_tree (\n id integer not null,\n parent integer not null, -- why not a foreign key?\n pub integer not null,\n note text null,\n depth integer not null,\n primary key (id, parent)\n);\n\ncreate table site (\n id serial primary key,\n org_address_id integer not null unique,\n context_id integer not null default 1,\n code varchar(20) null\n);\n\ncreate table org_tree (\n id serial primary key,\n org_id integer not null,\n super_id integer not null\n);\n\ncreate table org_site (\n id serial primary key,\n org_id integer not null,\n site_id integer not null,\n name varchar(100) null,\n note text null,\n label varchar(100) null,\n type integer not null default 3,\n unique(org_id, site_id, label)\n);\n\ncreate table org_site_name (\n id serial primary key,\n org_site_id integer not null,\n org_names_id integer not null\n);\n\ncreate table org_thes_pub (\n id serial primary key,\n org_thes_id integer not null,\n pub_id integer not null,\n is_active boolean not null default true,\n unique(org_thes_id, pub_id)\n);\n\ncreate table tempTaxActive (\n code varchar(25) not null unique\n);\n\ncreate table tempCCAC (\n ext varchar(10) not null,\n id varchar(10) not null,\n name varchar(200) not null\n);\n\ncreate table contact_comm (\n id serial primary key,\n contact_id integer not null,\n comm_id 
integer not null,\n type integer null,\n note varchar(50) null,\n added timestamp not null default current_timestamp\n);\n\ncreate table external (\n id serial primary key,\n name varchar(50) not null,\n field varchar(50) not null,\n cic varchar(50) not null,\n note text not null\n);\n\ncreate table external_data (\n id serial primary key,\n external_type integer not null,\n cic_id integer not null,\n data text not null,\n external_id varchar(50) not null\n);\n" }, { "alpha_fraction": 0.5490683317184448, "alphanum_fraction": 0.7204968929290771, "avg_line_length": 17.720930099487305, "blob_id": "aaf6a91b853b28eeea0a476aa9625e128dcfcea7", "content_id": "9a2c84f6831fe8bf94d6bf19d1b43e32067e256e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 805, "license_type": "no_license", "max_line_length": 41, "num_lines": 43, "path": "/requirements.txt", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "backports-abc==0.4\nbackports.shutil-get-terminal-size==1.0.0\nbackports.ssl-match-hostname==3.5.0.1\ncertifi==2016.2.28\nclick==6.6\nconfigparser==3.5.0\ndecorator==4.0.10\nentrypoints==0.2.2\nipykernel==4.3.1\nipython==4.2.0\nipython-genutils==0.1.0\nipython-sql==0.3.6\nipywidgets==5.1.5\nJinja2==2.8\njsonschema==2.5.1\njupyter==1.0.0\njupyter-client==4.3.0\njupyter-console==4.1.1\njupyter-core==4.1.0\nMarkupSafe==0.23\nmistune==0.7.2\nnbconvert==4.2.0\nnbformat==4.0.1\nnotebook==4.2.1\npathlib2==2.1.0\npexpect==4.1.0\npickleshare==0.7.2\npkg-resources==0.0.0\nprettytable==0.7.2\npsycopg2==2.6.1\nptyprocess==0.5.1\nPygments==2.1.3\npyzmq==15.2.0\nqtconsole==4.2.1\nsimplegeneric==0.8.1\nsingledispatch==3.4.0.3\nsix==1.10.0\nSQLAlchemy==1.0.13\nsqlparse==0.1.19\nterminado==0.6\ntornado==4.3\ntraitlets==4.2.1\nwidgetsnbextension==1.2.3\n" }, { "alpha_fraction": 0.7345454692840576, "alphanum_fraction": 0.7454545497894287, "avg_line_length": 21.83333396911621, "blob_id": 
"6f5c13fdd1e5d628c33f3d381ace090460c68b0b", "content_id": "c03716347bacd3a4cd2637159441a7bad3e5a89b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/edit/__init__.py", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom .context import Session\n\napp = Flask(__name__)\n\nimport edit.views\n\n# http://flask.pocoo.org/docs/0.10/patterns/sqlalchemy/\[email protected]_appcontext\ndef shutdown_session(exception=None):\n print(\"shutdown_session() says bye!\")\n Session.remove()\n\n" }, { "alpha_fraction": 0.7246299982070923, "alphanum_fraction": 0.7251585721969604, "avg_line_length": 14.89915943145752, "blob_id": "87c13d71041f7620648480d5dd28c3fed998bff6", "content_id": "2909dd17bae29f19a46084b90d8ce3fa42d61cf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1892, "license_type": "no_license", "max_line_length": 62, "num_lines": 119, "path": "/sql/init_scripts/05-create-index.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "SET search_path TO tempdb;\n-- skipping Data\n-- skipping WebTemplate\n-- skipping WebData\n\ncreate index thes_term_index on thes (\n term\n);\n\ncreate index pub_title_index on pub (\n title\n);\n\ncreate index tblcomm_value_index on tblComm (\n Value\n);\n\ncreate index tblcontact_contacttype_index on tblContact (\n ContactType\n);\n\ncreate index tblorgname_sort_index on tblOrgName (\n Sort\n);\n\ncreate index tblorgname_name_index on tblOrgName (\n Name\n);\n\ncreate index tblorgname_sort_key_index on tblOrgName (\n sort_key\n);\n\ncreate index org_names_org_name_id_index on org_names (\n org_name_id\n);\n\ncreate index org_names_org_id_index on org_names (\n org_id\n);\n\ncreate index org_names_org_name_id_org_id_index on org_names (\n org_name_id,\n org_id\n);\n\ncreate 
index org_created_index on org (\n created\n);\n\ncreate index org_isactive_index on org (\n isactive\n);\n\ncreate index org_iscomplete_index on org (\n iscomplete\n);\n\n-- skipping res_loc\n-- skipping res\n-- skipping temp_name_2\n\ncreate index thes_rel_rel_type_index on thes_rel (\n rel_type\n);\n\ncreate index pub_entry_pub_year_entry_index on pub_entry (\n pub_year,\n entry\n);\n\ncreate index taxonomy_name_index on taxonomy (\n name\n);\n\ncreate index taxonomy_modified_index on taxonomy (\n modified\n);\n\n-- skipping tempCIT\n-- skipping tempGeo\n-- skipping taxTemp\n--skipping taxTempOldCode\n\ncreate index temptaxnames_code_index on tempTaxNames (\n code\n);\n\ncreate index temptaxalso_code_index on tempTaxAlso (\n code\n);\n\ncreate index temptaxalso_see_index on tempTaxAlso (\n see\n);\n\ncreate index temptaxold_code_index on tempTaxOld (\n code\n);\n\ncreate index temptaxold_old_index on tempTaxOld (\n old\n);\n\ncreate index temptaxdetails_code_index on tempTaxDetails (\n code\n);\n\ncreate index pub_tree_parent_index on pub_tree (\n parent\n);\n\ncreate index pub_tree_pub_index on pub_tree (\n pub\n);\n\ncreate index pub_tree_id_index on pub_tree (\n id\n);\n" }, { "alpha_fraction": 0.6626983880996704, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 14.75, "blob_id": "a9199398cd8d7223e1d6c0ffbec125ab7c43b828", "content_id": "aa7125227c8bd14fdb4ce50978cda4fb8eaf9123", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 504, "license_type": "no_license", "max_line_length": 71, "num_lines": 32, "path": "/sql/query_pub_tree.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "-- find org records associated with a group of pubs defined in pub_tree\nselect\n org.id,\n min(pub.title),\n pub_tree.parent,\n min(parent_pub.title)\nfrom\n org\n join\n pub_org\n on org.id = pub_org.org_id\n\n join\n pub\n on pub_org.pub_id = pub.id\n\n join\n pub_tree\n on pub.id = 
pub_tree.pub\n\n join\n pub_tree as parent\n on pub_tree.parent = parent.id and parent.depth = 1\n\n join\n pub as parent_pub\n on parent.pub = parent_pub.id\nwhere\n pub_tree.depth = 1\ngroup by\n org.id,\n pub_tree.parent;\n" }, { "alpha_fraction": 0.8073394298553467, "alphanum_fraction": 0.8119266033172607, "avg_line_length": 26.25, "blob_id": "b6e30cec050716b5b4468cd48b7b049631ecb7f5", "content_id": "d81d91caa2726e4cf54958d3a924ecfca3a55c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 218, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/sql/init_scripts/01-setup_db.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "drop schema if exists tempdb cascade;\ncreate schema if not exists tempdb;\nset search_path to tempdb;\n\ndrop domain if exists tempdb.multiocc cascade;\ncreate domain tempdb.multiocc as text;\n\ncreate extension plpython3u;\n" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7441860437393188, "avg_line_length": 16.200000762939453, "blob_id": "824915c6cb44e07babb3f1fedb3ce2a13aa8f31a", "content_id": "d35a70b1150482d154706e4f69a8f28ba7fde352", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 86, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/sql/init_scripts/14-run-functions.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "set search_path to tempdb;\n\nselect set_autoincrement();\n\nselect org_name_sort_keys();\n" }, { "alpha_fraction": 0.7135862708091736, "alphanum_fraction": 0.7154222726821899, "avg_line_length": 27.13793182373047, "blob_id": "e709bdfde008cbf8bd7a66d1b67324a4bf6d534d", "content_id": "0fc9e5641c07cf80b1e218439523b0faa7557b7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1634, "license_type": "no_license", "max_line_length": 80, 
"num_lines": 58, "path": "/db/__init__.py", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "from contextlib import contextmanager\nfrom functools import wraps\n\nfrom sqlalchemy import create_engine\n\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import sessionmaker\n\nimport click\nimport json\n\nimport config\n\npg = \"postgresql://{user}:{password}@localhost:{port}/{database}\"\ndb = pg.format(**config.db)\nengine = create_engine(db, echo=False)\n\nsession_factory = sessionmaker(bind=engine)\nSession = scoped_session(session_factory)\n\n# http://docs.sqlalchemy.org/en/rel_1_0/orm/session_basics.html\n@contextmanager\ndef session_scope():\n \"\"\"Provide a transactional scope around a series of operations.\"\"\"\n click.echo(\"I am session_scope()\")\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n click.echo(\"Closing session\")\n session.close()\n\ndef transactional(query_function):\n \"\"\"\n Decorate a function to use session_scope()\n query_function has only named arguments, including \"session\"\n \"\"\"\n click.echo(\"I am transactional({})\".format(query_function.__name__))\n @wraps(query_function)\n def wrapper(**kwargs):\n click.echo(\"I am transactional.wrapper({})\".format(query_function.__name__))\n click.echo(query_function.__doc__)\n with session_scope() as session:\n return query_function(session=session, **kwargs)\n return wrapper\n\ndef jsonify(dict_function):\n \"\"\"Decorate a function to return JSON instead of a dict\"\"\"\n click.echo(\"I am jsonify()\")\n @wraps(dict_function)\n def wrapper(*args, **kwargs):\n dict_ = dict_function(*args, **kwargs)\n return json.dumps(dict_, sort_keys=False, indent=2)\n return wrapper\n\n\n" }, { "alpha_fraction": 0.7539682388305664, "alphanum_fraction": 0.7539682388305664, "avg_line_length": 26.88888931274414, "blob_id": "806fcdebc85f04c4de5069bf4749ce4e38ba8737", "content_id": 
"f256a373c0b712c75ab7fe907080c53fa342365f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 252, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/sql/create-index-search.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "drop index if exists search_org_name;\ncreate index search_org_name\n on tblorgname\n using gin(to_tsvector('english', name));\n\ndrop index if exists search_taxonomy;\ncreate index search_taxonomy\n on taxonomy\n using gin(to_tsvector('english', name));\n\n" }, { "alpha_fraction": 0.7046831846237183, "alphanum_fraction": 0.7179063558578491, "avg_line_length": 20.228069305419922, "blob_id": "5036840126f654eb9e00f31036e6752f9c030a6c", "content_id": "33bfed65ca761474bb42f32a85ed860da9dbf54e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 3630, "license_type": "no_license", "max_line_length": 55, "num_lines": 171, "path": "/sql/old-tables.sql", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "create table thes_data (\n id serial primary key,\n thes_id integer not null,\n data_id integer not null,\n unique (thes_id, data_id)\n);\n\ncreate table blue_entry (\n entry integer not null unique,\n data_id integer primary key\n);\ncreate table thes_blue_entry (\n thes_id integer not null,\n entry integer not null,\n primary key (thes_id, entry)\n);\n\ncreate table thes_blue (\n thes_id integer primary key\n);\n\ncreate table old_blue_entry (\n entry integer primary key,\n data_id integer not null\n);\ncreate table thes_blue_related (\n thes_blue_id integer not null,\n related_id integer not null,\n type varchar(1) not null,\n primary key (thes_blue_id, related_id)\n);\n\ncreate table xref (\n data_id integer not null,\n xref varchar(100) not null,\n sort varchar(100) null,\n id serial not null unique,\n entry integer null,\n primary key (data_id, xref)\n);\n\ncreate table 
meta_word (\n id serial primary key,\n word varchar(30) not null unique\n);\n\ncreate table org_notes (\n id serial primary key,\n org varchar(300) not null,\n note varchar(100) not null,\n details text null,\n org_id integer null,\n note_date date not null default CURRENT_DATE\n);\n\ncreate table meta_index (\n id serial primary key,\n row_id integer not null,\n column_id integer not null,\n word_id integer not null,\n position integer not null,\n unique(row_id, column_id, position)\n);\n\ncreate table meta_group (\n id serial primary key,\n \"group\" varchar(20) not null\n);\n\ncreate table meta_column_group (\n id serial primary key,\n column_id integer not null,\n group_id integer not null,\n unique(column_id, group_id)\n);\ncreate table meta_index_thes (\n id serial primary key,\n row_id integer not null,\n column_id integer not null,\n word_id integer not null,\n position integer not null\n);\ncreate table org_mod (\n id serial primary key,\n org_id integer not null,\n mod text not null,\n mod_date timestamp not null default CURRENT_TIMESTAMP\n);\n\ncreate table org_meta (\n org_id integer not null,\n word_id integer not null,\n column_id integer not null,\n row_id integer not null,\n position integer null\n);\ncreate table taxonomy_original (\n recordType varchar(2) not null,\n termCode varchar(13) not null,\n value text not null\n);\ncreate table taxTree (\n id serial primary key,\n taxID integer not null,\n level1 varchar(1) not null,\n level2 varchar(1) null,\n level3 integer null,\n level4 integer null,\n level5 integer null,\n parentID integer null,\n unique(level1, level2, level3, level4, level5)\n);\n\ncreate table org_tax (\n id serial primary key,\n orgID integer not null,\n taxID integer not null,\n unique(orgID, taxID)\n);\n\ncreate table orgMod (\n id serial primary key,\n columnId integer not null,\n oldValue text null,\n newValue text null,\n modified timestamp not null\n);\ncreate table taxGroup (\n id serial primary key,\n name varchar(50) 
not null,\n notes text null\n);\n\ncreate table orgFunding (\n id serial primary key,\n orgId integer not null,\n fundingId integer not null\n);\n\ncreate table org_location (\n id serial primary key,\n org_id integer not null,\n name varchar(100) not null\n);\n\ncreate table org_locations (\n id serial primary key,\n location_id integer not null,\n address_id integer not null\n);\n\ncreate table orgModColumns (\n id serial primary key,\n columnName varchar(50) not null\n);\n\ncreate table funding (\n id serial primary key,\n name varchar(100) not null,\n notes text null\n);\n\ncreate table isql (\n id serial primary key,\n name varchar(100) not null,\n note text null,\n query text not null,\n parameters varchar(100) null,\n linkID integer null,\n created timestamp not null default CURRENT_TIMESTAMP\n);\n" }, { "alpha_fraction": 0.6498467326164246, "alphanum_fraction": 0.657392144203186, "avg_line_length": 31.615385055541992, "blob_id": "5a5990bdcd47209a7a9d84fefcb81de75d1ca37f", "content_id": "6a73cb5ced4655fac6f3603763dfcd62df7c0600", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4241, "license_type": "no_license", "max_line_length": 87, "num_lines": 130, "path": "/examples.py", "repo_name": "ToddTurnbull/reload", "src_encoding": "UTF-8", "text": "from db import *\nfrom db.models import *\n\nfrom collections import OrderedDict\n\nimport click\n\n@transactional\ndef address(session=None, address_id=5571):\n \"\"\"Test joining tbladdress to tlkpaddressaccessibility\"\"\"\n address = session.query(Address).filter_by(id=address_id).one()\n return_value = \"Address {} is '{}'\".format(address_id, address.access.name)\n print(\"Return value should be: {}\".format(address.access.name))\n return return_value\n\n@transactional\n@jsonify\ndef test_org(session=None, org_id=\"WRN2000\"):\n \"\"\"Test joining org to: names, contacts, publications, addresses, etc\"\"\"\n org = 
session.query(Org).filter_by(cic_id=org_id).one()\n return OrderedDict([\n (\"Names\", [name.name for name in org.names]),\n (\"Alternate Names\", [name.name for name in org.alt_names]),\n (\"Contacts\", [(contact.name, len(contact.comms)) for contact in org.contacts]),\n (\"Publications/Contacts\", [\n (pub.pub.title, pub.contact.name) if pub.contact\n else (pub.pub.title, None) \n for pub in org.pubs\n ]),\n (\"Postal codes\", [address.postalcode for address in org.addresses]),\n (\"Service Description\", org.service.description),\n (\"Thesaurus Terms\", [thes.de for thes in org.thes_official]),\n (\"Notes\", [note.note for note in org.notes]),\n (\"Update History\", [str(update.updated) for update in org.updates]),\n (\"Taxonomy\", [\n {link.note: [tax.name for tax in link.taxonomy]}\n for link in org.taxonomy_links\n ]),\n (\"Agency\", \"Is an agency\" if org.ic_agency else \"Is not an agency\")\n ])\n \n@transactional\n@jsonify\ndef test_thesaurus(session=None, thes_id=0):\n \"\"\"Test joining thesaurus term to its related terms\"\"\"\n thes = session.query(Thesaurus).filter_by(id=thes_id).one()\n return OrderedDict([\n (\"Term\", thes.de),\n (\"Related\", [(rel.rel_type, rel.related.de) for rel in thes.relations]),\n (\"Used for\", [uf.related.de for uf in thes.used_fors]),\n (\"See also\", [sa.related.de for sa in thes.see_alsos]),\n (\"Broader terms\", [bt.related.de for bt in thes.broader_terms])\n ])\n\n@transactional\n@jsonify\ndef test_taxonomy(session=None, code=\"BD\"):\n \"\"\"Test joining taxonomy term to its related terms\"\"\"\n tax = session.query(Taxonomy).filter_by(code=code).one()\n return OrderedDict([\n (\"Term\", tax.name),\n (\"Related\", [\n (rel.reltype, rel.related.code, rel.related.name)\n for rel in tax.relations\n ])\n ])\n\n@transactional\n@jsonify\ndef test_pub(session=None, pub_id=527):\n \"\"\"Test joining publication to its taxonomy terms\"\"\"\n pub = session.query(Pub).filter_by(id=pub_id).one()\n return OrderedDict([\n 
(\"Title\", pub.title),\n (\"Taxonomy\", [tax.note for tax in pub.taxonomy])\n ])\n\n@transactional\n@jsonify\ndef test_agency(session=None, agency_id=1214):\n \"\"\"Test joining agency to its org, sites, services\"\"\"\n agency = session.query(ICAgency).filter_by(id=agency_id).one()\n return OrderedDict([\n (\"Agency\", agency.id),\n (\"Org\", [name.name for name in agency.org.names]),\n (\"Sites\", [site.site_name for site in agency.sites]),\n (\"Services\", [\n (\n site.site_name,\n [(service.service_name_1, service.service_name_2) for service in site.services]\n )\n for site in agency.sites\n ])\n ])\n\n@transactional\n@jsonify\ndef test_site(session=None, site_id=89):\n \"\"\"Test joining site to its address\"\"\"\n site = session.query(Site).filter_by(id=site_id).one()\n return OrderedDict([\n (\"Site\", site.id),\n (\"Address\", (site.address.address, site.address.city))\n ])\n\n@transactional\n@jsonify\ndef test_org_site(session=None, org_id=\"WRN5575\"):\n \"\"\"List sites for an org record\"\"\"\n org = session.query(Org).filter_by(cic_id=org_id).one()\n return OrderedDict([\n (\"Org\", [name.name for name in org.names]),\n (\"Sites\", [OrderedDict([\n (\"Label\", site.label),\n (\"Site Name\", site.name),\n (\"Site Address City\", site.site.address.city),\n (\"Org Name\", site.org_name.name.name if site.org_name else None)\n ]) for site in org.sites\n ])\n ])\n\nif __name__ == \"__main__\":\n print(address())\n print(test_org())\n print(test_thesaurus(thes_id=3))\n print(test_taxonomy(code=\"BD\"))\n print(test_pub(pub_id=527))\n print(test_agency(agency_id=1214))\n print(test_site())\n print(test_org_site())\n\n" } ]
18
kszabo/MITCS50
https://github.com/kszabo/MITCS50
0a42b543d9a7a73d2e58278ab5dfb8b73494248b
340979954564939c7ceacf7a217a3761b5e3caf7
aea49ea370d33b161883dcbec19c633b02dd1ed6
refs/heads/master
2021-08-31T21:18:34.840788
2017-12-23T00:04:52
2017-12-23T00:04:52
115,153,548
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47511979937553406, "alphanum_fraction": 0.48912641406059265, "avg_line_length": 22.188034057617188, "blob_id": "4334c30a8ef4ca98184de52ba4154107b347020d", "content_id": "faada3dedb0723c547ff4f3cfd34a435f58ac2d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2713, "license_type": "no_license", "max_line_length": 100, "num_lines": 117, "path": "/pset3/find/helpers.c", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n/**\n * helpers.c\n *\n * Helper functions for Problem Set 3.\n */\n\n#include <cs50.h>\n\n#include \"helpers.h\"\n\n/**\n * Returns true if value is in array of n values, else false.\n */\nbool search(int value, int values[], int n)\n{\n // only search for positive values of needle\n if (value < 0)\n {\n return false;\n }\n else\n {\n // to hold ultimate found/not-found value\n int found = 0;\n\n // use recursive method to add up found (=1) and not-found (=0) values\n return search_recursive(value, values, n, found);\n }\n}\n\nint search_recursive(int value, int values[], int n, int found)\n{\n if (values[n/2] == value)\n {\n // Found it! Found it!\n return found + 1;\n }\n // if we have no more array, and still haven't found it\n else if (n == 0)\n {\n return found + 0;\n }\n else\n {\n // initialize a new half-size array\n int ta[n/2];\n\n // see if we need to check the first half\n // if so, populate the half-size array\n if (value < values[n/2])\n {\n for (int i = 0; i < n/2; i++)\n {\n ta[i] = values[i];\n }\n }\n else\n {\n for (int i = 0; i < n/2; i++)\n {\n // if even numbered the original array size, start from middle PLUS one index\n // otherwise start from middle-index\n // this way the second half of the original array is the same size\n ta[i] = (n % 2 == 0 ? n == 2 ? 
values[n/2] : values[n/2 + 1] : values[n/2 + 1 + i]);\n }\n }\n\n // because we are here, that means we haven't yet found it - send in found=1\n found += search_recursive(value, ta, n/2, 0);\n // rewind and send whatever we have in $found\n return found;\n }\n}\n\n/**\n * Sorts array of n values.\n */\nvoid sort(int values[], int n)\n{\n // find the max value from values[]\n int maxval = 0;\n for (int i = 0; i < n; i++)\n {\n maxval = values[i] > maxval ? values[i] : maxval;\n }\n\n // create and initialize the counting array\n int counting[maxval + 1];\n for (int i = 0; i <= maxval; i++)\n {\n counting[i] = 0;\n }\n\n // populate the counting array from values[]\n for (int i = 0; i < n; i++)\n {\n counting[values[i]]++;\n }\n\n // create a new array for the sorted values\n //int sorted[n];\n int sortedi = 0;\n for (int i = 0; i <= maxval; i++)\n {\n if (counting[i] > 0)\n {\n for (int j = 0; j < counting[i]; j++)\n {\n values[sortedi] = i;\n sortedi++;\n }\n }\n }\n\n return;\n}\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 13, "blob_id": "c9f7fb14c209dedbaef1e49d033d9ca32de80090", "content_id": "1b9c387d5ddd791c2f223da2cfdf80dacb1ffc0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "# MITCS50\nclass assignments\n" }, { "alpha_fraction": 0.5252062082290649, "alphanum_fraction": 0.5300183296203613, "avg_line_length": 22.717391967773438, "blob_id": "03f78dbacebc9ae4a9239d0cae101fc683efa25b", "content_id": "537a48f02a097873c2e30d0a39a761be4bcfb916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4364, "license_type": "no_license", "max_line_length": 150, "num_lines": 184, "path": "/week5/speller/dictionary.c", "repo_name": "kszabo/MITCS50", 
"src_encoding": "UTF-8", "text": "/**\n * Implements a dictionary's functionality.\n */\n\n#include <stdbool.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <ctype.h>\n\n#include \"dictionary.h\"\n\nint dictionaryWordCount;\nint alphabetSize = 27;\n\ntypedef struct node\n{\n bool isWord;\n struct node *children[27];\n}\nnode;\n\n// create the top node\nnode *dictionaryRoot;\n\n/**\n * Returns true if word is in dictionary else false.\n */\nbool check(const char *word)\n{\n //fprintf(stdout, \"word: %s\\n\", word);\n\n if (dictionaryWordCount == 0)\n {\n fprintf(stdout, \"Dictionary is empty!\\n\");\n return 0;\n }\n\n // create a cursor and point it to root\n node *cursor = dictionaryRoot;\n\n int letterIndex = 0;\n for (int i = 0; i < strlen(word); i++)\n {\n if (tolower(word[i]) == '\\'')\n letterIndex = 26;\n else\n letterIndex = tolower(word[i]) - 'a';\n\n //fprintf(stdout, \"alphabet[%i]: %c, cursor->children[%i]\\n\", i, alphabet[letterIndex], letterIndex);\n\n if (cursor -> children[letterIndex] == NULL)\n return false;\n else\n cursor = cursor -> children[letterIndex];\n }\n\n // check if the end flag is set\n return cursor -> isWord;\n}\n\n/**\n * Loads dictionary into memory. 
Returns true if successful else false.\n */\nbool load(const char *dictionary)\n{\n // open dictionary file\n FILE *dp = fopen(dictionary, \"r\");\n if (dp == NULL)\n {\n printf(\"Could not open %s.\\n\", dictionary);\n unload();\n return 1;\n }\n\n dictionaryRoot = malloc(sizeof(node));\n if (dictionaryRoot == NULL)\n {\n fprintf(stdout, \"Could not allocate memory!\\n\");\n fclose(dp);\n unload();\n return 1;\n }\n\n // initialize root\n for (int a = 0; a < alphabetSize; a++)\n {\n dictionaryRoot -> children[a] = NULL;\n }\n dictionaryRoot -> isWord = false;\n\n // create a cursor and point it to root\n node *cursor = dictionaryRoot;\n\n int letterIndex = 0;\n for (int c = fgetc(dp); c != EOF; c = fgetc(dp))\n {\n if (c != '\\n')\n {\n //fprintf(stdout, \"%c: alphabet[%i], cursor->children[%i]: %s\\n\", c, letterIndex, letterIndex, (char *)(cursor -> children[letterIndex]));\n\n // get index of character in alphabet array\n if (tolower(c) == '\\'')\n letterIndex = 26;\n else\n letterIndex = tolower(c) - 'a';\n\n // if not yet in trie\n if (cursor -> children[letterIndex] == NULL)\n {\n // create a new node and initialize\n node *thisNode = malloc(sizeof(node));\n if (thisNode == NULL)\n {\n fprintf(stdout, \"Could not allocate memory!\\n\");\n fclose(dp);\n unload();\n return 1;\n }\n\n for (int j = 0; j < alphabetSize; j++)\n {\n thisNode -> children[j] = NULL;\n }\n thisNode -> isWord = false;\n\n // move cursor down the tree\n cursor -> children[letterIndex] = thisNode;\n }\n\n // letter is in trie (already or just added now), move the cursor only\n cursor = cursor -> children[letterIndex];\n }\n else\n {\n // mark last character with end-of-word\n cursor -> isWord = true;\n // keep the count\n dictionaryWordCount++;\n // reset the cursor\n cursor = dictionaryRoot;\n }\n\n //fprintf(stdout, \"cursor -> children[%i] = %c\\n\", letterIndex, alphabet[letterIndex]);\n }\n\n fclose(dp);\n return 1;\n}\n\n/**\n * Returns number of words in dictionary if loaded else 
0 if not yet loaded.\n */\nunsigned int size(void)\n{\n return dictionaryWordCount;\n}\n\nvoid freeDictionary(node *cursor)\n{\n // check each child\n for (int i = 0; i < alphabetSize; i++)\n {\n if (cursor -> children[i] != NULL)\n {\n freeDictionary(cursor -> children[i]);\n }\n }\n\n // checked all children, looks like all are NULL\n free(cursor);\n}\n\n/**\n * Unloads dictionary from memory. Returns true if successful else false.\n */\nbool unload(void)\n{\n node *cursor = dictionaryRoot;\n\n freeDictionary(cursor);\n\n return true;\n}\n" }, { "alpha_fraction": 0.46869897842407227, "alphanum_fraction": 0.47577571868896484, "avg_line_length": 31.821428298950195, "blob_id": "d642b6017ae8efb6293c6b2d65e66565edc7fcd6", "content_id": "5f95fe34d0b1ae3761be439c47bf2f2fcda46a10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1837, "license_type": "no_license", "max_line_length": 126, "num_lines": 56, "path": "/pset6/vigenere.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "import cs50\nimport sys\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Usage: python ./vigenere.py k\")\n exit(1)\n\n # check input and build the key array at the same time\n asciikey = []\n for c in sys.argv[1]:\n if c.isalpha() == False:\n print(\"Usage: python ./vigenere.py k\")\n exit(1)\n else:\n asciikey.append(ord(c.lower()) - ord(\"a\"))\n #print(\"asciikey: {}\".format(asciikey))\n\n # get user input\n print(\"plaintext: \", end=\"\")\n pt = cs50.get_string()\n\n #variable to keep track of the alpha character\n alphaplace = 0\n print(\"ciphertext: \", end=\"\")\n for p in pt:\n if p.isalpha() == True:\n # if alpha character, encrypt\n\n if ord(p) >= ord(\"a\") and ord(p) <= ord(\"z\"):\n # if lowercase\n if ord(p) + asciikey[alphaplace % len(asciikey)] > ord(\"z\"):\n # wrap around if needd\n print(\"{}\".format(chr(ord(\"a\") + 1 + ord(p) + asciikey[alphaplace % len(asciikey)] - ord(\"z\"))), end = \"\")\n 
else:\n print(\"{}\".format(chr(ord(p) + asciikey[alphaplace % len(asciikey)])), end = \"\")\n elif ord(p) >= ord(\"A\") and ord(p) <= ord(\"Z\"):\n # if uppercase\n if ord(p) + asciikey[alphaplace % len(asciikey)] > ord(\"Z\"):\n #wrap around if needed\n print(\"{}\".format(chr(ord(\"A\") + 1 + ord(p) + asciikey[alphaplace % len(asciikey)] - ord(\"Z\"))), end = \"\")\n else:\n print(\"{}\".format(chr(ord(p) + asciikey[alphaplace % len(asciikey)])), end = \"\")\n\n alphaplace += 1\n else:\n # not alpha, don't encrypt\n print(\"{}\".format(p), end = \"\")\n\n print()\n\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.41914334893226624, "alphanum_fraction": 0.4252622425556183, "avg_line_length": 26.25, "blob_id": "a0484c9d785e0aa738d9ccdd9a8bdf06b0b2696a", "content_id": "3d05f409e54141c23759d8060cefe4dbbf8be569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2288, "license_type": "no_license", "max_line_length": 171, "num_lines": 84, "path": "/pset2/vigenere/vigenere.c", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include <stdio.h>\n#include <string.h>\n#include <ctype.h>\n\nint main(int argc, string argv[])\n{\n // get key from command line\n if (argc != 2)\n {\n printf(\"Usage: ./vigenere k\\n\");\n return 1;\n }\n\n string key = argv[1];\n for (int i = 0, n = strlen(key); i < n; i++)\n {\n if (isalpha(key[i]) == 0 )\n {\n printf(\"Usage: ./vigenere k\\n\");\n return 1;\n }\n }\n\n int keylength = strlen(key);\n\n // create the array of the key's ascii vals\n char asciikey[keylength];\n for (int i = 0; i < keylength; i++)\n {\n asciikey[i] = (int)tolower(key[i]) - 'a';\n\n //printf(\"asciikey[%i]=%i\\n\", i, asciikey[i]);\n }\n\n // get plaintext\n printf(\"plaintext: \");\n string pt = get_string();\n\n // variable to keep track of the alpha caracter\n int alphaplace = -1;\n // encipher\n printf(\"ciphertext: \");\n for (int i = 0, n = 
strlen(pt); i < n; i++)\n {\n if (isalpha(pt[i]))\n {\n alphaplace += 1;\n //printf(\"pt[%i]=%c; (int)\\'%c\\'=%i, mod of %i=%i, key[mod of %i]=%c, %i ----- \",\n //i, pt[i], pt[i], (int)pt[i], alphaplace, alphaplace % keylength, alphaplace % keylength, key[alphaplace % keylength], (int)asciikey[alphaplace % keylength]);\n\n if (pt[i] >= 'a' && pt[i] <= 'z')\n {\n if ((int)pt[i] + asciikey[alphaplace % keylength] > (int)'z')\n {\n printf(\"%c\", ('a' - 1) + (int)pt[i] + asciikey[alphaplace % keylength] - 'z');\n }\n else\n {\n printf(\"%c\", (int)pt[i] + asciikey[alphaplace % keylength]);\n }\n }\n else if (pt[i] >= 'A' && pt[i] <= 'Z')\n {\n if ((int)pt[i] + asciikey[alphaplace % keylength] > (int)'Z')\n {\n printf(\"%c\", ('A' - 1) + (int)pt[i] + asciikey[alphaplace % keylength] - 'Z');\n }\n else\n {\n printf(\"%c\", (int)pt[i] + asciikey[alphaplace % keylength]);\n }\n }\n }\n else\n {\n printf(\"%c\", pt[i]);\n }\n }\n\n printf(\"\\n\");\n\n // print ciphertext\n}" }, { "alpha_fraction": 0.5906680822372437, "alphanum_fraction": 0.6002120971679688, "avg_line_length": 25.97142791748047, "blob_id": "d87c79c0b544f39f0027683f790bbcdd30de125f", "content_id": "8a1cfe5d69a1a53f7ee24586827ffaa6d9ad6729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "no_license", "max_line_length": 63, "num_lines": 35, "path": "/pset6/sentiments/tweets", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport sys\nimport os\n\nfrom helpers import get_user_timeline\nfrom analyzer import Analyzer\nfrom termcolor import colored\n\n\ndef main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: ./tweets @screen_name\")\n\n #initialize Analyzer\n # absolute paths to lists\n positives = os.path.join(sys.path[0], \"positive-words.txt\")\n negatives = os.path.join(sys.path[0], \"negative-words.txt\")\n myAnalyzer = Analyzer(positives, negatives)\n\n allTweets = 
get_user_timeline(sys.argv[1], count=50)\n\n for t in allTweets:\n # first analyze one tweet at a time\n # then print colored tweet\n sent = myAnalyzer.analyze(t)\n if sent > 0:\n print(colored(\"{} {}\".format(sent, t), \"green\"))\n elif sent < 0:\n print(colored(\"{} {}\".format(sent, t), \"red\"))\n else:\n print(colored(\"{} {}\".format(sent, t), \"yellow\"))\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5798319578170776, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 21.037036895751953, "blob_id": "fc7c1adbe9b6a80cb9bd9748f0461b937140a5a1", "content_id": "3bd0bb4e67aa3ddc98d05927849025bcd4a52ed2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1190, "license_type": "no_license", "max_line_length": 96, "num_lines": 54, "path": "/pset3/find/generate.c", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "/**\n * generate.c\n *\n * Generates pseudorandom numbers in [0,MAX), one per line.\n *\n * Usage: generate n [s]\n *\n * where n is number of pseudorandom numbers to print\n * and s is an optional seed\n */\n\n#define _XOPEN_SOURCE\n\n#include <cs50.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n\n// upper limit on range of integers that can be generated\n#define LIMIT 65536\n\nint main(int argc, string argv[])\n{\n // The program argument count should not be other than two or three (including program name)\n if (argc != 2 && argc != 3)\n {\n printf(\"Usage: ./generate n [s]\\n\");\n return 1;\n }\n\n // Convert the first argument (after program name) to integer\n int n = atoi(argv[1]);\n\n // If optional second (after program name) argument given, read it as integer value\n // otherwise rely on srand48\n if (argc == 3)\n {\n srand48((long) atoi(argv[2]));\n }\n else\n {\n srand48((long) time(NULL));\n }\n\n // generate as many random numbers as user asked for\n // but none larger than the LIMIT value\n for (int i = 0; i < n; i++)\n {\n 
printf(\"%i\\n\", (int) (drand48() * LIMIT));\n }\n\n // success\n return 0;\n}\n" }, { "alpha_fraction": 0.3891891837120056, "alphanum_fraction": 0.39459457993507385, "avg_line_length": 23.36842155456543, "blob_id": "a3fd0a0a8fa8f62d78be2412c01c152429c6b95d", "content_id": "01c362338b324e4019c9c97e26a21f2cd0b4e99f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 925, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/pset2/initials/initials.c", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include <stdio.h>\n#include <string.h>\n\nint main(void)\n{\n // get user input\n string name = get_string();\n\n // only check if user input is a string\n if (name != NULL)\n {\n // initialize the space char\n char p = ' ';\n\n // check each character\n for (int i = 0, n = strlen(name); i < n; i++)\n {\n // if previous character is a single space, then this is an initial\n if (p == ' ')\n {\n if (name[i] >= 'a' && name[i] <= 'z')\n {\n //printf(\"%c\", toupper(name[i])); // doesn't compile!!\n printf(\"%c\", name[i] - 32);\n }\n else\n {\n printf(\"%c\", name[i]);\n }\n }\n // save current character to check against\n p = name[i];\n }\n printf(\"\\n\");\n }\n\n}" }, { "alpha_fraction": 0.38418862223625183, "alphanum_fraction": 0.3911234438419342, "avg_line_length": 23.879310607910156, "blob_id": "73790dccedc4b173db27eeaea45bc2fc883b75b6", "content_id": "dc8dc77bebd5d6dad234a0d193689ba8850f3a0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1442, "license_type": "no_license", "max_line_length": 85, "num_lines": 58, "path": "/pset2/caesar/caesar.c", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "#include <cs50.h>\n#include <stdio.h>\n#include <string.h>\n\nint main(int argc, string argv[])\n{\n // store key, make sure it is not larger than the alphabet\n int key;\n\n if (argc != 2)\n {\n 
printf(\"Usage: ./caesar k\\n\");\n return 1;\n }\n key = atoi(argv[1]) % 26;\n\n // get user input\n printf(\"plaintext: \");\n string pt = get_string();\n\n printf(\"ciphertext: \");\n for (int i = 0, n = strlen(pt); i < n; i++)\n {\n if (pt[i] >= 'a' && pt[i] <= 'z')\n {\n // if key makes the ascii too large, calculate where it would wrap around\n if ((int)(pt[i]) + key > 'z')\n {\n printf(\"%c\", ('a' - 1) + (int)(pt[i]) + key - 'z');\n }\n else\n {\n // safely in the alphabet\n printf(\"%c\", (int)(pt[i]) + key);\n }\n }\n else if (pt[i] >= 'A' && pt[i] <= 'Z')\n {\n // if key makes the ascii too large, calculate where it would wrap around\n if ((int)(pt[i]) + key > 'Z')\n {\n printf(\"%c\", ('A' - 1) + (int)(pt[i]) + key - 'Z');\n }\n else\n {\n // safely in the alphabet\n printf(\"%c\", (int)(pt[i]) + key);\n }\n }\n else\n {\n // not alphabet, no need to cypher\n printf(\"%c\", pt[i]);\n }\n }\n\n printf(\"\\n\");\n}" }, { "alpha_fraction": 0.6074363589286804, "alphanum_fraction": 0.6092715263366699, "avg_line_length": 37.685184478759766, "blob_id": "220a38921658b0276b54bc40fa796a3c6981a8e0", "content_id": "df6a4d97b8d9779ebbff896f95bd8f608d85699f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12533, "license_type": "no_license", "max_line_length": 189, "num_lines": 324, "path": "/pset7/finance/application.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "from cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom passlib.apps import custom_app_context as pwd_context\nfrom tempfile import mkdtemp\n\nfrom helpers import *\n\n# configure application\napp = Flask(__name__)\n\n# ensure responses aren't cached\nif app.config[\"DEBUG\"]:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n 
response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef index():\n\n userid = session.get(\"user_id\")\n\n # database lookup; also set placeholders\n purchases = db.execute(\"SELECT userid, symbol, symbol as name, numshares, 0 AS price, 0 AS total FROM view_purchases WHERE userid = :userid\", userid=userid)\n\n if request.method == \"GET\":\n\n usercash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=userid)\n cash = usercash[0]['cash']\n\n # look up current share price and calculate current value\n for purchase in purchases:\n symbolquote = lookup(purchase['symbol'])\n purchase['price'] = usd(symbolquote[\"price\"])\n purchase['total'] = usd(symbolquote[\"price\"] * purchase['numshares'])\n\n # append the CASH row to the end of the list\n purchases.append({'userid': userid, 'symbol': 'CASH', 'name': '', 'numshares': '', 'price': '', 'total': usd(cash)})\n\n return render_template(\"index.html\", purchases=purchases, history=\"no\")\n\n else:\n\n # compare to all purchases\n for purchase in purchases:\n\n # check for user's action request\n actionvalue = request.form.get('action_' + purchase['symbol'])\n\n if actionvalue == 'buy' or actionvalue == 'sell':\n # check if number entered\n numshares = int(request.form.get('numtrade_' + purchase['symbol']))\n\n # request user to enter number of shares to trade\n if not numshares or numshares == 0:\n return apology('Please enter number of shares to trade')\n\n # look up price and calculate $\n symbolquote = 
lookup(purchase['symbol'])\n totalprice = int(numshares) * symbolquote['price']\n\n\n if actionvalue == 'buy':\n # check if user has enough $\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=userid)\n if cash[0]['cash'] < totalprice:\n return apology(\"You do not have enough funds\")\n\n # update necessary records\n db.execute(\"INSERT INTO purchases (userid, symbol, price, purchdate, numshares) VALUES (:userid, :symbol, :price, :purchdate, :numshares)\",\n userid=userid, symbol=purchase['symbol'], price=symbolquote['price'], purchdate=currentdate(), numshares=numshares)\n db.execute(\"UPDATE users SET cash = cash - :debit WHERE id = :userid\", debit=totalprice, userid=userid)\n elif actionvalue == 'sell':\n db.execute(\"INSERT INTO purchases (userid, symbol, price, purchdate, numshares) VALUES (:userid, :symbol, :price, :purchdate, :numshares)\",\n userid=userid, symbol=purchase['symbol'], price=symbolquote['price'], purchdate=currentdate(), numshares=-(numshares))\n db.execute(\"UPDATE users SET cash = cash + :credit WHERE id = :userid\", credit=totalprice, userid=userid)\n\n\n\n '''\n if symboltosell == \"on\":\n\n # find out how many\n numtosell = int(request.form.get(\"sell_\" + purchase['symbol']))\n\n # if tries to sell more than she's got, error\n if numtosell > purchase['numshares']:\n return apology(\"You do not have enough shares for this sale\")\n else:\n # check current price\n price = float((lookup(purchase['symbol']))[\"price\"])\n\n # debit the sale to cash\n db.execute(\"UPDATE users SET cash = cash + :sold WHERE id = :userid\", sold=(price * numtosell), userid=userid)\n\n # record the sale\n db.execute(\"INSERT INTO purchases (userid, symbol, price, purchdate, numshares) VALUES (:userid, :symbol, :price, :purchdate, :numshares)\",\n userid=userid, symbol=purchase['symbol'], price=price, purchdate=datetime.datetime.now().strftime(\"%m-%d-%Y %H:%M:%S\"), numshares=-(numtosell))\n\n '''\n\n return 
redirect(url_for(\"history\"))\n\n\[email protected](\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock.\"\"\"\n if request.method == \"POST\":\n userid = session.get(\"user_id\")\n symbol = request.form.get(\"symbol\")\n\n # if user didn't input a symbol\n if symbol == \"\":\n return apology(\"Please enter a symbol\")\n\n symbolquote = lookup(request.form.get(\"symbol\"))\n\n # if symbol is incorrect\n if not symbolquote:\n return apology(\"Symbol does not exist\")\n\n #if user didn't input number of shares\n numshares = request.form.get(\"numshares\")\n if numshares == \"\":\n return apology(\"Please enter number of shares\")\n elif int(numshares) < 0:\n return apology(\"Please enter a positive int for number of shares\")\n\n # look up symbol price and available cash\n symbolquote = lookup(symbol)\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=userid)\n\n totalprice = symbolquote[\"price\"] * int(numshares)\n if totalprice > cash[0]['cash']:\n return apology(\"Sorry, you do not have enough $\")\n else:\n # record the purchase\n db.execute(\"INSERT INTO purchases (userid, symbol, price, purchdate, numshares) VALUES (:userid, :symbol, :price, :purchdate, :numshares)\",\n userid=userid, symbol=symbolquote[\"symbol\"], price=symbolquote[\"price\"], purchdate=currentdate(), numshares=numshares)\n # debit the account\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash=cash[0]['cash']-totalprice, userid=userid)\n\n #return render_template(\"quoted.html\", symbolquote=symbolquote)\n # redirect user to home page\n return redirect(url_for(\"index\", history=\"no\"))\n\n else:\n return render_template(\"buy.html\")\n\n\[email protected](\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions.\"\"\"\n userid = session.get(\"user_id\")\n\n purchases = db.execute(\"SELECT symbol, price, numshares, purchdate FROM purchases WHERE userid = :userid ORDER BY 
purchaseid DESC\", userid=userid)\n return render_template(\"index.html\", purchases=purchases, history=\"yes\")\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in.\"\"\"\n\n # forget any user_id\n session.clear()\n\n # if user reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\")\n\n # ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\")\n\n # query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n\n # ensure username exists and password is correct\n if len(rows) != 1 or not pwd_context.verify(request.form.get(\"password\"), rows[0][\"hash\"]):\n return apology(\"invalid username and/or password\")\n\n # remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # redirect user to home page\n return redirect(url_for(\"index\", history=\"no\"))\n\n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\[email protected](\"/logout\")\ndef logout():\n \"\"\"Log user out.\"\"\"\n\n # forget any user_id\n session.clear()\n\n # redirect user to login form\n return redirect(url_for(\"login\"))\n\[email protected](\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == \"POST\":\n\n # if user didn't input a symbol\n if request.form.get(\"symbol\") == \"\":\n return apology(\"Please enter a symbol\")\n\n symbolquote = lookup(request.form.get(\"symbol\"))\n\n # if symbol is incorrect\n if not symbolquote:\n return apology(\"Symbol does not exist\")\n\n return render_template(\"quoted.html\", symbolquote=symbolquote)\n else:\n return 
render_template(\"quote.html\")\n\[email protected](\"/quoted\", methods=[\"GET\"])\n@login_required\ndef quoted():\n \"\"\"Display quoute\"\"\"\n return render_template(\"quoted.html\")\n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n\n # forget any user_id\n session.clear()\n\n # if user reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n \"\"\"Register user.\"\"\"\n if not request.form.get(\"username\"):\n return apology(\"Please provide a username\")\n else:\n # check if username already exists\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n if len(rows) >= 1:\n return apology(\"The username already exists\")\n\n if request.form[\"password1\"] == \"\":\n return apology(\"Please provide a password\")\n elif request.form[\"password1\"] != request.form[\"password2\"]:\n return apology(\"The two passwords do not match\")\n else:\n db.execute(\"INSERT INTO users (username, hash) VALUES (:username, :password1)\", username=request.form.get(\"username\"), password1=pwd_context.hash(request.form.get(\"password1\")))\n\n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"register.html\")\n\n return render_template(\"login.html\")\n\[email protected](\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock.\"\"\"\n userid = session.get(\"user_id\")\n\n if request.method == \"POST\":\n\n # database lookup; also set placeholders for html\n purchases = db.execute(\"SELECT userid, symbol, numshares FROM view_purchases WHERE userid = :userid\", userid=userid)\n\n # compare to all purchases\n for purchase in purchases:\n\n # if user intends to sell this stock\n symboltosell = request.form.get(purchase['symbol'])\n\n if symboltosell == \"on\":\n\n # find out how many\n numtosell = int(request.form.get(\"sell_\" + 
purchase['symbol']))\n\n # if tries to sell more than she's got, error\n if numtosell > purchase['numshares']:\n return apology(\"You do not have enough shares for this sale\")\n else:\n # check current price\n price = float((lookup(purchase['symbol']))[\"price\"])\n\n # debit the sale to cash\n db.execute(\"UPDATE users SET cash = cash + :sold WHERE id = :userid\", sold=(price * numtosell), userid=userid)\n\n # record the sale\n db.execute(\"INSERT INTO purchases (userid, symbol, price, purchdate, numshares) VALUES (:userid, :symbol, :price, :purchdate, :numshares)\",\n userid=userid, symbol=purchase['symbol'], price=price, purchdate=currentdate(), numshares=-(numtosell))\n\n\n # redirect user to home page\n return redirect(url_for(\"index\", history=\"no\"))\n else:\n\n # database lookup; also set placeholders\n purchases = db.execute(\"SELECT userid, symbol, numshares FROM view_purchases WHERE userid = :userid\", userid=userid)\n\n return render_template(\"sell.html\", purchases=purchases)" }, { "alpha_fraction": 0.5036371350288391, "alphanum_fraction": 0.5237483978271484, "avg_line_length": 24.129032135009766, "blob_id": "7eb776da49f79a7107f8756dc89a178475ed4396", "content_id": "098f71eddb663785f24ee327ce579c841c7cbdf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2337, "license_type": "no_license", "max_line_length": 75, "num_lines": 93, "path": "/pset4/recover/recover.c", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdint.h>\n\ntypedef uint8_t BYTE;\n\nint main(int argc, char *argv[])\n{\n // ensure proper usage\n if (argc != 2)\n {\n fprintf(stderr, \"Usage: ./recover image\\n\");\n return 1;\n }\n\n // remember filenames\n char *infile = argv[1];\n\n // open input file\n FILE *inptr = fopen(infile, \"r\");\n if (inptr == NULL)\n {\n fprintf(stderr, \"Could not open %s.\\n\", infile);\n return 2;\n }\n\n // the first 3 bytes of JPEGs are:\n // 0xff 
0xd8 0xff\n // and the 4th byte's first four bits are\n // 1110\n\n int sizeOfBuffer = 512; // buffer size for FAT file system's block size\n char buffer[sizeOfBuffer];\n int readBytes = 0;\n int fileCount = 0;\n int signature = 0;\n char outfile[8] = \"000.jpg\";\n FILE *outptr = NULL;\n\n readBytes = fread(&buffer, sizeof(BYTE), sizeOfBuffer, inptr);\n\n while (readBytes == sizeOfBuffer)\n {\n // check each block for JPEG signature\n if ((BYTE)buffer[0] == 0xff &&\n (BYTE)buffer[1] == 0xd8 &&\n (BYTE)buffer[2] == 0xff &&\n ((BYTE)buffer[3] & 0xf0) == 0xe0\n )\n {\n signature = 1;\n }\n\n if (signature == 1)\n {\n // close previous output file - if this is not the first one\n if (fileCount > 0)\n fclose(outptr);\n\n // construct file name\n snprintf(outfile, sizeof(outfile), \"%03d.jpg\", fileCount);\n\n outptr = fopen(outfile, \"w\");\n if (outptr == NULL)\n {\n fclose(inptr);\n fprintf(stderr, \"Could not create %s.\\n\", outfile);\n return 3;\n }\n\n fileCount++;\n }\n\n // whether it is a new file or not, we're appending the block\n // but not if the first few blocks are not JPEGs!\n if (fileCount > 0)\n fwrite(&buffer, sizeof(BYTE), sizeOfBuffer, outptr);\n\n readBytes = fread(&buffer, sizeof(BYTE), sizeOfBuffer, inptr);\n signature = 0;\n }\n\n // Let's check if there are more bytes at this end\n // and make sure to append\n if (readBytes > 0)\n fwrite(&buffer, sizeof(BYTE), readBytes, outptr);\n\n // close infile\n fclose(inptr);\n // close outfile\n fclose(outptr);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.49212121963500977, "alphanum_fraction": 0.5066666603088379, "avg_line_length": 29.2356014251709, "blob_id": "03b018567e46cd8af28eaba797306e31609fed58", "content_id": "8d659b3a4afa6792ce5f49301070fa33070256c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5775, "license_type": "no_license", "max_line_length": 136, "num_lines": 191, "path": "/week4/resize/resize.c", "repo_name": "kszabo/MITCS50", 
"src_encoding": "UTF-8", "text": "/**\n * Copies a BMP piece by piece, just because.\n */\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <math.h>\n\n#include \"bmp.h\"\n\nint main(int argc, char *argv[])\n{\n // START input checking\n // ensure proper usage\n if (argc != 4)\n {\n fprintf(stderr, \"Usage: ./resize n infile outfile\\n\");\n return 1;\n }\n\n // get requested factor\n float factor = atof(argv[1]);\n if (factor <= 0)\n {\n fprintf(stderr, \"Please choose a positive number for size.\\n\");\n return 1;\n }\n //fprintf(stdout, \"factor=%f\\n\", factor);\n\n // remember filenames\n char *infile = argv[2];\n char *outfile = argv[3];\n\n // open input file\n FILE *inptr = fopen(infile, \"r\");\n if (inptr == NULL)\n {\n fprintf(stderr, \"Could not open %s.\\n\", infile);\n return 2;\n }\n\n // open output file\n FILE *outptr = fopen(outfile, \"w\");\n if (outptr == NULL)\n {\n fclose(inptr);\n fprintf(stderr, \"Could not create %s.\\n\", outfile);\n return 3;\n }\n\n // read infile's BITMAPFILEHEADER\n BITMAPFILEHEADER bf;\n fread(&bf, sizeof(BITMAPFILEHEADER), 1, inptr);\n\n // read infile's BITMAPINFOHEADER\n BITMAPINFOHEADER bi;\n fread(&bi, sizeof(BITMAPINFOHEADER), 1, inptr);\n\n // ensure infile is (likely) a 24-bit uncompressed BMP 4.0\n if (bf.bfType != 0x4d42 || bf.bfOffBits != 54 || bi.biSize != 40 ||\n bi.biBitCount != 24 || bi.biCompression != 0)\n {\n fclose(outptr);\n fclose(inptr);\n fprintf(stderr, \"Unsupported file format.\\n\");\n return 4;\n }\n // DONE input checking\n\n // save info before overwriting\n int origPadding = (4 - (bi.biWidth * sizeof(RGBTRIPLE)) % 4) % 4;\n int origbiWidth = bi.biWidth;\n int origbiHeight = abs(bi.biHeight);\n\n // the bitmapheader and bitmapfileheader changes\n bi.biWidth *= factor; // not including padding\n bi.biHeight *= factor;\n\n // determine new padding for scanlines - to add\n int newbiWidth = bi.biWidth;\n int newPadding = (4 - (newbiWidth * sizeof(RGBTRIPLE)) % 4) % 4;\n\n bi.biSizeImage = 
((sizeof(RGBTRIPLE) * bi.biWidth) + newPadding) * abs(bi.biHeight);\n bf.bfSize = bi.biSizeImage + sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);\n\n // write outfile's BITMAPFILEHEADER\n fwrite(&bf, sizeof(BITMAPFILEHEADER), 1, outptr);\n\n // write outfile's BITMAPINFOHEADER\n fwrite(&bi, sizeof(BITMAPINFOHEADER), 1, outptr);\n\n int numRow = 0;\n int numPix = 0;\n\n // iterate over infile's scanlines\n for (int i = 0, biHeight = abs(origbiHeight); i < biHeight; i++)\n {\n // if factor < 1\n // print those lines/pixels where i % (1/factor) < 1\n // else if factor > 1 and is an integer\n // print all lines/pixels factor number of times\n // else\n // print all lines/pixels factor number of times PLUS (formula for factor < 1)\n\n // calculate if I need to print this row at all\n // and if so, how many times\n if (factor < 1)\n {\n //if (fmod((i+1), (1/factor)) < 1)\n if (fmod(i, (1/factor)) < 1)\n numRow = 1;\n else\n numRow = 0;\n }\n else\n {\n //if ((int)factor != factor && fmod((i+1), (1/factor)) < 1 - (1/factor))\n if ((int)factor != factor && fmod(i, (1/factor)) < 1 - (1/factor))\n numRow = factor + 1;\n else\n numRow = factor;\n }\n\n // if skipping a row, advance the file position\n if (numRow == 0)\n fseek(inptr, (origbiWidth * sizeof(RGBTRIPLE) + origPadding), SEEK_CUR);\n\n for (int r = 0; r < numRow; r++)\n {\n // put the pointer back to the beginning of this same row - in the original file\n if (r > 0)\n {\n //fprintf(stdout, \"orig row [%i]; putting pointer back\\n\", i);\n fseek(inptr, -((origbiWidth * sizeof(RGBTRIPLE)) + origPadding), SEEK_CUR);\n }\n\n // iterate over pixels in scanline\n for (int j = 0; j < origbiWidth; j++)\n {\n // decide whether to print the pixel or not, and if so, how many times\n numPix = 0;\n if (factor < 1)\n {\n if (fmod((j+1), (1/factor)) < 1)\n numPix = 1;\n else\n numPix = 0;\n }\n else\n {\n if ((int)factor != factor && fmod((j+1), (1/factor)) < 1 - (1/factor))\n numPix = factor + 1;\n else\n numPix = factor;\n 
}\n\n // temporary storage\n RGBTRIPLE triple;\n\n // read RGB triple from infile\n //fprintf(stdout, \"reading [%i][%i]: B=%i, G=%i, R=%i\\n\", i, j, triple.rgbtBlue, triple.rgbtGreen, triple.rgbtRed);\n fread(&triple, sizeof(RGBTRIPLE), 1, inptr);\n\n for (int k = 0; k < numPix; k++)\n {\n // write RGB triple to outfile\n //1fprintf(stdout, \"writing [%i][%i]: B=%i, G=%i, R=%i\\n\", i, j, triple.rgbtBlue, triple.rgbtGreen, triple.rgbtRed);\n fwrite(&triple, sizeof(RGBTRIPLE), 1, outptr);\n }\n }\n\n // skip over padding, if any\n fseek(inptr, origPadding, SEEK_CUR);\n\n // then add new padding if needed\n for (int k = 0; k < newPadding; k++)\n {\n fputc(0x00, outptr);\n }\n }\n }\n\n // close infile\n fclose(inptr);\n\n // close outfile\n fclose(outptr);\n\n // success\n return 0;\n}\n" }, { "alpha_fraction": 0.4918566644191742, "alphanum_fraction": 0.49755701422691345, "avg_line_length": 24.5625, "blob_id": "9e92827ab02d8cb1a1e6ee403cc6c203c50f2c9a", "content_id": "35ba409beaf40b071f8d7a769e9096c94203dc14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 62, "num_lines": 48, "path": "/pset6/sentiments/analyzer.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "import nltk\nimport mmap\n\nclass Analyzer():\n \"\"\"Implements sentiment analysis.\"\"\"\n\n def __init__(self, positives, negatives):\n \"\"\"Initialize Analyzer.\"\"\"\n\n #instance variables\n self.posWords = []\n self.negWords = []\n\n #read in the positive words\n f = open(positives, \"r\")\n lines = f.read().splitlines()\n\n for word in lines:\n if word[:1] != \";\" and word[:1] != \"\\n\":\n # ignore the comments in the file\n self.posWords.append(word)\n\n f.close()\n\n # read in negative words\n f = open(negatives, \"r\")\n lines = f.read().splitlines()\n\n for word in lines:\n if word[:1] != \";\" and word[:1] != \"\\n\":\n # ingnore the comments in the file\n 
self.negWords.append(word)\n\n f.close()\n\n\n def analyze(self, text):\n \"\"\"Analyze text for sentiment, returning its score.\"\"\"\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent\n\n" }, { "alpha_fraction": 0.4003322124481201, "alphanum_fraction": 0.4252491593360901, "avg_line_length": 16.676469802856445, "blob_id": "f42fac6de5a3b87ed4bd25128eef0b17d8d61ff1", "content_id": "5ad8096fa115490f83ad68003760f8c51d549fe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/pset6/mario.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "import cs50\n\ndef main():\n i = get_height()\n build(i)\n exit(0)\n\n\ndef get_height():\n while True:\n print(\"How tall half-pyramid you need?: \", end=\"\")\n n = cs50.get_int()\n if n >= 0 and n <= 23:\n break\n return n\n\n\ndef build(height):\n for h in range(0, height):\n for i in range (0, height - h - 1):\n print(\" \", end=\"\")\n\n for i in range (0, h + 1):\n print(\"#\", end=\"\")\n\n print(\" \", end=\"\")\n\n for i in range (0, h + 1):\n print(\"#\", end=\"\")\n\n print()\n\nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.6290322542190552, "alphanum_fraction": 0.6435483694076538, "avg_line_length": 25.95652198791504, "blob_id": "688a8005525bf2cff8db90c18cd31f527c92d0aa", "content_id": "9ddbf91d8e011b42bd7efb94dbd93097de0edf6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1240, "license_type": "no_license", "max_line_length": 79, "num_lines": 46, "path": "/pset6/sentiments/application.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "from flask import Flask, redirect, render_template, request, 
url_for\n\nimport helpers\nimport os\nimport sys\nfrom analyzer import Analyzer\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html\")\n\[email protected](\"/search\")\ndef search():\n\n # validate screen_name\n screen_name = request.args.get(\"screen_name\", \"\")\n if not screen_name:\n return redirect(url_for(\"index\"))\n\n # get screen_name's tweets\n how_many = 100\n tweets = helpers.get_user_timeline(screen_name, how_many)\n\n positive, negative, neutral = 0.0, 0.0, 100.0\n positives = os.path.join(sys.path[0], \"positive-words.txt\")\n negatives = os.path.join(sys.path[0], \"negative-words.txt\")\n # initialize Analyzer with the dictionaries\n myAnalyzer = Analyzer(positives, negatives)\n\n for t in tweets:\n # count sentiments for total\n sent = myAnalyzer.analyze(t)\n if sent > 0:\n positive = positive + 1\n if sent < 0:\n negative = negative + 1\n else:\n neutral = neutral + 1\n\n # generate chart\n chart = helpers.chart(positive, negative, neutral)\n\n # render results\n return render_template(\"search.html\", chart=chart, screen_name=screen_name)\n" }, { "alpha_fraction": 0.516339898109436, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 16.11111068725586, "blob_id": "fcf48a16a94b9936d8c0c2b4d59fbc799b5d8d78", "content_id": "0f73d90f1a75bb62e88589590b1ddbe20d853c84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/week8/string.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "import cs50\n\ns = cs50.get_string(\"name: \")\nprint(\"hello, {}\".format(s))\n\ni = cs50.get_int()\nprint(\"hello, {}\".format(i))\n\nprint(\"{:.55f}\".format(1 / 10))" }, { "alpha_fraction": 0.45958584547042847, "alphanum_fraction": 0.4969939887523651, "avg_line_length": 23.540983200073242, "blob_id": "47484f48c69a08bca80c6581adab89e1f31e7dad", 
"content_id": "bf1908417ecaf2924cbdd32ebcba84b45b5c1217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1497, "license_type": "no_license", "max_line_length": 85, "num_lines": 61, "path": "/pset6/credit.py", "repo_name": "kszabo/MITCS50", "src_encoding": "UTF-8", "text": "import cs50\n\ndef main():\n\n print(\"Number: \", end=\"\")\n credit = cs50.get_string()\n\n c = 0\n digitSum = 0\n for i in reversed(range(len(credit))):\n c += 1\n if c % 2 == 0:\n digit = int(credit[i])\n if digit * 2 >= 10:\n digitSum += 1 + (digit * 2) - 10\n else:\n digitSum += digit * 2\n\n #print(\"second {} from back: {}, digitSum={}\".format(c, digit, digitSum))\n\n c = 0\n for i in reversed(range(len(credit))):\n c += 1\n if c % 2 == 1:\n digit = int(credit[i])\n digitSum += digit\n\n if digitSum % 10 != 0:\n print(\"INVALID\")\n else:\n print(\"{}\".format(valid_format(credit)))\n\n exit(0)\n\n\ndef valid_format(credit):\n\n firstdigit = \"\"\n for i in range(len(credit)):\n if i == 0 or i == 1:\n firstdigit += credit[i]\n\n if len(credit) == 15 and (int(firstdigit) == 34 or int(firstdigit) == 37):\n cctype = \"AMEX\"\n elif len(credit) == 16 and (int(firstdigit) == 51 or int(firstdigit) == 52):\n cctype = \"MASTERCARD\"\n elif len(credit) == 16 and (int(firstdigit) == 53 or int(firstdigit) == 54):\n cctype = \"MASTERCARD\"\n elif len(credit) == 16 and int(firstdigit) == 55:\n cctype = \"MASTERCARD\"\n elif (len(credit) == 13 or len(credit) == 16) and int(firstdigit) // 10 == 4:\n cctype = \"VISA\"\n else:\n cctype = \"INVALID\"\n\n return cctype\n\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
17
orangecig/mit600
https://github.com/orangecig/mit600
ad19fa88b7249a22cd8f127eab33096f1ee99847
e618c8ae7542add49a8a00623848c2ab407fb266
ea3336d19ee5587800f07184ad309dc27a68d5f3
refs/heads/master
2020-03-09T11:08:48.626415
2013-05-17T18:51:10
2013-05-17T18:51:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4967532455921173, "alphanum_fraction": 0.5129870176315308, "avg_line_length": 17.117647171020508, "blob_id": "1ffa4dccd6d4fe32594371bb88e0f8fee7a71572", "content_id": "2da4f51a3662c1faa7057775f3cf4ab3a87e6713", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/pset_10_memoization/test.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "txtFile = open(\"ryan.rmtl\")\nsubjectList = []\ntest = 0\nfinalValue = 0\n\nfor line in txtFile:\n if test > 1:\n break\n else:\n test += 1\n each = line.split('notes')\n try:\n finalValue = each[1]\n except:\n continue\n\nprint finalValue.strip('=').strip('\"')\n" }, { "alpha_fraction": 0.5724940299987793, "alphanum_fraction": 0.5768198370933533, "avg_line_length": 31.386472702026367, "blob_id": "011120263da989677500a2afb2c1ffef7808211e", "content_id": "bdaaf094ce429c22bb4a9e22c1b963e4ded306df", "detected_licenses": [ "Giftware" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6704, "license_type": "permissive", "max_line_length": 108, "num_lines": 207, "path": "/pset_03_scrabble/ps3b.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "from ps3a import *\nimport time\nfrom perm import *\nimport itertools\n\n# Name: Jorge Amaya\n# Collaborators: None\n# Time: 3:00\n#\n# Problem #6: Computer chooses a word\n#\n#\ndef comp_choose_word(hand, word_list):\n \"\"\"\n Assumes Python 2.6+ (using itertools)\n \n Given a hand and a word_dict, find the word that gives \n the maximum value score, and return it.\n\n This word should be calculated by considering all possible \n permutations of lengths 1 to HAND_SIZE.\n\n If all possible permutations are not in word_list, return None.\n\n hand: dictionary (string -> int)\n word_list: list (string)\n returns: string or None\n \"\"\"\n # Create an empty list to 
store all possible permutations of length 1 to HAND_SIZE\n possiblePerm = []\n allLetters = ''\n\n # List of letters\n for letter in hand.keys():\n for j in range(hand[letter]): \n allLetters += letter\n \n # For all lengths from 1 to HAND_SIZE (including! HAND_SIZE):\n for i in range(1,handsize):\n \n # Get the permutations of this length\n tempList = list(itertools.permutations(allLetters, i))\n\n # Add each permutation to possiblePerm list as a joined string\n for j in range(len(tempList)):\n possiblePerm.append(''.join(tempList[j]))\n \n # Create a new variable to store the maximum score seen so far (initially 0)\n maxScore = 0\n \n # Create a new variable to store the best word seen so far (initially None) \n wordScore = None\n bestWord = None\n\n # For each possible word permutation:\n for word in possiblePerm:\n \n # If the permutation is in the word list:\n if word in word_list:\n \n # Get the word's score\n wordScore = get_word_score(word, handsize)\n \n # If the word's score is larger than the maximum score seen so far:\n if wordScore > maxScore:\n maxScore = wordScore\n \n # Save the current score and the current word as the best found so far\n bestWord = word\n \n if wordScore == None:\n return None\n else:\n return bestWord\n \n\n#\n# Problem #7: Computer plays a hand\n#\ndef comp_play_hand(hand, word_list):\n \"\"\"\n Allows the computer to play the given hand, following the same procedure\n as play_hand, except instead of the user choosing a word, the computer \n chooses it.\n\n 1) The hand is displayed.\n 2) The computer chooses a word.\n 3) After every valid word: the word and the score for that word is \n displayed, the remaining letters in the hand are displayed, and the \n computer chooses another word.\n 4) The sum of the word scores is displayed when the hand finishes.\n 5) The hand finishes when the computer has exhausted its possible\n choices (i.e. 
comp_choose_word returns None).\n \n hand: dictionary (string -> int)\n word_list: list (string)\n \"\"\"\n ## Keep track of two numbers: the number of letters left in your hand and the total score\n totalScore = 0\n \n #check if letters are left in the hand\n while calculate_handlen(hand) > 0: \n # Display the hand\n print \"Current Hand:\",\n display_hand(hand)\n print\n # Ask user for input\n word = comp_choose_word(hand, word_list)\n # If the input is a single period:\n if word == None:\n break\n # End the game (break out of the loop)\n # Otherwise (the input is not a single period):\n else:\n # Tell the user how many points the word earned, and the updated total score \n totalScore += get_word_score(word, handsize)\n print '''\"%s\" earned %d points. Total: %d''' % (word, get_word_score(word, handsize),totalScore)\n print\n # Update hand and show the updated hand to the user\n hand = update_hand(hand, word)\n \n\n # Game is over (user entered a '.' or ran out of letters), so tell user the total score\n print \"Total score: \", totalScore\n \n#\n# Problem #8: Playing a game\n#\n#\ndef play_game(word_list):\n \"\"\"\n Allow the user to play an arbitrary number of hands.\n \n 1) Asks the user to input 'n' or 'r' or 'e'.\n * If the user inputs 'e', immediately exit the game.\n * If the user inputs anything that's not 'n', 'r', or 'e', keep asking them again.\n\n 2) Asks the user to input a 'u' or a 'c'.\n * If the user inputs anything that's not 'c' or 'u', keep asking them again.\n\n 3) Switch functionality based on the above choices:\n * If the user inputted 'n', play a new (random) hand.\n * Else, if the user inputted 'r', play the last hand again.\n \n * If the user inputted 'u', let the user play the game\n with the selected hand, using play_hand.\n * If the user inputted 'c', let the computer play the \n game with the selected hand, using comp_play_hand.\n\n 4) After the computer or user has played the hand, repeat from step 1\n\n word_list: list (string)\n 
\"\"\"\n #generate initial hand\n hand = deal_hand(handsize)\n \n #generic while loop to keep running until user exits\n test = 1\n while test != 0:\n print\n print \"Enter 'n' for new random hand\"\n print \"Enter 'r' to replay hand\"\n print \"Enter 'e' to exit game\"\n \n userOption = raw_input(\"Enter option: \")\n \n if userOption == 'n'or userOption == 'r':\n print\n print \"Enter 'u' for user\"\n print \"Enter 'c' for computer\"\n\n whoPlays = raw_input(\"Enter options: \")\n \n if whoPlays == 'u':\n if userOption == 'n':\n #generates new random hand\n hand = deal_hand(handsize)\n print\n play_hand(hand, word_list)\n else:\n print\n play_hand(hand, word_list)\n \n if whoPlays == 'c':\n print\n if userOption == 'n':\n #generates new random hand\n hand = deal_hand(handsize)\n comp_play_hand(hand, word_list)\n else:\n comp_play_hand(hand, word_list)\n \n \n \n elif userOption == 'e':\n print \"Thank you for playing\"\n break\n else: print \"Please enter a valid option.\"\n\n \n#\n# Build data structures used for entire session and play game\n#\nif __name__ == '__main__':\n word_list = load_words()\n hand = deal_hand(handsize)\n play_game(word_list)\n" }, { "alpha_fraction": 0.505854070186615, "alphanum_fraction": 0.576703667640686, "avg_line_length": 25.648000717163086, "blob_id": "14cce9b0f7a557ba9b90187a4706f654505dd4b6", "content_id": "c9d7bfd091bd4af1338549c00c8e1dc2ab4492b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3331, "license_type": "no_license", "max_line_length": 82, "num_lines": 125, "path": "/pset_02_hangman/ps2_newton.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 2\n# Name: Jorge Amaya\n# Collaborators: None\n# Time: 3:30\n\n# Successive Approximation: Newton's Method\n\n\ndef evaluate_poly(poly, x):\n \"\"\"\n Computes the value of a polynomial function at given value x. 
Returns that\n value as a float.\n\n Example:\n >>> poly = [0.0, 0.0, 5.0, 9.3, 7.0] # f(x) = 5x^2 + 9.3x^3 + 7x^4 \n >>> x = -13\n >>> print evaluate_poly(poly, x) # f(-13) = 5(-13)^2 + 9.3(-13)^3 + 7(-13)^4 \n 180339.9\n\n poly: list of numbers, length > 0\n x: number\n returns: float\n \"\"\"\n if len(poly) == 0:\n print \"Please enter list for poly\"\n assert False\n \n #define result to use and return a float\n result = 0.0\n\n #loops through each part of the polynomial and adds up the total value \n for i in range(len(poly)):\n result = result + poly[i]*(x**i)\n\n #print result\n return result\n\n#test variables\n##poly = [0.0, 0.0, 5.0, 9.3, 7.0]\n##x = -13\n##evaluate_poly(poly, x)\n\n\ndef compute_deriv(poly):\n \"\"\"\n Computes and returns the derivative of a polynomial function as a list of\n floats. If the derivative is 0, returns [0.0].\n\n Example:\n >>> poly = [-13.39, 0.0, 17.5, 3.0, 1.0] # - 13.39 + 17.5x^2 + 3x^3 + x^4\n >>> print compute_deriv(poly) # 35^x + 9x^2 + 4x^3\n [0.0, 35.0, 9.0, 4.0]\n\n poly: list of numbers, length > 0\n returns: list of numbers (floats)\n \"\"\"\n if len(poly) == 0:\n print \"Please enter list for poly\"\n assert False\n \n #define empty list to append to later\n result = []\n\n for i in range(1, len(poly)):\n #derivative stored to temp variable\n temp = poly[i] * i\n #turn value into float\n temp = temp/1.0\n result.append(temp)\n\n #print result\n return result\n\n#test variables\n##poly = [-13.39, 0.0, 17.5, 3.0, 1.0]\n##compute_deriv(poly)\n \n \ndef compute_root(poly, x_0, epsilon):\n \"\"\"\n Uses Newton's method to find and return a root of a polynomial function.\n Returns a list containing the root and the number of iterations required\n to get to the root.\n\n Example:\n >>> poly = [-13.39, 0.0, 17.5, 3.0, 1.0] # - 13.39 + 17.5x^2 + 3x^3 + x^4\n >>> x_0 = 0.1\n >>> epsilon = .0001\n >>> print compute_root(poly, x_0, epsilon)\n [0.80679075379635201, 8]\n >>> poly = [1, 9, 8]\n >>> x_0 = -3\n >>> epsilon = 
.0001\n >>> print compute_root(poly, x_0, epsilon)\n [-1.0000079170005467, 6]\n\n poly: list of numbers, length > 1.\n Represents a polynomial function containing at least one real root.\n The derivative of this polynomial function at x_0 is not 0.\n x_0: float\n epsilon: float > 0\n returns: list [float, int]\n \"\"\"\n #define initial variables\n iteration = 0\n guess = x_0\n result = 1\n deriv = compute_deriv(poly)\n\n #loop that uses Newton's methond to iterate for poly root\n while abs(result) > epsilon:\n iteration += 1\n #checks how close guess is to 0 \n result = evaluate_poly(poly, guess)\n #new guess generated through Newton's equation\n guess = guess - (evaluate_poly(poly, guess)/evaluate_poly(deriv, guess))\n \n return guess, iteration\n \n#test variables\n##poly = [-13.39, 0.0, 17.5, 3.0, 1.0]\n##x_0 = 0.1\n##epsilon = 0.0001\n##\n##compute_root(poly, x_0, epsilon)\n" }, { "alpha_fraction": 0.5582258105278015, "alphanum_fraction": 0.562820315361023, "avg_line_length": 27.310880661010742, "blob_id": "25368794232a7f07e5ac77068c136b9b88179da9", "content_id": "fa4c920cb37af9f72b14213ea15fe2d7b40d616a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5659, "license_type": "no_license", "max_line_length": 80, "num_lines": 193, "path": "/pset_09_greedy_algo/ps9b_cow2.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "###########################\r\n# Problem Set 9b: Space Cows \r\n# Name: Jorge Amaya\r\n# Collaborators: none\r\n# Time: 8:00\r\n\r\nfrom ps9b_partition import getPartitions\r\nimport time\r\n\r\n#================================\r\n# Part 2: Transporting Space Cows\r\n#================================\r\n\r\n# Problem 5\r\ndef loadCows(filename):\r\n \"\"\"\r\n Read the contents of the given file. 
Assumes the file contents contain\r\n data in the form of comma-separated cow name, weight pairs, and return a\r\n dictionary containing cow names as keys and corresponding weights as values.\r\n\r\n Parameters:\r\n filename - the name of the data file as a string\r\n\r\n Returns:\r\n a dictionary of cow name, weight pairs\r\n \"\"\"\r\n txtFile = open(filename)\r\n\r\n #parse file\r\n cowDict = {}\r\n \r\n for line in txtFile:\r\n each = line.split(',')\r\n cow = each[0]\r\n weight = each[1].strip('\\n')\r\n\r\n cowDict[cow] = float(weight)\r\n \r\n return cowDict\r\n\r\n# Problem 6\r\ndef greedyTransport(cows,limit):\r\n \"\"\"\r\n Finds the allocation of cows that minimizes the number of spaceship trips\r\n via a greedy heuristic (always choose the heaviest cow to fill the\r\n remaining space).\r\n \r\n Parameters:\r\n cows - a dictionary of name (string), weight (float) pairs\r\n limit - weight limit of the spaceship\r\n \r\n Returns:\r\n A list of lists, with each inner list containing the names of cows\r\n transported on a particular trip and the overall list containing all the\r\n trips\r\n \"\"\"\r\n\r\n def solveTripsByWeight():\r\n \"\"\"solve problem with just float values from cow weights\"\"\"\r\n \r\n #sorted list of cow weights\r\n cowWeight = sorted(cows.values())\r\n cowWeight.reverse()\r\n\r\n #copy list to avoid side effects\r\n copyWeight = cowWeight[:]\r\n results = []\r\n\r\n #overall loop to hold all the trips\r\n while len(copyWeight) > 0:\r\n\r\n #variable resets after each trip\r\n trip = []\r\n weight = 0\r\n cowWeight = copyWeight[:]\r\n\r\n #inner loop for each trip\r\n for i in range(len(cowWeight)):\r\n if weight + cowWeight[i] <= limit:\r\n weight += cowWeight[i]\r\n trip.append(cowWeight[i])\r\n copyWeight.remove(cowWeight[i])\r\n \r\n #final results \r\n results.append(trip)\r\n \r\n return results\r\n\r\n def findValue(dic, val):\r\n \"\"\"Used this code from StackOverflow to search dict value\"\"\"\r\n return [k for k, v in 
dic.iteritems() if v == val][0]\r\n\r\n def replaceNumWithName():\r\n \"\"\"replaces float with dictionary cow name\"\"\"\r\n \r\n results = solveTripsByWeight()\r\n\r\n #create a dict copy to remove cows that already went on trip\r\n cows2 = cows.copy()\r\n while len(cows2) > 0:\r\n for i in range(len(results)):\r\n for j in range(len(results[i])):\r\n #since cow name doesn't matter\r\n #just search dictionary for correct weight\r\n #replace weight with first found cow name\r\n #delete that cow\r\n replace = findValue(cows2, results[i][j])\r\n cows2.pop(replace)\r\n results[i][j] = replace\r\n\r\n return results\r\n\r\n return replaceNumWithName()\r\n \r\n# Problem 7\r\ndef bruteForceTransport(cows,limit):\r\n \"\"\"\r\n Finds the allocation of cows that minimizes the number of spaceship trips\r\n via brute force.\r\n \r\n Parameters:\r\n cows - a dictionary of name (string), weight (float) pairs\r\n limit - weight limit of the spaceship\r\n \r\n Returns:\r\n A list of lists, with each inner list containing the names of cows\r\n transported on a particular trip and the overall list containing all the\r\n trips\r\n \"\"\"\r\n def addCowWeight(list, cows):\r\n \"\"\"adds a list of cows, with value coming from dict\"\"\"\r\n sum = 0.0\r\n for key in list:\r\n sum += cows[key]\r\n return sum\r\n\r\n #list of cows\r\n cowName = (cows.keys())\r\n ##cowName = ['Maggie', 'Lola', 'Oreo']\r\n\r\n #list to store all partitions and useful ones\r\n allPart = []\r\n usePart = []\r\n \r\n for part in getPartitions(cowName):\r\n allPart.append(part)\r\n\r\n #make a test that checks each trip list if their sum <= limit\r\n #adds each partition that passes all tests to usePart\r\n for part in allPart:\r\n test = []\r\n for trip in part:\r\n if addCowWeight(trip, cows) <= limit:\r\n test.append(trip)\r\n \r\n if len(test) == len(part):\r\n usePart.append(part)\r\n\r\n #find all the lengths of each option, and search for smallest\r\n lenIndex = []\r\n for part in usePart:\r\n 
lenIndex.append(len(part))\r\n \r\n find = min(lenIndex)\r\n\r\n for part in usePart:\r\n if len(part) == find:\r\n return part\r\n \r\n \r\n\r\n# Problem 8\r\nif __name__ == \"__main__\":\r\n\r\n \"\"\"\r\n Using the data from ps9b_data.txt and the specified weight limit, run your\r\n greedyTransport and bruteForceTransport functions here. Print out the\r\n number of trips returned by each method, and how long each method takes\r\n to run in seconds.\r\n \"\"\"\r\n limit = 1.0\r\n openFile = \"ps9b_data.txt\"\r\n\r\n start = time.time()\r\n print len(greedyTransport(loadCows(openFile), limit))\r\n end = time.time()\r\n print end - start\r\n\r\n print\r\n start = time.time()\r\n print len(bruteForceTransport(loadCows(openFile), limit))\r\n end = time.time()\r\n print end - start\r\n\r\n" }, { "alpha_fraction": 0.6915615797042847, "alphanum_fraction": 0.7051406502723694, "avg_line_length": 27.63888931274414, "blob_id": "6cc37990ca87566491972ce8cf8e0ac90ac806e0", "content_id": "3a898d80fd24ba0041b72839d2b96a24fc53a422", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 87, "num_lines": 36, "path": "/pset_01_finance_calc/ps1a.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# Problem Set 1A\n# Name: Jorge Amaya\n# Collaborators: None\n# Time Spent: 1:00\n\nbalance = float(raw_input(\"Enter the outstanding balance on your credit card: \"))\ninterestRate = float(raw_input(\"Enter the annual credit card interest as a decimal: \"))\nmonthRate = float(raw_input(\"Enter the minimum monthly payment rate as a decimal: \"))\nbalanceInitial = balance\n\nx = 1\n#while loop for 12 months\nwhile x<13:\n \n #minimum monthly payment\n monthPay = monthRate*balance\n monthRound = round(monthPay,2)\n\n #interest payment for this month\n interestPaid = (interestRate/12)*balance\n\n #balance after interest is addded and payment is applied\n balance = 
(balance+interestPaid)-monthPay\n balanceRound = round(balance,2)\n \n print 'Month: %d' % x\n print 'Minimum monthly payment: %g' % monthRound\n print 'Remaining balance: %g' % balanceRound\n x += 1\n \ntotalPaid = balanceInitial - balanceRound\n\nprint ' '\nprint 'RESULT'\nprint 'Total amount paid: %g' % totalPaid\nprint 'Remaing balance: %g' % balanceRound\n" }, { "alpha_fraction": 0.6182572841644287, "alphanum_fraction": 0.6680498123168945, "avg_line_length": 24.77777862548828, "blob_id": "b9d95c4f293860fbdad8348ad28afa3ce1619b29", "content_id": "1fbdd7181ef46874d38461068d0f67c3407399ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/pset_00_helloworld/ps0.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "#Problem Set 0\r\n#Name: Jorge Amaya\r\n#Collaborators: None\r\n#Time Spent: 0:05\r\n\r\ndateBirth = raw_input('What is your date of birth (ex. 01/01/2012) ? ')\r\nlastName = raw_input('What is your last name (ex. Johnson) ? 
')\r\n\r\nprint lastName, dateBirth\r\n" }, { "alpha_fraction": 0.582524299621582, "alphanum_fraction": 0.6310679316520691, "avg_line_length": 15.166666984558105, "blob_id": "cfb6a74604ee81200267daadc3deed64277a4986", "content_id": "2fa0fed444080f9c1ec8def7aa46dd9661da4859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/pset_00_helloworld/hello_world.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "#Jorge Amaya\r\n#hello_world.py\r\n#Feb 7, 2012\r\n\r\n#print \"Hello, world!\" to the screen\r\nprint \"Hello, world!\"\r\n" }, { "alpha_fraction": 0.5915113687515259, "alphanum_fraction": 0.5932349562644958, "avg_line_length": 30.682594299316406, "blob_id": "419f9486354780013eb69d8b4832ec5cb8e647a0", "content_id": "5fda84a1e8d08145257ad781f02fa79c34ecb013", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9283, "license_type": "no_license", "max_line_length": 84, "num_lines": 293, "path": "/pset_11_graph_DFS_BFS/graph.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "## 6.00 Problem Set 11\n## GRAPH\n\nimport random\nfrom collections import deque\n#-----------------------------------------------------------------------\n#\n# Problem Set 11\n#\n# Graph code\n#\n###\n\nclass Node(object):\n \"\"\"Represents a node in the graph with a name.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initializes a Node with input name converted to string.\"\"\"\n self.name = str(name)\n \n def getName(self):\n \"\"\"Returns name of the node\"\"\"\n return self.name\n \n def __repr__(self):\n \"\"\"Returns the node name as the string representation of the node.\"\"\"\n return self.name\n \n def __eq__(self, other):\n \"\"\"Specifies that two nodes are equal if they have the same name.\"\"\"\n return self.name == other.getName()\n \n def 
__hash__(self):\n \"\"\"Sets the hash value of the node.\"\"\"\n return hash(self.name)\n\n\nclass Edge(object):\n \"\"\"Represents an unweighted directed edge from a source node to a\n destination node\"\"\"\n \n def __init__(self, src, dest):\n \"\"\"Initializes a edge from the specified source node to the speficied\n destination node.\"\"\"\n self.src = src\n self.dest = dest\n \n def getSource(self):\n \"\"\"Returns the source node of the edge.\"\"\"\n return self.src\n \n def getDestination(self):\n \"\"\"Returns the desination node of the edge.\"\"\"\n return self.dest\n \n def __eq__(self, other):\n \"\"\"Speficies that two edges are equal if they have the same source and\n destination nodes\"\"\"\n return self.src == other.getSource() and self.dest == other.getDestination()\n\n def __repr__(self):\n \"\"\"Returns a string representation of the edge.\"\"\"\n return str(self.src) + '->' + str(self.dest)\n\n\nclass Digraph(object):\n \"\"\"Represents a directed graph.\"\"\"\n \n def __init__(self):\n \"\"\"Initializes a graph with no node or edge.\"\"\"\n self.nodes = set()\n self.edges = {}\n\n def addNode(self, node):\n \"\"\"\n Adds the specified node to the graph.\n Raises a ValueError exception if the node already exists in the graph.\n \"\"\"\n if node in self.nodes:\n raise ValueError('Duplicate node')\n else:\n self.nodes.add(node)\n self.edges[node] = []\n \n def addEdge(self, edge):\n \"\"\"\n Adds the specified edge to the graph.\n Raises a ValueError exception if the source or destination node of the\n edge are not in the graph.\n \"\"\"\n src = edge.getSource()\n dest = edge.getDestination()\n if not(src in self.nodes and dest in self.nodes):\n print src, dest\n raise ValueError('Node not in graph')\n self.edges[src].append(edge)\n\n def getNodes(self):\n \"\"\"Returns a set containing all the nodes in the graph.\"\"\"\n return self.nodes.copy()\n \n def numNodes(self):\n \"\"\"Returns an integer representing the number of nodes in the 
graph.\"\"\"\n return len(self.nodes)\n \n def childrenOf(self, node):\n \"\"\"Returns a list containing all the child nodes of the node.\"\"\"\n result = []\n for e in self.edges[node]:\n if not e.getDestination() in result:\n result.append(e.getDestination())\n return result\n \n def edgesOf(self, node):\n \"\"\"\n Returns a list of all the edges in the graph that leaves the specified\n node (i.e. the edges in the graph that have the specified node as the\n source node).\n \"\"\"\n result = []\n for e in self.edges[node]:\n result.append(e)\n return result\n \n def hasNode(self, node):\n \"\"\"\n Returns whether the graph contains the speficied node. \n \"\"\"\n return node in self.nodes\n \n def __repr__(self):\n \"\"\"Returns a string representation of the graph.\"\"\"\n res = ''\n for k in self.edges:\n for e in self.edges[k]:\n res = res + str(e) + '\\n'\n return res[:-1]\n\n\nclass Graph(Digraph):\n \"\"\"\n Represents an undirected graph.\n If an edge A--B is added to the graph, then node A is a child of node B, and\n node B is also a child of node A. \n \"\"\"\n \n def addEdge(self, edge):\n \"\"\"Adds an edge to the undirected graph.\"\"\"\n Digraph.addEdge(self, edge)\n rev = Edge(edge.getDestination(), edge.getSource())\n Digraph.addEdge(self, rev)\n\n\nclass Path(object):\n \"\"\" Represents a series of connected directed edges from a start node to an\n end node.\"\"\"\n \n def __init__(self, start):\n \"\"\"Intializes a path with the specified start node.\"\"\"\n assert type(start) == Node\n self.val = [start]\n \n def addStep(self, edge):\n \"\"\"\n Adds the specified edge to the end of the path as a continutation of the\n path. Raises a ValueError exception if the source node of the specified\n edge is not the same as the node at the end of the path instance. 
\n \"\"\"\n if self.val[-1] != edge.getSource():\n raise ValueError('Not a continuation of path')\n self.val.append(edge.getDestination())\n \n def getStart(self):\n \"\"\"Returns the start node of the path.\"\"\"\n return self.val[0]\n \n def getLength(self):\n \"\"\"Returns the length of the path, i.e. the number of edges in the path\n \"\"\"\n return len(self.val) - 1\n \n def __add__(self, edge):\n \"\"\"Returns a new copy of the path instance with the specified edge added\n to it. Does not mutate the path instance.\n \"\"\"\n result = Path(self.getStart())\n for elem in self.val[1:]:\n result.val.append(elem)\n result.val.append(edge.getDestination())\n return result\n \n def contains(self, node):\n \"\"\"Returns whether the specified node exists in the path.\"\"\"\n for step in self.val:\n if step == node:\n return True\n return False\n \n def __repr__(self):\n \"\"\"Returns a string representation of the path.\"\"\"\n result = ''\n for step in self.val:\n result = result + '->' + str(step)\n return result[2:]\n\n\ndef BFS(graph, source, destination):\n \"\"\"\n An iterative implementation of breadth-first search starting from the\n source node. 
Returns a path from the source to the destination if the\n destination is found in the graph, None otherwise.\n\n Parameters:\n graph - a Graph instance\n source - a Node instance\n destination - a Node instance\n\n Returns: a Path instance from source to destination, or None if no path\n is found\n \"\"\"\n queue = deque([source])\n previous = {source: None} # dict to keep track of previous nodes on path\n while len(queue) > 0:\n node = queue.popleft()\n if node == destination: # destination found\n return findPath(previous, node)\n for childNode in graph.childrenOf(node): \n if not childNode in previous:\n previous[childNode] = node\n queue.append(childNode) # append children at the end\n return None \n\n\ndef DFS(graph, source, destination):\n \"\"\"\n A recursive implementation of depth-first search starting from the source\n node. Returns a path from the source to the destiation if the destination\n is found in the graph, None otherwise.\n\n Parameters:\n graph - a Graph instance\n source - a Node instance\n destination - a Node instance\n\n Returns: a Path instance from source to destination, or None if no path\n is found\n \"\"\"\n previous = {source: None} # dict to keep track of previous nodes on path\n if recursiveDFS(graph, source, destination, previous):\n return findPath(previous, destination)\n else:\n return None\n\ndef recursiveDFS(graph, node, destination, previous):\n \"\"\"Helper function for the DFS implementation.\"\"\"\n if node == destination: # base case\n return True\n for childNode in graph.childrenOf(node):\n if not childNode in previous:\n previous[childNode] = node\n # recursive case: look through all the children depth first\n if recursiveDFS(graph, childNode, destination, previous):\n return True\n return False\n\n\ndef findPath(previous, node):\n \"\"\"\n Helper function to build a path from the source to the specified node. Takes\n in a dictionary called previous that maps a node to the node that was\n visited before it (i.e. 
its parent) during the search.\n\n Parameters:\n previous - dictionary with nodes as keys and their previous nodes as values.\n (Ex: previous[b] = a means that node a was visited right before node b\n in the path)\n node - node at the end of the path, i.e. the destination\n\n Returns: a Path instance from the source to the node\n \"\"\"\n edges = []\n while previous.get(node) != None:\n previousNode = previous[node]\n edge = Edge(previousNode, node)\n edges.append(edge)\n node = previousNode\n if len(edges) == 0:\n return Path(node)\n edges.reverse()\n path = Path(edges[0].getSource())\n for edge in edges:\n path.addStep(edge)\n return path\n" }, { "alpha_fraction": 0.6304165124893188, "alphanum_fraction": 0.6361482739448547, "avg_line_length": 29.87234115600586, "blob_id": "123f2670ce38f4e9f9d66d1e316cccb7b1971aa6", "content_id": "5fdf093c4e55a7e07a8c8f9432a56f1ad10cf403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13085, "license_type": "no_license", "max_line_length": 81, "num_lines": 423, "path": "/pset_10_memoization/ps10_new.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 10 Spring 2012\n#\n# Name: \n# Collaborators:\n# Time spent:\n\nimport pylab\nimport random\nimport time\n\n'''\nBegin helper code\n'''\n\nclass EventTime(object):\n \"\"\"\n Represents the time for a weekly recurring event.\n \"\"\"\n def __init__(self, timeStr):\n \"\"\"\n Initialize a EventTime instance from the string. 
The input\n string needs to be of the form <dayOfWeek><times>, where\n dayOfWeek is a string that represents the days of the week the\n event occurs, with each letter being either M, T, W, R, F (e.g., MWF),\n and times is a two character digit from 00 to 23 that represents\n the hour of the day the event happens (e.g., 09).\n \"\"\"\n assert isinstance(timeStr, str) and len(timeStr) <= 7 and \\\n timeStr[-1].isdigit() and timeStr[-2].isdigit()\n self.time = int(timeStr[-2:])\n self.daysOfWeek = timeStr[:-2]\n assert self.time >= 0 and self.time <= 23\n assert False not in [c in 'MTWRF' for c in self.daysOfWeek]\n\n def getTime(self):\n \"\"\"\n Gets the hour that the event happens.\n \n Returns: an integer from 0 to 23\n \"\"\"\n return self.time\n\n def getDaysOfWeek(self):\n \"\"\"\n Gets the days of the week that the event happens.\n \n Returns: a string made up with letters MTWRF\n \"\"\"\n return self.daysOfWeek\n\n def conflict(self, other):\n \"\"\"\n Checks if the passed in EventTime instance other is in conflict\n with the current instance. 
Two EventTime instances are in conflict\n if any occurence of one of the EventTime coincidences with\n some occurence of the other EventTime instance.\n returns: True if the two EventTime instances conflict with each other,\n False otherwise.\n \"\"\"\n if not isinstance(other, EventTime):\n return False\n dayConflict = True in [d in other.daysOfWeek for d in self.daysOfWeek]\n return dayConflict and other.time == self.time\n\n def __str__(self):\n return self.daysOfWeek + ' ' + str(self.time)\n\n def __cmp__(self, other):\n if not isinstance(other, EventTime):\n raise NotImplementedError\n if self.time == other.time:\n return cmp(self.daysOfWeek, other.daysOfWeek)\n else: # the times are not equal\n return cmp(self.time, other.time)\n \n def __hash__(self):\n return hash(self.time) + hash(self.daysOfWeek)\n \n\ndef printSubjects(subjects, sortOutput=True):\n \"\"\"\n Pretty-prints a list of Subject instances using the __str__ method\n of the Subject class.\n\n Parameters:\n subjects: a list of Subject instances to be printed\n sortOutput: boolean that indicates whether the output should be sorted\n according to the lexicographic order of the subject names\n \"\"\"\n if sortOutput:\n subjectCmp = lambda s1, s2: cmp(s1.getName(), s2.getName())\n sortedSubjects = sorted(subjects, cmp=subjectCmp)\n else:\n sortedSubjects = subjects\n \n print 'Course\\tValue\\tWork\\tTime\\n======\\t=====\\t====\\t====\\n'\n totalValue, totalWork = 0, 0\n for subject in sortedSubjects:\n print subject\n totalValue += subject.getValue()\n totalWork += subject.getWork()\n\n print '\\nNumber of subjects: %d\\nTotal value: %d\\nTotal work: %d\\n' % \\\n (len(subjects), totalValue, totalWork)\n'''\nEnd Helper Code\n'''\n\nclass Subject(object):\n \"\"\"\n A class that represents a subject.\n \"\"\"\n def __init__(self, name, value, work, time):\n \"\"\"\n Initializes a Subject instance.\n\n Parameters:\n name: a string that represents the name of the subject\n value: an integer that 
represents the value for the subject\n work: an integer that represents the amount of work for the subject\n time: an EventTime instance that represents when the subject meets\n \"\"\"\n # TODO\n raise NotImplementedError\n \n def getName(self):\n \"\"\"\n Gets the name of the subject.\n\n Returns: a string that represents the name of the subject\n \"\"\"\n # TODO\n raise NotImplementedError\n \n def getValue(self):\n \"\"\"\n Gets the value of the subject.\n \n Returns: an integer that represents the value of the subject\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def getWork(self):\n \"\"\"\n Gets the amount of work for the subject.\n\n Returns: an integer that represents the work amount of the subject\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def getTime(self):\n \"\"\"\n Gets the hours and days of the week that the subject meets.\n\n Returns: an EventTime instance that represents when the subject meets\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def conflict(self, subjectList):\n \"\"\"\n Checks whether any subjects in the passed in list conflicts in\n time with the current subject instance.\n\n Parameters:\n subjectList: a list of Subject instances to check conflicts against\n\n Returns:\n True if current instance conflicts with any subjects in the list,\n and False otherwise\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def __str__(self):\n \"\"\"\n Generates the string representation of the Subject class.\n\n Returns:\n a string of the form <subject name>\\t<value>\\t<work>\\t<times of the day>\n where \\t is the tab character, and <times of the day> is the\n string representation when the subject meets\n \"\"\"\n # TODO\n raise NotImplementedError\n\n\ndef loadSubjects(filename):\n \"\"\"\n Loads in the subjects contained in the given file. 
Assumes each line of\n the file\n is of the form \"<subject name>,<value>,<work>,<times of the week>\" where\n each field is separated by a comma.\n\n Parameter:\n filename: name of the data file as a string\n\n Returns:\n a list of Subject instances, each representing one line from the data file\n \"\"\"\n # TODO\n raise NotImplementedError\n\n\nclass SubjectAdvisor(object):\n \"\"\"\n An abstract class that represents all subject advisors.\n \"\"\"\n \n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Pick a set of subjects from the subjects list such that the value of\n the picked set is maximized, with the constraint that the total amount\n of work of the picked set needs to be <= maxWork. To be implemented\n by subclasses.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n raise NotImplementedError('Should not call SubjectAdvisor.pickSubjects!')\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor. Useful for generating plot legends. To be\n implemented by subclasses.\n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n raise NotImplementedError('Should not call SubjectAdvisor.getName!')\n\n\ndef cmpValue(subject1, subject2):\n \"\"\"\n A comparator function for two subjects based on their values. 
To be used\n by the GreedyAdvisor class.\n\n Paramters:\n subject1, subject2: two Subject instances\n\n Returns:\n -1 if subject1 has more value than subject2, 1 if subject1 has less value\n than subject2, 0 otherwise\n \"\"\"\n # TODO\n raise NotImplementedError\n\ndef cmpWork(subject1, subject2):\n \"\"\"\n A comparator function for two subjects based on their amount of work.\n To be used by the GreedyAdvisor class.\n\n Paramters:\n subject1, subject2: two Subject instances\n\n Returns:\n -1 if subject1 has more less than subject2, 1 if subject1 has more work\n than subject2, 0 otherwise\n \"\"\"\n # TODO\n raise NotImplementedError\n\ndef cmpRatio(subject1, subject2):\n \"\"\"\n A comparator function for two subjects based on their value to work ratio.\n To be used by the GreedyAdvisor class.\n\n Paramters:\n subject1, subject2: two Subject instances\n\n Returns:\n -1 if subject1 has higher value to work ratio than subject2, 1 if subject1\n has lower value to work ratio than subject1, 0 otherwise\n \"\"\"\n # TODO\n raise NotImplementedError\n\nclass GreedyAdvisor(SubjectAdvisor):\n \"\"\"\n An advisor that picks subjects based on a greedy algorithm.\n \"\"\"\n \n def __init__(self, comparator):\n \"\"\"\n Initializes a GreedyAdvisor instance.\n\n Parameter:\n comparator: a comparator function, either one of cmpValue, cmpWork,\n or cmpRatio\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Picks subjects to take from the subjects list using a greedy algorithm,\n based on the comparator function that is passed in during\n initialization.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor. 
\n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n return \"Greedy\"\n\n\nclass BruteForceAdvisor(SubjectAdvisor):\n\n def __init__(self):\n \"\"\"\n Initializes a BruteForceAdvisor instance.\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Pick subjects to take using brute force. Use recursive backtracking\n while exploring the list of subjects in order to cut down the number\n of paths to explore, rather than exhaustive enumeration\n that evaluates every possible list of subjects from the power set.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor. \n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n return \"Brute Force\"\n\nclass MemoizingAdvisor(SubjectAdvisor):\n\n def __init__(self):\n \"\"\"\n Initializes a MemoizingAdvisor instance.\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Pick subjects to take using memoization. 
Similar to\n BruteForceAdvisor except that the intermediate results are\n saved in order to avoid re-computation of previously traversed\n subject lists.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n # TODO\n raise NotImplementedError\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor.\n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n return \"Memoizing\"\n\n\ndef measureTimes(filename, maxWork, subjectSizes, numRuns):\n \"\"\"\n Compare the time taken to pick subjects for each of the advisors\n subject to maxWork constraint. Run different trials using different number\n of subjects as given in subjectSizes, using the subjects as loaded\n from filename. Choose a random subject of subjects for each trial.\n For instance, if subjectSizes is the list [10, 20, 30], then you should\n first select 10 random subjects from the loaded subjects, then run them\n through the three advisors using maxWork for numRuns times, measuring\n the time taken for each run, then average over the numRuns runs. After that,\n pick another set of 20 random subjects from the loaded subjects,\n and run them through the advisors, etc. Produce a plot afterwards\n with the x-axis showing number of subjects used, and y-axis showing\n time. Be sure you label your plots.\n\n After plotting the results, answer this question:\n What trend do you observe among the three advisors?\n How does the time taken to pick subjects grow as the number of subject\n used increases? Why do you think that is the case? 
Include the answers\n to these questions in your writeup.\n \"\"\"\n # TODO\n raise NotImplementedError\n \n \n" }, { "alpha_fraction": 0.5387921929359436, "alphanum_fraction": 0.5516888499259949, "avg_line_length": 25.10555648803711, "blob_id": "0036488b56de4c10ca84d77734d9a1bc21efd8e4", "content_id": "8890d6422e5792d0b9a1fbe2be18b5b968c71aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4885, "license_type": "no_license", "max_line_length": 81, "num_lines": 180, "path": "/pset_09_greedy_algo/ps9a.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "###########################\r\n# Problem Set 9: Space Cows \r\n# Name: Jorge Amaya\r\n# Collaborators: none\r\n# Time: 6:00\r\n\r\nimport pylab\r\n\r\n#============================\r\n# Part A: Breeding Alien Cows\r\n#============================\r\n\r\n# Problem 1: File I/O\r\ndef loadData(filename):\r\n \"\"\"\r\n Read the contents of the given file. Assumes the file contents contain\r\n data in the form of comma-separated x,y pairs.\r\n\r\n Parameters:\r\n filename - the name of the data file as a string\r\n\r\n Returns:\r\n (x, y) - a tuple containing a Pylab array of x values and\r\n a Pylab array of y values\r\n \"\"\"\r\n \r\n txtFile = open(filename)\r\n\r\n #parse file\r\n xList = []\r\n yList = []\r\n for line in txtFile:\r\n each = line.split(',')\r\n x = each[0]\r\n y = each[1].strip('\\n')\r\n xList.append(int(x))\r\n yList.append(int(y))\r\n\r\n #pylab arrays\r\n xArray = pylab.array(xList)\r\n yArray = pylab.array(yList)\r\n \r\n return (xArray,yArray)\r\n\r\n\r\n# Problem 2a: Curve Fitting: Finding a polynomial fit\r\ndef polyFit(x, y, degree):\r\n \"\"\"\r\n Find the best fit polynomial curve z of the specified degree for the data\r\n contained in x and y and returns the expected y values for the best fit\r\n polynomial curve for the original set of x values.\r\n\r\n Parameters:\r\n x - a Pylab array of x values\r\n y - a 
Pylab array of y values\r\n degree - the degree of the desired best fit polynomial\r\n\r\n Returns:\r\n a Pylab array of coefficients for the polynomial fit function of the\r\n specified degree, corresponding to the input domain x.\r\n \"\"\"\r\n return pylab.polyfit(x, y, degree)\r\n\r\n\r\n# Problem 2b: Curve Fitting: Finding an exponential fit\r\ndef expFit(x, y):\r\n \"\"\"\r\n Find the best fit exponential curve z for the data contained in x and y.\r\n\r\n Parameters:\r\n x - a Pylab array of x values\r\n y - a Pylab array of y values\r\n\r\n Returns:\r\n a Pylab array of coefficients for the exponential fit function\r\n corresponding to the input domain x.\r\n \"\"\"\r\n y = pylab.log2(y)\r\n return pylab.polyfit(x,y,1)\r\n \r\n\r\n# Problem 3: Evaluating regression functions\r\ndef rSquare(measured, estimated):\r\n \"\"\"\r\n Calculate the R-squared error term.\r\n\r\n Parameters:\r\n measured - one dimensional array of measured values\r\n estimate - one dimensional array of predicted values\r\n\r\n Returns: the R-squared error term\r\n \"\"\"\r\n assert len(measured) == len(estimated)\r\n EE = ((estimated - measured)**2).sum()\r\n mMean = measured.sum()/float(len(measured))\r\n MV = ((mMean - measured)**2).sum()\r\n return 1 - EE/MV\r\n\r\n\r\n#======================\r\n# TESTING CODE\r\n#======================\r\ndef main():\r\n # Problem 1\r\n data1 = loadData('ps9a_data1.txt')\r\n data2 = loadData('ps9a_data2.txt')\r\n data3 = loadData('ps9a_data3.txt')\r\n\r\n # Checks for Problem 1\r\n assert all( [len(data) == 25 for xy in data] for data in [data1, data2] ), \\\r\n \"Error loading data from files; number of terms does not match expected\"\r\n assert all( [len(data) == 100 for xy in data] for data in [data1, data2] ), \\\r\n \"Error loading data from files; number of terms does not match expected\"\r\n\r\n\r\n # Problem 4\r\n # TODO: Make calls to other functions here for calculating errors and\r\n # generating plots.\r\n \r\n def allPlots():\r\n 
\"\"\"makes 3 calls to createPlots\"\"\"\r\n allData = [data1, data2, data3]\r\n prtData = 0\r\n \r\n for data in allData:\r\n prtData += 1\r\n createPlots(data, prtData)\r\n\r\n\r\n def createPlots(data, prtData):\r\n \"\"\"\r\n This function saves 4 pylab plots, modeling:\r\n 1. Linear\r\n 2. Quadratic\r\n 3. Quartic\r\n 4. Exponential\r\n \"\"\"\r\n degree = [1, 2, 4]\r\n iterList = [1,2,3]\r\n \r\n x = data[0]\r\n y = data[1]\r\n\r\n #use for 3 calls to polyFit()\r\n for i in iterList:\r\n #generate unique figure\r\n figure = i + ((prtData-1) * 4)\r\n\r\n #index for degree list\r\n index = i-1\r\n coef = polyFit(x, y, degree[index])\r\n \r\n #reset pylab plotter\r\n pylab.figure(figure)\r\n\r\n estimate = pylab.polyval(coef, x)\r\n ##print rSquare(y, estimate)\r\n\r\n #plots\r\n pylab.scatter(x, y)\r\n pylab.plot(x, estimate)\r\n pylab.savefig(str(figure), format=None)\r\n\r\n #single call to expFit()\r\n pylab.figure(4 * prtData)\r\n b, a = expFit(x,y)\r\n a = pylab.exp2(a)\r\n\r\n #exponential function\r\n estimate = ( a * ( 2 ** (b * x) ) )\r\n ##print rSquare(y, estimate)\r\n\r\n pylab.scatter(x, y)\r\n pylab.plot(x, estimate)\r\n pylab.savefig(str(4 * prtData), format=None) \r\n \r\n allPlots()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 15.5, "blob_id": "bad8e28cadfefe2872cfaf7a0f6a2e14a4cf7650", "content_id": "d70f29d4865f7001d43c183424deb7f6a6a306ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 50, "num_lines": 4, "path": "/README.md", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "mit600\n======\n\nCode from 6.00 Intro to Programming and Algorithms\n" }, { "alpha_fraction": 0.5536636710166931, "alphanum_fraction": 0.5582551956176758, "avg_line_length": 26.69780158996582, "blob_id": 
"d513574cc006c1a6a64c289074c4e83328323112", "content_id": "c8039e7c92d92de4bfc4a918030f975d12b471e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5227, "license_type": "no_license", "max_line_length": 80, "num_lines": 182, "path": "/pset_09_greedy_algo/ps9b.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "###########################\r\n# Problem Set 9b: Space Cows \r\n# Name:\r\n# Collaborators:\r\n# Time:\r\n\r\nfrom ps9b_partition import getPartitions\r\nimport time\r\n\r\n#================================\r\n# Part 2: Transporting Space Cows\r\n#================================\r\n\r\n# Problem 5\r\ndef loadCows(filename):\r\n \"\"\"\r\n Read the contents of the given file. Assumes the file contents contain\r\n data in the form of comma-separated cow name, weight pairs, and return a\r\n dictionary containing cow names as keys and corresponding weights as values.\r\n\r\n Parameters:\r\n filename - the name of the data file as a string\r\n\r\n Returns:\r\n a dictionary of cow name, weight pairs\r\n \"\"\"\r\n txtFile = open(filename)\r\n\r\n #parse file\r\n cowDict = {}\r\n \r\n for line in txtFile:\r\n each = line.split(',')\r\n cow = each[0]\r\n weight = each[1].strip('\\n')\r\n\r\n cowDict[cow] = float(weight)\r\n \r\n return cowDict\r\n\r\n# Problem 6\r\ndef greedyTransport(cows,limit):\r\n \"\"\"\r\n Finds the allocation of cows that minimizes the number of spaceship trips\r\n via a greedy heuristic (always choose the heaviest cow to fill the\r\n remaining space).\r\n \r\n Parameters:\r\n cows - a dictionary of name (string), weight (float) pairs\r\n limit - weight limit of the spaceship\r\n \r\n Returns:\r\n A list of lists, with each inner list containing the names of cows\r\n transported on a particular trip and the overall list containing all the\r\n trips\r\n \"\"\"\r\n\r\n def solveTripsByWeight():\r\n \"\"\"solve problem with just float values from cow 
weights\"\"\"\r\n \r\n #sorted list of cow weights\r\n cowWeight = sorted(cows.values())\r\n cowWeight.reverse()\r\n\r\n #copy list to avoid side effects\r\n copyWeight = cowWeight[:]\r\n results = []\r\n\r\n #overall loop to hold all the trips\r\n while len(copyWeight) > 0:\r\n\r\n #variable resets after each trip\r\n trip = []\r\n weight = 0\r\n cowWeight = copyWeight[:]\r\n\r\n #inner loop for each trip\r\n for i in range(len(cowWeight)):\r\n if weight + cowWeight[i] <= limit:\r\n weight += cowWeight[i]\r\n trip.append(cowWeight[i])\r\n copyWeight.remove(cowWeight[i])\r\n \r\n #final results \r\n results.append(trip)\r\n \r\n return results\r\n\r\n def findValue(dic, val):\r\n \"\"\"Used this code from StackOverflow to search dict value\"\"\"\r\n return [k for k, v in dic.iteritems() if v == val][0]\r\n\r\n def replaceNumWithName():\r\n \"\"\"replaces float with dictionary cow name\"\"\"\r\n \r\n results = solveTripsByWeight()\r\n\r\n #create a dict copy to remove cows that already went on trip\r\n cows2 = cows.copy()\r\n while len(cows2) > 0:\r\n for i in range(len(results)):\r\n for j in range(len(results[i])):\r\n #since cow name doesn't matter\r\n #just search dictionary for correct weight\r\n #replace weight with first found cow name\r\n #delete that cow\r\n replace = findValue(cows2, results[i][j])\r\n cows2.pop(replace)\r\n results[i][j] = replace\r\n\r\n return results\r\n\r\n return replaceNumWithName()\r\n \r\n# Problem 7\r\ndef bruteForceTransport(cows,limit):\r\n \"\"\"\r\n Finds the allocation of cows that minimizes the number of spaceship trips\r\n via brute force.\r\n \r\n Parameters:\r\n cows - a dictionary of name (string), weight (float) pairs\r\n limit - weight limit of the spaceship\r\n \r\n Returns:\r\n A list of lists, with each inner list containing the names of cows\r\n transported on a particular trip and the overall list containing all the\r\n trips\r\n \"\"\"\r\n cowWeight = sorted(cows.values())\r\n cowWeight.reverse()\r\n\r\n print 
cowWeight\r\n \r\n allPart = []\r\n lenPart = []\r\n usePart = []\r\n \r\n for part in getPartitions(cowWeight):\r\n allPart.append(part)\r\n\r\n for part in allPart:\r\n numList = [item for sublist in part for item in sublist]\r\n \r\n if len(numList) == len(cowWeight):\r\n print numList\r\n lenPart.append(part)\r\n\r\n\r\n assert False\r\n \r\n for part in lenPart:\r\n test = []\r\n for trip in part:\r\n if sum(trip) < limit:\r\n test.append(trip)\r\n if len(test) == len(part):\r\n usePart.append(test)\r\n\r\n lenIndex = []\r\n for part in usePart:\r\n lenIndex.append(len(part))\r\n\r\n find = min(lenIndex)\r\n\r\n for part in usePart:\r\n if len(part) == find:\r\n return part\r\n \r\n \r\n\r\n# Problem 8\r\nif __name__ == \"__main__\":\r\n\r\n \"\"\"\r\n Using the data from ps9b_data.txt and the specified weight limit, run your\r\n greedyTransport and bruteForceTransport functions here. Print out the\r\n number of trips returned by each method, and how long each method takes\r\n to run in seconds.\r\n \"\"\"\r\n #print greedyTransport(loadCows(\"ps9b_data.txt\"), 0.7)\r\n print bruteForceTransport(loadCows(\"ps9b_data.txt\"), 1.0)\r\n\r\n\r\n" }, { "alpha_fraction": 0.583539605140686, "alphanum_fraction": 0.5912128686904907, "avg_line_length": 27.652482986450195, "blob_id": "acb1741d9bd39a59874b522ea50ff7f24fb92dce", "content_id": "c555b4e8177f2c46b3fa32ea4f297f9996025df4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8080, "license_type": "no_license", "max_line_length": 84, "num_lines": 282, "path": "/pset_04_ceasar_encryption/ps4_encryption/ps4_encryption.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 4\n#\n# Part 1 - HAIL CAESAR!\n#\n# Name : Jorge Amaya\n# Collaborators : None\n# Time spent : 2:30\n\n\nimport string\nimport random\nimport operator\n\nWORDLIST_FILENAME = \"words.txt\"\n\n# -----------------------------------\n# Helper code\n# (you don't 
need to understand this helper code)\ndef load_words():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist\n\nwordlist = load_words()\n\ndef is_word(wordlist, word):\n \"\"\"\n Determines if word is a valid word.\n\n wordlist: list of words in the dictionary.\n word: a possible word.\n returns True if word is in wordlist.\n\n Example:\n >>> is_word(wordlist, 'bat') returns\n True\n >>> is_word(wordlist, 'asdf') returns\n False\n \"\"\"\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n return word in wordlist\n\ndef random_word(wordlist):\n \"\"\"\n Returns a random word.\n\n wordlist: list of words \n returns: a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\ndef random_string(wordlist, n):\n \"\"\"\n Returns a string containing n random words from wordlist\n\n wordlist: list of words\n returns: a string of random words separated by spaces.\n \"\"\"\n return \" \".join([random_word(wordlist) for _ in range(n)])\n\ndef random_scrambled(wordlist, n):\n \"\"\"\n Generates a test string by generating an n-word random string\n and encrypting it with a sequence of random shifts.\n\n wordlist: list of words\n n: number of random words to generate and scamble\n returns: a scrambled string of n random words\n\n\n NOTE:\n This function will ONLY work once you have completed your\n implementation of apply_shifts!\n \"\"\"\n s = random_string(wordlist, n) + \" \"\n shifts = [(i, random.randint(0, 26)) for i in range(len(s)) if s[i-1] == ' ']\n return apply_shifts(s, shifts)[:-1]\n\ndef get_story_string():\n \"\"\"\n Returns a 
story in encrypted text.\n \"\"\"\n f = open(\"story.txt\", \"r\")\n story = str(f.read())\n f.close()\n return story\n\n\n# (end of helper code)\n# -----------------------------------\n\n\n#\n# Problem 1: Encryption\n#\ndef build_coder(shift):\n \"\"\"\n Returns a dict that can apply a Caesar cipher to a letter.\n The cipher is defined by the shift value. Ignores non-letter characters\n like punctuation and numbers. The empty space counts as the 27th letter\n of the alphabet, so spaces should be mapped to a lowercase letter as\n appropriate.\n\n shift: 0 <= int < 27\n returns: dict\n\n Example:\n >>> build_coder(3)\n {' ': 'c', 'A': 'D', 'C': 'F', 'B': 'E', 'E': 'H', 'D': 'G', 'G': 'J',\n 'F': 'I', 'I': 'L', 'H': 'K', 'K': 'N', 'J': 'M', 'M': 'P', 'L': 'O',\n 'O': 'R', 'N': 'Q', 'Q': 'T', 'P': 'S', 'S': 'V', 'R': 'U', 'U': 'X',\n 'T': 'W', 'W': 'Z', 'V': 'Y', 'Y': 'A', 'X': ' ', 'Z': 'B', 'a': 'd',\n 'c': 'f', 'b': 'e', 'e': 'h', 'd': 'g', 'g': 'j', 'f': 'i', 'i': 'l',\n 'h': 'k', 'k': 'n', 'j': 'm', 'm': 'p', 'l': 'o', 'o': 'r', 'n': 'q',\n 'q': 't', 'p': 's', 's': 'v', 'r': 'u', 'u': 'x', 't': 'w', 'w': 'z',\n 'v': 'y', 'y': 'a', 'x': ' ', 'z': 'b'}\n (The order of the key-value pairs may be different.)\n \"\"\"\n\n def do_shift(letters):\n '''\n Returns a dict for different list inputs with a shifted key value\n\n letters = list of letters as input\n return = dict with shift values\n '''\n dict = {}\n\n for i in range(len(letters)):\n #actual shift for each letter\n finalShift = i + shift\n\n #does shifts until it gets to \" \"\n if i + finalShift <= 27:\n dict[letters[i]] = letters[finalShift]\n\n #loops back to the beginning of the alphabet list\n else:\n frontShift = 27 - shift\n dict[letters[i]] = letters[i - frontShift]\n \n return dict\n\n #generate letter lists to shift\n upper = map(chr, range(65, 91))\n upper.append(\" \")\n lower = map(chr, range(97, 123))\n lower.append(\" \")\n \n dictupper = do_shift(upper)\n dictlower = do_shift(lower)\n\n 
#combine and return single dictionary\n return dict(dictupper.items() + dictlower.items())\n \n\ndef apply_coder(text, coder):\n \"\"\"\n Applies the coder to the text. Returns the encoded text.\n\n text: string\n coder: dict with mappings of characters to shifted characters\n returns: text after mapping coder chars to original text\n\n Example:\n >>> apply_coder(\"Hello, world!\", build_coder(3))\n 'Khoor,czruog!'\n >>> apply_coder(\"Khoor,czruog!\", build_coder(24))\n 'Hello, world!'\n \"\"\"\n #store text as individual items in list so can be mutatable\n encode = list(text)\n \n for i in range(len(encode)):\n\n #checks each char if a letter, if not won't mutate\n if encode[i] in coder.keys():\n\n #replace letter with shifted letter\n encode[i] = coder[encode[i]]\n\n #joins list items back into a usable string\n return \"\".join(encode)\n \n\ndef apply_shift(text, shift):\n \"\"\"\n Given a text, returns a new text Caesar shifted by the given shift\n offset. The empty space counts as the 27th letter of the alphabet,\n so spaces should be replaced by a lowercase letter as appropriate.\n Otherwise, lower case letters should remain lower case, upper case\n letters should remain upper case, and all other punctuation should\n stay as it is.\n\n text: string to apply the shift to\n shift: amount to shift the text (0 <= int < 27)\n returns: text after being shifted by specified amount.\n\n Example:\n >>> apply_shift('This is a test.', 8)\n 'Apq hq hiham a.'\n >>> apply_shift('Apq hq hiham a.', 19)\n 'This is a test.'\n \"\"\"\n\n return apply_coder(text, build_coder(shift))\n\n#\n# Problem 2: Decryption\n#\ndef find_best_shift(wordlist, text):\n \"\"\"\n Assumes Python 2.6+ (keyword operator)\n Finds a shift key that can decrypt the encoded text.\n\n text: string\n returns: 0 <= int < 27\n\n Example: \n >>> s = apply_shift('Hello, world!', 8)\n >>> s\n 'Pmttw,hdwztl!'\n >>> find_best_shift(wordlist, s)\n 19\n >>> apply_shift(s, 19)\n 'Hello, world!'\n \"\"\"\n #prepare 
dictionary for storing matching word amount\n score = {}\n for i in range(27):\n score[i] = 0\n \n #test every permuation of k and apply shift to test for word correct\n for k in score.keys():\n test = apply_shift(text, k)\n\n #split each string at the \" \" into individual words to check if in wordlist\n testSplit = test.split()\n \n for word in testSplit:\n if is_word(wordlist, word):\n score[k] += 1\n \n #return result with most matching results which would indicate correct shift key\n return max(score.iteritems(), key=operator.itemgetter(1))[0] \n\ndef decrypt_story():\n \"\"\"\n Using the methods you created in this problem set,\n decrypt the story given by the function get_story_string().\n Once you decrypt the message, be sure to include as a comment\n at the end of this problem set your decryption of the story.\n\n returns: string - story in plain text\n \"\"\"\n story = get_story_string()\n shift = find_best_shift(wordlist, story)\n \n return apply_shift(story, shift)\n\n\n'''\nJack Florey is a mythical character created on the spur of a moment\nto help cover an insufficiently planned hack. 
He has been registered\nfor classes at MIT twice before, but has reportedly never passed a class.\nIt has been the tradition of the residents of East Campus to become\nJack Florey for a few nights each year to educate incoming students in the ways,\nmeans, and ethics of hacking.\n'''\n" }, { "alpha_fraction": 0.6279287934303284, "alphanum_fraction": 0.6332396268844604, "avg_line_length": 28.100000381469727, "blob_id": "072e793d19381022ab3af577bd49b462ca410272", "content_id": "8d322db2da148d4420c7eb29f3256e05246bfb4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3201, "license_type": "no_license", "max_line_length": 85, "num_lines": 110, "path": "/pset_02_hangman/ps2_hangman.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 2\n# Name: Jorge Amaya\n# Collaborators: None\n# Time: 4:30\n\n# Hangman\n#\n\n\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n\nimport random\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. 
Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n #print \" \", len(wordlist), \"words loaded.\"\n return wordlist\n\ndef choose_word(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n\n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n# end of helper code\n# -----------------------------------\n\n# load the list of words into the wordlist variable\n# so that it can be accessed from anywhere in the program\nwordlist = load_words()\n\n#initial variables\nsecretWord = choose_word(wordlist)\nguessesLeft = 8\navailableLetters = 'abcdefghijklmnopqrstuvwxyz'\nwin = len(secretWord)\n\n#variable to visualize correct guesses in the context of actual word\nshowWord = []\nfor i in range(len(secretWord)):\n showWord.append(\"_\")\n\n#introdution\nprint \"\"\nprint \"Welcome to the game, Hangman!\"\nprint \"I am thinking of a word that is %d letters long\" % len(secretWord)\n\n#loop that terminates when you exhaust guesses or guess the word correctly\nwhile guessesLeft > 0 or win == 0:\n print \" \" .join(showWord) #turns showWord from list to string to print nicely\n print \"\"\n print \"You have %d guess(es) left\" % guessesLeft\n print \"Available letters: \", availableLetters\n \n guess = raw_input(\"Please guess a letter: \")\n guessLower = guess.lower() \n\n #checks whether guess has been guessed or not \n if guessLower in availableLetters:\n #incorrect answer\n if guessLower not in secretWord:\n print \"Sorry, that letter is not in my word\"\n guessesLeft -= 1\n #correct answer\n else:\n #see how many instances of the letter there are in the word\n howMany = secretWord.count(guessLower)\n win -= howMany\n #loop that replaces the 
'_' in the visualization of the word\n word = secretWord\n for i in range(howMany):\n find = word.index(guessLower)\n showWord[find] = guessLower\n word = word.replace(guessLower, \"0\", 1) \n print \"Good guess!\"\n #couldn't figure out why the 'while' loop doesn't recognize when win == 0\n if win == 0:\n break\n else:\n print \"Oops! you already guessed that letter\"\n\n #removes guessed letter from availableLetters string\n availableLetters = availableLetters.replace(guessLower, '')\n \nprint \"\"\nif win == 0: \n print \"Congratulations, you won!\"\nelse:\n print \"You did not win the game\"\nprint \"The word is\", secretWord\n" }, { "alpha_fraction": 0.6302430033683777, "alphanum_fraction": 0.6512923836708069, "avg_line_length": 27.834821701049805, "blob_id": "ae156374a52011442d1415925c567a7cfcc23602", "content_id": "5717da291dba30dcc6eca47d01f2144cd2273f53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6461, "license_type": "no_license", "max_line_length": 80, "num_lines": 224, "path": "/pset_11_graph_DFS_BFS/ps11.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "########################\n# Problem Set 11: The 6.00 Social Network\n# Name: Jorge Amaya\n# Collaborators: none\n# Time: 6:00\n#\n\nfrom graph import *\n\n#\n# PROBLEM 1a\n#\ndef buildFBGraph(filename):\n \"\"\"\n Reads the contents of the given file. Assumes the file contents contain\n data in the form of space-separated x y pairs of itegers. Each x y pair is\n unique and represents a pair of friends on facebook.\n \n There are 120 students in 6.00, and they are represented as integers from 0\n to 119. If a student does not have any Facebook friend in the class, her\n number will not appear in the file, but the graph returned by this function\n will include her.\n \n Parameters:\n filename - the name of the data file as a string. 
\n\n Returns:\n a Graph structure representing the Facebook network of the 6.00 students as\n encoded in the data file, including students who are not connected to any\n other students in the class\n \"\"\"\n pairs = []\n txtFile = open(filename)\n for line in txtFile:\n words = line.split()\n student0 = int(words[0])\n student1 = int(words[1])\n pairs.append([student0,student1])\n txtFile.close()\n \n nodeIndex = range(120)\n nodes = []\n for i in nodeIndex:\n nodes.append(Node(str(i)))\n \n g = Graph()\n for n in nodes:\n g.addNode(n)\n\n for each in pairs:\n g.addEdge(Edge(nodes[each[0]], nodes[each[1]]))\n\n return g\n\n##graph = buildFBGraph('FBFriends.txt')\n \n#\n# PROBLEM 1b\n#\n\ndef degOfSeparation(graph, student1, student2):\n \"\"\"\n Takes in a graph respresenting a Facebook network of the 6.00 students and\n returns the degree of separation between two students. Returns -1 if the two\n students are not connected.\n\n Parameters:\n graph - a Graph structure representing a Facebook network.\n student1 - integer reprsenting student1\n student2 - integer represnting student2\n\n Returns: an integer representing the degree of separation between student1\n and student2, i.e. how many steps away in the friendship chain student1 is\n from student2. If student1 and student2 are not connected, returns -1.\n \"\"\"\n std1 = Node(str(student1))\n std2 = Node(str(student2))\n \n path = BFS(graph, std1, std2)\n if path:\n return path.getLength()\n else:\n return -1\n \n\n##print degOfSeparation(graph, 0, 21)\n##print degOfSeparation(graph, 4, 90)\n##print degOfSeparation(graph, 3, 100)\n\n#\n# PROBLEM 2a\n#\n\ndef buildRatedFBGraph(filename):\n \"\"\"\n Read the contents of the given file. 
Assumes each line in the file has three\n space separated numbers; the first two numbers represent the two students\n who are Facebook friends, and the third number is their friendship rating.\n\n There are 120 students in 6.00, and they are represented as integers from 0\n to 119. If a student does not have any Facebook friend in the class, her\n number will not appear in the file, but the graph returned by this function\n will include her.\n \n Parameters:\n filename - the name of the data file as a string. \n\n Returns:\n a Graph structure representing the Facebook network of the 6.00 students and\n their friendship ratings as encoded in the data file, including students who\n are not be connected to any other students in the class\n \"\"\"\n pairs = []\n txtFile = open(filename)\n for line in txtFile:\n words = line.split()\n student0 = int(words[0])\n student1 = int(words[1])\n connect = int(words[2])\n pairs.append([student0,student1,connect])\n txtFile.close()\n \n nodeIndex = range(120)\n nodes = []\n for i in nodeIndex:\n nodes.append(Node(str(i)))\n \n g = Graph()\n for n in nodes:\n g.addNode(n)\n\n print g\n \n for each in pairs:\n subNodes = [nodes[each[0]]]\n for i in range((each[2])-1):\n subNode = Node(str(each[0]) + str(each[1]) + str(i))\n g.addNode(subNode)\n subNodes.append(subNode)\n subNodes.append(nodes[each[1]])\n \n for i in range(len(subNodes)-1):\n g.addEdge(Edge(subNodes[i], subNodes[i+1]))\n\n return g\n\n##graph = buildRatedFBGraph('ratedFBFriends.txt')\n \n\n#\n# PROBLEM 2b\n#\n\ndef ratedDegOfSeparation(graph, student1, student2):\n \"\"\"\n Takes in a rated Facebook graph and returns the rated degree of separation\n between two students. 
Returns -1 if the two students are not connected.\n\n Parameters:\n graph - a rated Graph structure representing a Facebook network.\n student1 - integer reprsenting student1\n student2 - integer represnting student2\n\n Returns: an integer representing rated degree of separation between student1\n and student2. If student1 and student2 are not connected, returns -1.\n \"\"\"\n std1 = Node(str(student1))\n std2 = Node(str(student2))\n \n path = BFS(graph, std1, std2)\n if path:\n return path.getLength()\n else:\n return -1\n\n##print ratedDegOfSeparation(graph, 0, 21)\n##print ratedDegOfSeparation(graph, 4, 90)\n##print ratedDegOfSeparation(graph, 3, 100)\n \n\n#\n# PROBLEM 3\n#\n\ndef findGroups(graph):\n \"\"\"\n Takes in a graph representing the Facebook network of the 6.00 students and\n returns a list of sets, where each set is a separate group of friends in the\n network. \n\n Parameters:\n graph - a Graph structure representing a Facebook network.\n\n Returns:\n A list of sets where each set is collection of integers, representing the\n group of friends in the network. In one group of friends, each student is\n reachable from every other student in the group through some number of\n friends in the group. 
A member in one group cannot reach a member in another\n group.\n \"\"\"\n result = []\n\n #build list\n nodeList = sorted(list(graph.getNodes()))\n num = range(graph.numNodes())\n \n listEdit = num\n while len(listEdit) > 0:\n #make list copy to delete used nodes\n listIter = listEdit\n #make list to add used nodes\n build = []\n for i in listIter:\n #use first node in list and check against remaining nodes\n path = BFS(graph, nodeList[listIter[0]], nodeList[i])\n if path:\n build.append(i)\n listEdit.remove(i)\n #store build list into final results as set\n result.append(set(build))\n \n return result\n\n##findGroups(graph)\n\n\n" }, { "alpha_fraction": 0.6254599094390869, "alphanum_fraction": 0.652685821056366, "avg_line_length": 25.73469352722168, "blob_id": "a169285ad0b9840729f09270aa32231380825db3", "content_id": "b73a5a0c15c205525442954e8b491d3139e6b7f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 88, "num_lines": 49, "path": "/pset_01_finance_calc/ps1c.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# Problem Set 1C\r\n# Name: Jorge Amaya\r\n# Collaborators: None\r\n# Time Spent: 2:30\r\n\r\ndef test(initialBalance, interest, pay):\r\n \"\"\"solves for final balance with a varying input of\r\n payment values\"\"\"\r\n\r\n #resets balance and month\r\n balance = initialBalance\r\n month = 0\r\n \r\n while month < 12:\r\n balance = ((balance*(1+interest))-pay)\r\n month += 1\r\n return balance\r\n\r\n#constants\r\ninitialBalance = float(raw_input(\"Enter the outstanding balance on your credit card: \"))\r\ninterestRate = float(raw_input(\"Enter the annual credit card interest as a decimal: \"))\r\ninterest = interestRate/12\r\nepsilon = 0.000001\r\n\r\n#variable values\r\nlow = initialBalance/12\r\nhigh = (initialBalance*(1+interest)**12)/12\r\npay = (high + low)/2.0\r\n\r\n#loop to find payment value within 
epsilon\r\nwhile abs(test(initialBalance,interest,pay)) > epsilon:\r\n if test(initialBalance, interest, pay) > 0:\r\n low = pay\r\n else:\r\n high = pay\r\n pay = (high + low)/2.0\r\n\r\n#run loop again to find which month debt is less than or equal to 0\r\nmonth = 0\r\nbalance = initialBalance\r\nwhile balance >= 0:\r\n balance = ((balance*interest)+balance)-pay\r\n month += 1\r\n\r\nprint ' '\r\nprint 'RESULT'\r\nprint 'Monthly payment to pay off debt in 1 year: $%f' % round(pay,2)\r\nprint 'Number of months needed: %d' % month\r\nprint 'Balance: %f' % round(balance,2)\r\n" }, { "alpha_fraction": 0.628282368183136, "alphanum_fraction": 0.6330176591873169, "avg_line_length": 30.391891479492188, "blob_id": "f505f24096d75971f494f8e3d96b98eb48b2acd7", "content_id": "da4b413fe785b7b40072d9b1088815c08e2b9ba2", "detected_licenses": [ "Giftware" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4646, "license_type": "permissive", "max_line_length": 90, "num_lines": 148, "path": "/pset_03_scrabble/test_ps3b.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "from ps3a import *\nimport time\nfrom perm import *\nimport itertools\nimport operator\n\n#\n#\n# Problem #6: Computer chooses a word\n#\n#\ndef sortWordList(dict):\n keys = []\n keys = dict.keys()\n return keys.sort()\n\ndef comp_choose_word(hand, word_list):\n \"\"\"\n Given a hand and a word_dict, find the word that gives \n the maximum value score, and return it.\n\n This word should be calculated by considering all possible \n permutations of lengths 1 to HAND_SIZE.\n\n If all possible permutations are not in word_list, return None.\n\n hand: dictionary (string -> int)\n word_list: list (string)\n returns: string or None\n \"\"\"\n #find word score for word_list store in dictionary\n dictWordList = {}\n for word in word_list:\n dictWordList[word] = get_word_score(word,handsize)\n\n sortedWordList = sorted(dictWordList.iteritems(), 
key=operator.itemgetter(1))\n \n # Create an empty list to store all possible permutations of length 1 to HAND_SIZE\n possiblePerm = []\n allLetters = ''\n\n # List of letters\n for letter in hand.keys():\n for j in range(hand[letter]): \n allLetters += letter\n \n # For all lengths from 1 to HAND_SIZE (including! HAND_SIZE):\n for i in range(1,handsize):\n \n # Get the permutations of this length\n tempList = list(itertools.permutations(allLetters, i))\n\n # Add each permutation to possiblePerm list as a joined string\n for j in range(len(tempList)):\n possiblePerm.append(''.join(tempList[j]))\n \n # Create a new variable to store the maximum score seen so far (initially 0)\n maxScore = 0\n \n # Create a new variable to store the best word seen so far (initially None) \n wordScore = None\n\n # For each possible word permutation:\n for word in possiblePerm:\n \n # If the permutation is in the word list:\n if word in word_list:\n \n # Get the word's score\n wordScore = get_word_score(word, handsize)\n \n # If the word's score is larger than the maximum score seen so far:\n if wordScore > maxScore:\n maxScore = wordScore\n \n # Save the current score and the current word as the best found so far\n bestWord = word\n\n print bestWord, maxScore\n \n if maxScore == 0:\n return None\n else:\n return bestWord\n \n\n#\n# Problem #7: Computer plays a hand\n#\ndef comp_play_hand(hand, word_list):\n \"\"\"\n Allows the computer to play the given hand, following the same procedure\n as play_hand, except instead of the user choosing a word, the computer \n chooses it.\n\n 1) The hand is displayed.\n 2) The computer chooses a word.\n 3) After every valid word: the word and the score for that word is \n displayed, the remaining letters in the hand are displayed, and the \n computer chooses another word.\n 4) The sum of the word scores is displayed when the hand finishes.\n 5) The hand finishes when the computer has exhausted its possible\n choices (i.e. 
comp_choose_word returns None).\n \n hand: dictionary (string -> int)\n word_list: list (string)\n \"\"\"\n # TO DO ... <-- Remove this comment when you code this function\n \n#\n# Problem #8: Playing a game\n#\n#\ndef play_game(word_list):\n \"\"\"\n Allow the user to play an arbitrary number of hands.\n \n 1) Asks the user to input 'n' or 'r' or 'e'.\n * If the user inputs 'e', immediately exit the game.\n * If the user inputs anything that's not 'n', 'r', or 'e', keep asking them again.\n\n 2) Asks the user to input a 'u' or a 'c'.\n * If the user inputs anything that's not 'c' or 'u', keep asking them again.\n\n 3) Switch functionality based on the above choices:\n * If the user inputted 'n', play a new (random) hand.\n * Else, if the user inputted 'r', play the last hand again.\n \n * If the user inputted 'u', let the user play the game\n with the selected hand, using play_hand.\n * If the user inputted 'c', let the computer play the \n game with the selected hand, using comp_play_hand.\n\n 4) After the computer or user has played the hand, repeat from step 1\n\n word_list: list (string)\n \"\"\"\n # TO DO... 
<-- Remove this comment when you code this function\n print \"play_game not yet implemented.\" # <-- Remove this when you code this function\n\n \n#\n# Build data structures used for entire session and play game\n#\nif __name__ == '__main__':\n word_list = load_words()\n hand = deal_hand(handsize)\n comp_choose_word(hand, word_list)\n" }, { "alpha_fraction": 0.6618357300758362, "alphanum_fraction": 0.686956524848938, "avg_line_length": 23.64285659790039, "blob_id": "9781e08b1a4cc6936a56fbd3f8c383e904e2d3f3", "content_id": "6293f17d9353038991a2e3c0c767eede4fd77268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 88, "num_lines": 42, "path": "/pset_01_finance_calc/ps1b.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# Problem Set 1B\n# Name: Jorge Amaya\n# Collaborators: None\n# Time Spent: 1:30\n\n#constants\ninitialBalance = float(raw_input(\"Enter the outstanding balance on your credit card: \"))\ninterestRate = float(raw_input(\"Enter the annual credit card interest as a decimal: \"))\nmonthlyInterest = interestRate/12\n\n#variables\npay = 0\nbalance = initialBalance\nmonth = 0\n\n#loop to search for correct multiple of $10 payment\nwhile balance >= 0:\n pay += 10\n #resets balance to original balance\n balance = initialBalance\n month = 0\n \n #loop that runs for a 12-month period\n while month<12:\n month += 1\n balance = ((1+monthlyInterest)*balance)-pay\n \n \n\n#run loop again to find which month debt is less than or equal to 0\nmonth = 0\nbalance = initialBalance\nwhile balance >= 0:\n balance = ((balance*monthlyInterest)+balance)-pay\n month += 1\n\n\nprint ' '\nprint 'RESULT'\nprint 'Monthly payment to pay off debt in 1 year: $%d' % pay\nprint 'Number of months needed: %d' % month\nprint 'Balance: %g' % round(balance,2)\n" }, { "alpha_fraction": 0.6231213808059692, "alphanum_fraction": 0.6306358575820923, 
"avg_line_length": 28.55555534362793, "blob_id": "809b86fb60d477be479278e4fe9bc27a7fdd90c1", "content_id": "c788864a71d1f1560d548f3d4b6e85b80949d269", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3460, "license_type": "no_license", "max_line_length": 145, "num_lines": 117, "path": "/pset_04_ceasar_encryption/ps4_recursion.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 4\n#\n# Part 2 - RECURSION\n#\n# Name : Jorge Amaya\n# Collaborators : None\n# Time spent : 4:00\n\n#\n# Problem 3: Recursive String Reversal\n#\ndef reverse_string(string):\n \"\"\"\n Given a string, recursively returns a reversed copy of the string.\n For example, if the string is 'abc', the function returns 'cba'.\n The only string operations you are allowed to use are indexing,\n slicing, and concatenation.\n \n string: a string\n returns: a reversed string\n \"\"\"\n if string == \"\":\n return string\n\n #Leave last letter in string and do recursive function on remaining letters.\n else:\n return string[-1] + reverse_string(string[:-1])\n\n#\n# Problem 4: Srinian\n#\ndef x_ian(x, word):\n \"\"\"\n Given a string x, returns True if all the letters in x are\n contained in word in the same order as they appear in x.\n\n >>> x_ian('srini', 'histrionic')\n True\n >>> x_ian('john', 'mahjong')\n False\n >>> x_ian('dina', 'dinosaur')\n True\n >>> x_ian('pangus', 'angus')\n False\n \n x: a string\n word: a string\n returns: True if word is x_ian, False otherwise\n \"\"\"\n if len(x) > 0 and len(word) > 0:\n \n #If char match, move on to next letters\n if x[0] == word[0]:\n x_ian(x[1:], word[1:])\n\n #If char don't match increment the word and leave x the same\n else:\n x_ian(x, word[1:])\n \n #If all letters of x are cleared in order, return True\n elif len(x) == 0:\n return True\n\n else:\n return False\n \n\n#\n# Problem 5: Typewriter\n#\n\ndef find_space(text, line_length):\n '''\n Given a block of 
text, returns the first instance of a space (between\n words) that is equal to or greater than initial index value.\n\n text: a string\n line_length: integer, index value\n returns: integer where space is\n '''\n #if you get to end of string, just returns generic index\n if len(text) <= line_length:\n return line_length\n \n #look for \" \" between words, returns index value where this occurs\n elif text[line_length] == \" \":\n return line_length\n \n #recursion that increments the index value + 1, until it finds \" \" or reaches end of string\n else:\n return find_space(text, line_length + 1)\n \ndef insert_newlines(text, line_length):\n \"\"\"\n Given text and a desired line length, wrap the text as a typewriter would.\n Insert a newline character (\"\\n\") after each word that reaches or exceeds\n the desired line length.\n\n text: a string containing the text to wrap.\n line_length: the number of characters to include on a line before wrapping\n the next word.\n returns: a string, with newline characters inserted appropriately. 
\n \"\"\"\n #uses helper function to look for the first instance of a \" \" after the line_length value\n index = find_space(text, line_length)\n \n #for lines that are smaller than the index just return remaining text\n if len(text) <= index:\n return text\n\n #recursion that returns string up to the index value then runs function on rest of string until out of char\n else:\n return text[:index] + \"\\n\" + insert_newlines(text[index+1:], line_length) \n\n#f() test\n#text = \"In practice, Turing completeness means, that the rules followed in sequence on arbitrary data can produce the result of any calculation\"\n#print insert_newlines(text, 29)\n\n\n" }, { "alpha_fraction": 0.5585949420928955, "alphanum_fraction": 0.5713397860527039, "avg_line_length": 24.758333206176758, "blob_id": "4eaa2539fa9bcb57ab0ded7a87269d3f37784560", "content_id": "5c6fed619286bed0633ae02ead84cdc573ed0dda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3217, "license_type": "no_license", "max_line_length": 81, "num_lines": 120, "path": "/pset_09_greedy_algo/ps9a_test200.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "###########################\r\n# Problem Set 9: Space Cows \r\n# Name: Jorge Amaya\r\n# Collaborators: none\r\n# Time:\r\n\r\nimport pylab\r\n\r\n#============================\r\n# Part A: Breeding Alien Cows\r\n#============================\r\n\r\n# Problem 1: File I/O\r\ndef loadData(filename):\r\n \"\"\"\r\n Read the contents of the given file. 
Assumes the file contents contain\r\n data in the form of comma-separated x,y pairs.\r\n\r\n Parameters:\r\n filename - the name of the data file as a string\r\n\r\n Returns:\r\n (x, y) - a tuple containing a Pylab array of x values and\r\n a Pylab array of y values\r\n \"\"\"\r\n \r\n txtFile = open(filename)\r\n\r\n #parse file\r\n xList = []\r\n yList = []\r\n for line in txtFile:\r\n each = line.split(',')\r\n x = each[0]\r\n y = each[1].strip('\\n')\r\n xList.append(int(x))\r\n yList.append(int(y))\r\n\r\n #pylab arrays\r\n xArray = pylab.array(xList)\r\n yArray = pylab.array(yList)\r\n \r\n return (xArray,yArray)\r\n\r\n\r\n# Problem 2a: Curve Fitting: Finding a polynomial fit\r\ndef polyFit(x, y, degree):\r\n \"\"\"\r\n Find the best fit polynomial curve z of the specified degree for the data\r\n contained in x and y and returns the expected y values for the best fit\r\n polynomial curve for the original set of x values.\r\n\r\n Parameters:\r\n x - a Pylab array of x values\r\n y - a Pylab array of y values\r\n degree - the degree of the desired best fit polynomial\r\n\r\n Returns:\r\n a Pylab array of coefficients for the polynomial fit function of the\r\n specified degree, corresponding to the input domain x.\r\n \"\"\"\r\n return pylab.polyfit(x, y, degree)\r\n \r\n\r\n# Problem 3: Evaluating regression functions\r\ndef rSquare(measured, estimated):\r\n \"\"\"\r\n Calculate the R-squared error term.\r\n\r\n Parameters:\r\n measured - one dimensional array of measured values\r\n estimate - one dimensional array of predicted values\r\n\r\n Returns: the R-squared error term\r\n \"\"\"\r\n assert len(measured) == len(estimated)\r\n EE = ((estimated - measured)**2).sum()\r\n mMean = measured.sum()/float(len(measured))\r\n MV = ((mMean - measured)**2).sum()\r\n return 1 - EE/MV\r\n\r\n\r\n#======================\r\n# TESTING CODE\r\n#======================\r\ndef main():\r\n # Problem 1\r\n data1 = loadData('ps9a_data1.txt')\r\n data2 = 
loadData('ps9a_data2.txt')\r\n data3 = loadData('ps9a_data3.txt')\r\n\r\n # Checks for Problem 1\r\n assert all( [len(data) == 25 for xy in data] for data in [data1, data2] ), \\\r\n \"Error loading data from files; number of terms does not match expected\"\r\n assert all( [len(data) == 100 for xy in data] for data in [data1, data2] ), \\\r\n \"Error loading data from files; number of terms does not match expected\"\r\n\r\n\r\n # Problem 4\r\n # TODO: Make calls to other functions here for calculating errors and\r\n # generating plots.\r\n \r\n\r\n def createPlots(data):\r\n x = data[0]\r\n y = data[1]\r\n\r\n a,b,c = polyFit(x, y, 2)\r\n\r\n x = pylab.arange(201)\r\n y = a*x**2 + b*x + c\r\n\r\n print pylab.polyval((a,b,c), 200)\r\n pylab.plot(x, y)\r\n pylab.show()\r\n \r\n createPlots(data3)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n" }, { "alpha_fraction": 0.5987769961357117, "alphanum_fraction": 0.6061256527900696, "avg_line_length": 30.62139129638672, "blob_id": "ee4eb2217fd1e21af67ce4414446042a4b7323d9", "content_id": "70376b82df5c5030a0bc15ac845d1668b0e5063f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18643, "license_type": "no_license", "max_line_length": 98, "num_lines": 589, "path": "/pset_10_memoization/ps10.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 10 Spring 2012\n#\n# Name: Jorge Amaya\n# Collaborators: none\n# Time spent: 9:00\n\nimport pylab\nimport random\nimport time\n\n'''\nBegin helper code\n'''\n\nclass EventTime(object):\n \"\"\"\n Represents the time for a weekly recurring event.\n \"\"\"\n def __init__(self, timeStr):\n \"\"\"\n Initialize a EventTime instance from the string. 
The input\n string needs to be of the form <dayOfWeek><times>, where\n dayOfWeek is a string that represents the days of the week the\n event occurs, with each letter being either M, T, W, R, F (e.g., MWF),\n and times is a two character digit from 00 to 23 that represents\n the hour of the day the event happens (e.g., 09).\n \"\"\"\n assert isinstance(timeStr, str) and len(timeStr) <= 7 and \\\n timeStr[-1].isdigit() and timeStr[-2].isdigit()\n self.time = int(timeStr[-2:])\n self.daysOfWeek = timeStr[:-2]\n assert self.time >= 0 and self.time <= 23\n assert False not in [c in 'MTWRF' for c in self.daysOfWeek]\n\n def getTime(self):\n \"\"\"\n Gets the hour that the event happens.\n \n Returns: an integer from 0 to 23\n \"\"\"\n return self.time\n\n def getDaysOfWeek(self):\n \"\"\"\n Gets the days of the week that the event happens.\n \n Returns: a string made up with letters MTWRF\n \"\"\"\n return self.daysOfWeek\n\n def conflict(self, other):\n \"\"\"\n Checks if the passed in EventTime instance other is in conflict\n with the current instance. 
Two EventTime instances are in conflict\n if any occurence of one of the EventTime coincidences with\n some occurence of the other EventTime instance.\n returns: True if the two EventTime instances conflict with each other,\n False otherwise.\n \"\"\"\n if not isinstance(other, EventTime):\n return False\n dayConflict = True in [d in other.daysOfWeek for d in self.daysOfWeek]\n return dayConflict and other.time == self.time\n\n def __str__(self):\n return self.daysOfWeek + ' ' + str(self.time)\n \n def __cmp__(self, other):\n if not isinstance(other, EventTime):\n raise NotImplementedError\n if self.time == other.time:\n return cmp(self.daysOfWeek, other.daysOfWeek)\n else: # the times are not equal\n return cmp(self.time, other.time)\n \n def __hash__(self):\n return hash(self.time) + hash(self.daysOfWeek)\n\n\ndef printSubjects(subjects, sortOutput=True):\n \"\"\"\n Pretty-prints a list of Subject instances using the __str__ method\n of the Subject class.\n\n Parameters:\n subjects: a list of Subject instances to be printed\n sortOutput: boolean that indicates whether the output should be sorted\n according to the lexicographic order of the subject names\n \"\"\"\n if sortOutput:\n subjectCmp = lambda s1, s2: cmp(s1.getName(), s2.getName())\n sortedSubjects = sorted(subjects, cmp=subjectCmp)\n else:\n sortedSubjects = subjects\n \n print 'Course\\tValue\\tWork\\tTime\\n======\\t=====\\t====\\t====\\n'\n totalValue, totalWork = 0, 0\n for subject in sortedSubjects:\n print subject\n totalValue += subject.getValue()\n totalWork += subject.getWork()\n\n print '\\nNumber of subjects: %d\\nTotal value: %d\\nTotal work: %d\\n' % \\\n (len(subjects), totalValue, totalWork)\n'''\nEnd Helper Code\n'''\n\nclass Subject(object):\n \"\"\"\n A class that represents a subject.\n \"\"\"\n def __init__(self, name, value, work, time):\n \"\"\"\n Initializes a Subject instance.\n\n Parameters:\n name: a string that represents the name of the subject\n value: an integer that 
represents the value for the subject\n work: an integer that represents the amount of work for the subject\n time: an EventTime instance that represents when the subject meets\n \"\"\"\n self.name = name\n self.value = value\n self.work = work\n self.time = time\n \n def getName(self):\n \"\"\"\n Gets the name of the subject.\n\n Returns: a string that represents the name of the subject\n \"\"\"\n return self.name\n \n def getValue(self):\n \"\"\"\n Gets the value of the subject.\n \n Returns: an integer that represents the value of the subject\n \"\"\"\n return self.value\n\n def getWork(self):\n \"\"\"\n Gets the amount of work for the subject.\n\n Returns: an integer that represents the work amount of the subject\n \"\"\"\n return self.work\n\n def getTime(self):\n \"\"\"\n Gets the hours and days of the week that the subject meets.\n\n Returns: an EventTime instance that represents when the subject meets\n \"\"\"\n return self.time\n\n def conflict(self, subjectList):\n \"\"\"\n Checks whether any subjects in the passed in list conflicts in\n time with the current subject instance.\n\n Parameters:\n subjectList: a list of Subject instances to check conflicts against\n\n Returns:\n True if current instance conflicts with any subjects in the list,\n and False otherwise\n \"\"\"\n for subject in subjectList:\n if self.time.conflict(subject.getTime()):\n return True\n return False\n\n def __str__(self):\n \"\"\"\n Generates the string representation of the Subject class.\n\n Returns:\n a string of the form <subject name>\\t<value>\\t<work>\\t<times of the day>\n where \\t is the tab character, and <times of the day> is the\n string representation when the subject meets\n \"\"\"\n \n return \"%s\\t%s\\t%s\\t%s\\t\" % (self.name, self.value, self.work, self.time)\n\n\ndef loadSubjects(filename):\n \"\"\"\n Loads in the subjects contained in the given file. 
Assumes each line of\n the file\n is of the form \"<subject name>,<value>,<work>,<times of the week>\" where\n each field is separated by a comma.\n\n Parameter:\n filename: name of the data file as a string\n\n Returns:\n a list of Subject instances, each representing one line from the data file\n \"\"\"\n txtFile = open(filename)\n subjectList = []\n\n for line in txtFile:\n each = line.split(',')\n\n name = each[0]\n value = int(each[1])\n work = int(each[2])\n time = EventTime(each[3].strip('\\n').strip('\\r'))\n \n subject = Subject(name, value, work, time)\n subjectList.append(subject)\n\n txtFile.close()\n return subjectList\n\n\nclass SubjectAdvisor(object):\n \"\"\"\n An abstract class that represents all subject advisors.\n \"\"\"\n \n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Pick a set of subjects from the subjects list such that the value of\n the picked set is maximized, with the constraint that the total amount\n of work of the picked set needs to be <= maxWork. To be implemented\n by subclasses.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n raise NotImplementedError('Should not call SubjectAdvisor.pickSubjects!')\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor. Useful for generating plot legends. To be\n implemented by subclasses.\n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n raise NotImplementedError('Should not call SubjectAdvisor.getName!')\n\n\ndef cmpValue(subject1, subject2):\n \"\"\"\n A comparator function for two subjects based on their values. 
To be used\n by the GreedyAdvisor class.\n\n Paramters:\n subject1, subject2: two Subject instances\n\n Returns:\n -1 if subject1 has more value than subject2, 1 if subject1 has less value\n than subject2, 0 otherwise\n \"\"\"\n return subject1.getValue() - subject2.getValue()\n\ndef cmpWork(subject1, subject2):\n \"\"\"\n A comparator function for two subjects based on their amount of work.\n To be used by the GreedyAdvisor class.\n\n Paramters:\n subject1, subject2: two Subject instances\n\n Returns:\n -1 if subject1 has less work than subject2, 1 if subject1 has more work\n than subject2, 0 otherwise\n \"\"\"\n return subject1.getWork() - subject2.getWork()\n\ndef cmpRatio(subject1, subject2):\n \"\"\"\n A comparator function for two subjects based on their value to work ratio.\n To be used by the GreedyAdvisor class.\n\n Paramters:\n subject1, subject2: two Subject instances\n\n Returns:\n -1 if subject1 has higher value to work ratio than subject2, 1 if subject1\n has lower value to work ratio than subject1, 0 otherwise\n \"\"\"\n s1 = subject1.getValue() / subject1.getWork()\n s2 = subject2.getValue() / subject2.getWork()\n\n return s1 - s2\n\n\nclass GreedyAdvisor(SubjectAdvisor):\n \"\"\"\n An advisor that picks subjects based on a greedy algorithm.\n \"\"\"\n \n def __init__(self, comparator):\n \"\"\"\n Initializes a GreedyAdvisor instance.\n\n Parameter:\n comparator: a comparator function, either one of cmpValue, cmpWork,\n or cmpRatio\n \"\"\"\n self.comparator = comparator\n \n\n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Picks subjects to take from the subjects list using a greedy algorithm,\n based on the comparator function that is passed in during\n initialization.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n work = 0\n 
subjectList = []\n subjectsCopy = sorted(subjects, cmp=self.comparator, reverse=True)\n \n for subject in subjectsCopy:\n if work + subject.getWork() <= maxWork and not subject.conflict(subjectList):\n work += subject.getWork()\n subjectList.append(subject)\n \n return subjectList\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor. \n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n return \"Greedy\"\n\n\nclass BruteForceAdvisor(SubjectAdvisor):\n\n def __init__(self):\n \"\"\"\n Initializes a BruteForceAdvisor instance.\n \"\"\"\n \n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Pick subjects to take using brute force. Use recursive backtracking\n while exploring the list of subjects in order to cut down the number\n of paths to explore, rather than exhaustive enumeration\n that evaluates every possible list of subjects from the power set.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n results = self.maxVal(subjects, maxWork)\n return list(results[1])\n\n\n def maxVal(self, subjects, maxWork):\n if subjects == [] or maxWork == 0:\n result = (0, ())\n \n elif subjects[0].getWork() > maxWork:\n result = self.maxVal(subjects[1:], maxWork)\n else:\n nextItem = subjects[0]\n \n #LEFT\n withVal, withToTake = self.maxVal(subjects[1:], maxWork - nextItem.getWork())\n withVal += nextItem.getValue()\n \n #RIGHT\n withoutVal, withoutToTake = self.maxVal(subjects[1:], maxWork)\n \n #CHOOSE BETTER\n if withVal >= withoutVal and not nextItem.conflict(withToTake):\n result = (withVal, withToTake + (nextItem,))\n else:\n result = (withoutVal, withoutToTake)\n \n return result\n\n def getName(self):\n \"\"\"\n Gets the name of the advisor. 
\n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n return \"Brute Force\"\n\nclass MemoizingAdvisor(SubjectAdvisor):\n\n def __init__(self):\n \"\"\"\n Initializes a MemoizingAdvisor instance.\n \"\"\"\n self.memo = {}\n \n def pickSubjects(self, subjects, maxWork):\n \"\"\"\n Pick subjects to take using memoization. Similar to\n BruteForceAdvisor except that the intermediate results are\n saved in order to avoid re-computation of previously traversed\n subject lists.\n\n Parameters:\n subjects: list of Subject instances to choose from, each subject\n can be chosen at most once\n maxWork: maximum amount of work the student is willing to take on\n\n Returns:\n a list of Subject instances that are chosen to take\n \"\"\"\n results = self.fastSolve(subjects, maxWork, memo=None)\n return list(results[1])\n \n def fastSolve(self, subjects, maxWork, memo):\n if memo == None:\n memo = {}\n \n timeCheck = []\n for each in subjects:\n timeCheck.append(each.getTime())\n timeCheckTup = tuple(sorted(timeCheck))\n \n #check if in memo \n if (len(subjects), maxWork, timeCheckTup) in memo:\n return memo[len(subjects), maxWork, timeCheckTup]\n \n elif subjects == [] or maxWork == 0:\n result = (0, ())\n \n elif subjects[0].getWork() > maxWork:\n result = self.fastSolve(subjects[1:], maxWork, memo)\n else:\n nextItem = subjects[0]\n \n #LEFT\n withVal, withToTake = self.fastSolve(subjects[1:], maxWork - nextItem.getWork(), memo)\n withVal += nextItem.getValue()\n \n #RIGHT\n withoutVal, withoutToTake = self.fastSolve(subjects[1:], maxWork, memo)\n \n #CHOOSE BETTER\n if withVal >= withoutVal and not nextItem.conflict(withToTake):\n result = (withVal, withToTake + (nextItem,))\n else:\n result = (withoutVal, withoutToTake)\n\n time = []\n for each in result[1]:\n time.append(each.getTime())\n timeTup = tuple(sorted(time))\n\n memo[(len(subjects), maxWork, timeTup)] = result\n return result\n\n \n def getName(self):\n \"\"\"\n Gets the name of the 
advisor.\n\n Returns:\n A string that represents the name of this advisor\n \"\"\"\n return \"Memoizing\"\n\n\ndef measureTimes(filename, maxWork, subjectSizes, numRuns):\n \"\"\"\n Compare the time taken to pick subjects for each of the advisors\n subject to maxWork constraint. Run different trials using different number\n of subjects as given in subjectSizes, using the subjects as loaded\n from filename. Choose a random subject of subjects for each trial.\n For instance, if subjectSizes is the list [10, 20, 30], then you should\n first select 10 random subjects from the loaded subjects, then run them\n through the three advisors using maxWork for numRuns times, measuring\n the time taken for each run, then average over the numRuns runs. After that,\n pick another set of 20 random subjects from the loaded subjects,\n and run them through the advisors, etc. Produce a plot afterwards\n with the x-axis showing number of subjects used, and y-axis showing\n time. Be sure you label your plots.\n\n After plotting the results, answer this question:\n What trend do you observe among the three advisors?\n How does the time taken to pick subjects grow as the number of subject\n used increases? Why do you think that is the case? 
Include the answers\n to these questions in your writeup.\n \"\"\"\n loadSub = loadSubjects(filename)\n\n def selectRandomSubjects(num, subjects):\n '''returns list of n # of random subjects'''\n return random.sample(subjects, num)\n \n def Advisors(subjects, maxWork):\n '''times each advisor'''\n times = []\n start = time.time()\n greedy = GreedyAdvisor(cmpRatio).pickSubjects(subjects, maxWork)\n end = time.time()\n greedyTime = end - start\n \n start = time.time()\n brute = BruteForceAdvisor().pickSubjects(subjects, maxWork)\n end = time.time()\n bruteTime = end - start\n \n start = time.time()\n memoiz = MemoizingAdvisor().pickSubjects(subjects, maxWork)\n end = time.time()\n memoTime = end - start\n\n return [greedyTime, bruteTime, memoTime]\n \n def eachRun(numRuns, loadSub, num):\n '''makes 5 runs of each subjectSize'''\n results = []\n subjects = selectRandomSubjects(num, loadSub)\n \n for value in range(numRuns):\n run = Advisors(subjects, maxWork)\n if results == []:\n for each in run:\n results.append(each)\n else:\n for i in range(3):\n results[i] += run[i]\n\n for i in range(3):\n results[i] /= numRuns\n \n return results\n \n\n def pylabPlot(numRuns, subjectSizes):\n '''creates plot of 3 advisor implementatioins'''\n greedy = []\n brute = []\n memo = []\n\n #run each version of subjectSize, 10, 20...\n for num in subjectSizes:\n results = eachRun(numRuns, loadSub, num)\n greedy.append(results[0])\n brute.append(results[1])\n memo.append(results[2])\n\n pylab.title(\"3 Different Advisor Implementations\")\n pylab.xlabel(\"How many Subject Choices Initialy\")\n pylab.ylabel(\"How long each Advisor takes (seconds)\")\n \n pylab.plot(subjectSizes, greedy, label='greedy')\n pylab.plot(subjectSizes, brute, label='brute force')\n pylab.plot(subjectSizes, memo, label='memoization')\n\n pylab.legend(loc=\"best\")\n\n pylab.show()\n\n pylabPlot(numRuns, subjectSizes)\n \nfilename = \"subjects.txt\"\nsubjectSizes = [10, 20, 30, 40, 50]\nmaxWork = 40\nnumRuns = 
5\n\n##comment out to not run plot generation\n##measureTimes(filename, maxWork, subjectSizes, numRuns) \n \n\n" }, { "alpha_fraction": 0.5480656623840332, "alphanum_fraction": 0.555216908454895, "avg_line_length": 24.085294723510742, "blob_id": "05ff02432c2bd9406b52db089fa40d98f367faff", "content_id": "10f5e679f24e8ef1e20b31869c29b7b3b2d32e4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8530, "license_type": "no_license", "max_line_length": 117, "num_lines": 340, "path": "/pset_05_news_parser/ps5/ps5.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 5\n# RSS Feed Filter\n\n#name: Moe Amaya\n#collaborators: none\n#time: 10:00\n\nimport feedparser\nimport string\nimport time\nfrom project_util import translate_html\nfrom news_gui import Popup\n\n#-----------------------------------------------------------------------\n#\n# Problem Set 5\n\n#======================\n# Code for retrieving and parsing\n# Google and Yahoo News feeds\n# Do not change this code\n#======================\n\ndef process(url):\n \"\"\"\n Fetches news items from the rss url and parses them.\n Returns a list of NewsStory-s.\n \"\"\"\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret\n\n#======================\n# Part 1\n# Data structure design\n#======================\n\nclass NewsStory(object):\n def __init__(self, guid, title, subject, summary, link):\n self.guid = guid\n self.title = title\n self.subject = subject\n self.summary = summary\n self.link = link\n\n def get_guid(self):\n return self.guid\n\n def get_title(self):\n return 
self.title\n\n def get_subject(self):\n return self.subject\n\n def get_summary(self):\n return self.summary\n\n def get_link(self):\n return self.link\n\n#======================\n# Part 2\n# Triggers\n#======================\n\nclass Trigger(object):\n def evaluate(self, story):\n \"\"\"\n Returns True if an alert should be generated\n for the given news item, or False otherwise.\n \"\"\"\n raise NotImplementedError\n\n# Whole Word Triggers\n\nclass WordTrigger(Trigger):\n def evaluate(self, story):\n raise NotImplementedError\n \n def is_word_in(self, story):\n lower = story.lower()\n \n sep = string.punctuation\n newString = lower\n \n for char in sep:\n newString = newString.replace(char, \" \")\n \n return newString.split()\n\n\nclass TitleTrigger(WordTrigger):\n def __init__(self, text):\n self.text = text.lower()\n\n def evaluate(self, story):\n title = story.get_title()\n\n wt = WordTrigger()\n titleWords = wt.is_word_in(title)\n \n if self.text in titleWords:\n return True\n else:\n return False\n\n\nclass SubjectTrigger(WordTrigger):\n def __init__(self, text):\n self.text = text.lower()\n\n def evaluate(self, story):\n subject = story.get_subject()\n\n wt = WordTrigger()\n subjectWords = wt.is_word_in(subject)\n\n if self.text in subjectWords:\n return True\n else:\n return False\n \n\nclass SummaryTrigger(WordTrigger):\n def __init__(self, text):\n self.text = text.lower()\n\n def evaluate(self, story):\n summary = story.get_summary()\n\n wt = WordTrigger()\n summaryWords = wt.is_word_in(summary)\n\n if self.text in summaryWords:\n return True\n else:\n return False\n\n\n# Composite Triggers\n\nclass NotTrigger(Trigger):\n def __init__(self, word):\n self.word = word\n\n def evaluate(self, story):\n if not self.word.evaluate(story):\n return True\n else:\n return False\n\nclass AndTrigger(Trigger):\n def __init__(self, word1, word2):\n self.word1 = word1\n self.word2 = word2\n\n def evaluate(self, story):\n test1 = self.word1\n test2 = self.word2\n\n if 
test1.evaluate(story) and test2.evaluate(story):\n return True\n else:\n return False\n \nclass OrTrigger(Trigger):\n def __init__(self, word1, word2):\n self.word1 = word1\n self.word2 = word2\n\n def evaluate(self, story):\n test1 = self.word1\n test2 = self.word2\n\n if test1.evaluate(story) or test2.evaluate(story):\n return True\n else:\n return False\n\n\n# Phrase Trigger\n\nclass PhraseTrigger(Trigger):\n def __init__(self, phrase):\n self.phrase = phrase\n\n def evaluate(self, story):\n self.list = [story.get_guid(), story.get_title(), story.get_subject(), story.get_summary(), story.get_link()]\n for category in self.list:\n if self.phrase in category:\n return True\n return False\n \n#======================\n# Part 3\n# Filtering\n#======================\n\ndef filter_stories(stories, triggerlist):\n \"\"\"\n Takes in a list of NewsStory instances.\n\n Returns: a list of only the stories for which a trigger in triggerlist fires.\n \"\"\"\n listStories = []\n \n for story in stories:\n for trigger in triggerlist:\n if trigger.evaluate(story):\n listStories.append(story)\n \n return listStories\n \n#======================\n# Part 4\n# User-Specified Triggers\n#======================\n\ndef makeTrigger(trigger_map, trigger_type, params, name):\n \"\"\"\n Takes in a map of names to trigger instance, the type of trigger to make,\n and the list of parameters to the constructor, and adds a new trigger\n to the trigger map dictionary.\n\n trigger_map: dictionary with names as keys (strings) and triggers as values\n trigger_type: string indicating the type of trigger to make (ex: \"TITLE\")\n params: list of strings with the inputs to the trigger constructor (ex: [\"world\"])\n name: a string representing the name of the new trigger (ex: \"t1\")\n\n Modifies trigger_map, adding a new key-value pair for this trigger.\n\n Returns: None\n \"\"\"\n \n pass\n\ndef readTriggerConfig(filename):\n \"\"\"\n Returns a list of trigger objects\n that correspond to the rules 
set\n in the file filename\n \"\"\"\n # Here's some code that we give you\n # to read in the file and eliminate\n # blank lines and comments\n triggerfile = open(filename, \"r\")\n all_lines = [ line.rstrip() for line in triggerfile.readlines() ]\n lines = []\n for line in all_lines:\n if len(line) == 0 or line[0] == '#':\n continue\n lines.append(line)\n\n# ---------------------------\n# Didn't have time to finish\n#----------------------------\n\n\n## for trigger in lines:\n## parseTrig = trigger.split()\n##\n## if parseTrig[0] != \"ADD\":\n## if parseTrig[1] == \"TITLE\":\n## \n## elif parseTrig[1] == \"SUBJECT\":\n## pass\n## elif parseTrig[1] == \"SUMMARY\":\n## pass\n## elif parseTrig[1] == \"NOT\":\n## pass\n## elif parseTrig[1] == \"AND\":\n## pass\n## elif parseTrig[1] == \"OR\":\n## pass\n## elif parseTrig[1] == \"PHRASE\":\n## pass\n## else:\n## pass\n \n\n # makeTrigger(trigger_map, trigger_type, params, name)\n \nimport thread\n\ndef main_thread(p):\n # A sample trigger list - you'll replace\n # this with something more configurable in Problem 11\n t1 = TitleTrigger(\"Syria\")\n t2 = SubjectTrigger(\"Iran\")\n t3 = PhraseTrigger(\"Wall Street\")\n t4 = OrTrigger(t2, t3)\n triggerlist = [t1, t4]\n \n\n # triggerlist = readTriggerConfig(\"triggers.txt\")\n\n guidShown = []\n \n while True:\n print \"Polling . . .\",\n\n # Get stories from Google's Top Stories RSS news feed\n stories = process(\"http://news.google.com/?output=rss\")\n # Get stories from Yahoo's Top Stories RSS news feed\n stories.extend(process(\"http://rss.news.yahoo.com/rss/topstories\"))\n\n # Only select stories we're interested in\n stories = filter_stories(stories, triggerlist)\n \n # Don't print a story if we have already printed it before\n newstories = []\n for story in stories:\n print \". . .\",\n if story.get_guid() not in guidShown:\n newstories.append(story)\n print \". . 
.\"\n for story in newstories:\n guidShown.append(story.get_guid())\n p.newWindow(story)\n\n print \"Sleeping...\"\n time.sleep(SLEEPTIME)\n\nSLEEPTIME = 60 #seconds -- how often we poll\nif __name__ == '__main__':\n p = Popup()\n thread.start_new_thread(main_thread, (p,))\n p.start()\n\n" }, { "alpha_fraction": 0.5942708253860474, "alphanum_fraction": 0.6145833134651184, "avg_line_length": 32.68421173095703, "blob_id": "ac4845320bb9a65fcc9ff6da24c923e90b768c32", "content_id": "db7092848bb3c49653c3d050df40f096e59a16ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3840, "license_type": "no_license", "max_line_length": 99, "num_lines": 114, "path": "/pset_08_stochastic_analysis/ps8.py", "repo_name": "orangecig/mit600", "src_encoding": "UTF-8", "text": "# 6.00 Problem Set 8\n#\n# Name: Moe Amaya\n# Collaborators: None\n# Time: 3:00\n\nimport numpy\nimport random\nimport pylab\n#from ps7b import *\nfrom ps7b_precompiled_27 import *\n\n#\n# PROBLEM 1\n# \ndef simulationDelayedTreatment(numViruses, maxPop, maxBirthProb, clearProb,\n resistances, mutProb, numTrials):\n \"\"\"\n Runs simulations and make histograms for problem 1.\n\n Runs numTrials simulations to show the relationship between delayed\n treatment and patient outcome using a histogram.\n\n Histograms of final total virus populations are displayed for delays of 300,\n 150, 75, 0 timesteps (followed by an additional 150 timesteps of\n simulation).\n\n numViruses: number of ResistantVirus to create for patient (an integer)\n maxPop: maximum virus population for patient (an integer)\n maxBirthProb: maximum reproduction probability (a float between 0-1) \n clearProb: maximum clearance probability (a float between 0-1)\n resistances: a list of drugs that each ResistantVirus is resistant to\n (a list of strings, e.g., ['guttagonol'])\n mutProb: mutation probability for each ResistantVirus particle\n (a float between 0-1). 
\n numTrials: number of simulation runs to execute (an integer)\n \"\"\"\n \n def listViruses(numViruses):\n \"\"\"creates list of instantiated simple virus objects\"\"\"\n viruses = []\n for i in range(numViruses):\n vir = ResistantVirus(maxBirthProb, clearProb, resistances, mutProb)\n viruses.append(vir)\n return viruses\n \n def simTimeStep(numVir):\n \"\"\"simulates single run of patient update\"\"\"\n viruses = listViruses(numVir)\n patient = TreatedPatient(viruses, maxPop)\n\n numVir = patient.update()\n return numVir\n\n def simTimeStepDrug(numVir):\n \"\"\"simulates single run of patient update with drug\"\"\"\n drugList = []\n for drug in resistances:\n drugList.append(drug)\n \n viruses = listViruses(numVir)\n patient = TreatedPatient(viruses, maxPop)\n #administer drug\n patient.addPrescription(drugList[0])\n\n numVir = patient.update()\n return numVir\n\n def simTrials(numTrials):\n \"\"\"\n Returns final virus count in a dictionary with each time step\n as key and virus count as value in a list\n \"\"\"\n numVir = numViruses\n dictTrials = {}\n #delay = [0, 75, 150, 300]\n #change to print each timestep\n delay = [300]\n \n for num in delay:\n dictTrials[num] = list()\n \n for num in delay:\n for i in range(numTrials):\n #reset each trial to orginal starting virus count\n numVir = numViruses\n #allow virus population to grow for 0...300 timesteps\n for time in range(num):\n numVir = simTimeStep(numVir)\n #administer Guttagonol for 150 timesteps\n for i in range(150):\n numVir = simTimeStepDrug(numVir)\n #store all the trials into a dictionary where key = timestep#\n dictTrials[num].append(numVir)\n \n return dictTrials\n\n def showPlot(title, x_label, y_label):\n \"\"\"\n Produce a plot of average virus counts at each time step\n \"\"\"\n results = simTrials(numTrials)\n #change to print each timestep\n plot = results[300]\n \n pylab.hist(plot)\n pylab.title(title)\n pylab.xlabel(x_label)\n pylab.ylabel(y_label)\n pylab.show()\n\n 
showPlot(\"300-Timestep Delayed Delivery of Guttagonol\", \"Final virus count\", \"How many trials\")\n\nsimulationDelayedTreatment(100, 1000, 0.1, 0.05, {\"guttagonol\": False}, 0.005, 50)\n" } ]
23
iqbalhs/weagga-bot
https://github.com/iqbalhs/weagga-bot
6e2706ca075d016e4dbaa19b751dbfe47ebd7806
e70b03d13c70f47ee01060a196aec7cfa24715a7
673cd63ee1437133f6291476e18322aac1b4c768
refs/heads/master
2021-05-09T04:17:32.588738
2018-01-28T15:11:51
2018-01-28T15:11:51
119,269,005
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 14, "blob_id": "cb8c8f19399b783d0b093f046bbd689f91babbe0", "content_id": "4c0ffa816a66822ae2befdef2744caab813b9823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/README.md", "repo_name": "iqbalhs/weagga-bot", "src_encoding": "UTF-8", "text": "# weagga-bot\nexperimental bot\n" }, { "alpha_fraction": 0.6078028678894043, "alphanum_fraction": 0.6078028678894043, "avg_line_length": 24.657894134521484, "blob_id": "6488f2c043d18358b81a85dde32db62953bab10b", "content_id": "ca4f2f7b7e5bd81f11bd629b005a83d54d4cccdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 77, "num_lines": 38, "path": "/bot.py", "repo_name": "iqbalhs/weagga-bot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord.voice_client import VoiceClient\n\nstartup_extensions = [\"Music\"]\nbot = commands.Bot(\"xD\")\n\[email protected]\nasync def on_ready():\n print(\"bot online\")\n print(\"Name \" + bot.user.name)\n print(\"ID \" + bot.user.id)\n\nclass Main_Commands():\n def __init__(self, bot):\n self.bot = bot\n\[email protected](pass_context=True)\nasync def ping(ctx):\n await bot.say(\"nahh, it's just your internet that sucks\")\n\n\[email protected](pass_context=True)\nasync def hello(ctx):\n if ctx.message.author.id == '282789892843634688':\n await bot.say(\"uwu\")\n else:\n await bot.say(\"you're not Nue, go away\")\n\nif __name__ == \"__main__\":\n for extension in startup_extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n exc = \"{} : {}\" . format(type(e) . __name__, e)\n print(\"Failed to load extension {}\\n{}\" . format(extension, exc))\n\nbot.run(\"token\")" } ]
2
ricklupton/jupyter_core
https://github.com/ricklupton/jupyter_core
91fb688d02a4f9f542c3ff475bb6a1e5adfcca24
53cb8bd23ce1af9bba9d0493c2b5107129eda4b5
f8208f156929be071e0f2997ffe63eae2a448197
refs/heads/master
2020-12-28T20:31:52.975856
2015-05-27T21:19:05
2015-05-27T21:19:05
36,426,011
0
0
null
2015-05-28T08:47:24
2015-05-28T08:47:25
2015-05-27T21:19:05
Python
[ { "alpha_fraction": 0.6213960647583008, "alphanum_fraction": 0.6282246112823486, "avg_line_length": 20.590164184570312, "blob_id": "7815b064adcf45ff9ae1251892e9f37fe3eea28b", "content_id": "aa5e33342c07e1c3fb7a8b62f7709beb9cf1e2a6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "permissive", "max_line_length": 66, "num_lines": 61, "path": "/jupyter_core/tests/test_application.py", "repo_name": "ricklupton/jupyter_core", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nfrom tempfile import mkdtemp\n\nimport pytest\nfrom traitlets import Integer\n\nfrom jupyter_core.application import JupyterApp, NoStart\n\npjoin = os.path.join\n\n\ndef test_basic():\n app = JupyterApp()\n\n\ndef test_default_traits():\n app = JupyterApp()\n for trait_name in app.traits():\n value = getattr(app, trait_name)\n\nclass DummyApp(JupyterApp):\n name = \"dummy-app\"\n n = Integer(0, config=True)\n\n_dummy_config = \"\"\"\nc.DummyApp.n = 10\n\"\"\"\n\ndef test_custom_config():\n app = DummyApp()\n td = mkdtemp()\n fname = pjoin(td, 'config.py')\n with open(fname, 'w') as f:\n f.write(_dummy_config)\n app.initialize(['--config', fname])\n shutil.rmtree(td)\n assert app.config_file == fname\n assert app.n == 10\n\ndef test_cli_override():\n app = DummyApp()\n td = mkdtemp()\n fname = pjoin(td, 'config.py')\n with open(fname, 'w') as f:\n f.write(_dummy_config)\n app.initialize(['--config', fname, '--DummyApp.n=20'])\n shutil.rmtree(td)\n assert app.n == 20\n\n\ndef test_generate_config():\n td = mkdtemp()\n app = DummyApp(config_dir=td)\n app.initialize(['--generate-config'])\n assert app.generate_config\n \n with pytest.raises(NoStart):\n app.start()\n \n assert os.path.exists(os.path.join(td, 'dummy_app_config.py'))\n\n" }, { "alpha_fraction": 0.6229874491691589, "alphanum_fraction": 0.6261180639266968, "avg_line_length": 25.93975830078125, "blob_id": 
"860a986edbcf0f151c6a3369c12d280e6612fd54", "content_id": "3b4a3f22d58396bd7349166ded8a6c8fa5682c3d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2236, "license_type": "permissive", "max_line_length": 83, "num_lines": 83, "path": "/jupyter_core/command.py", "repo_name": "ricklupton/jupyter_core", "src_encoding": "UTF-8", "text": "\"\"\"The root `jupyter` command.\n\nThis does nothing other than dispatch to subcommands.\n\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\ntry:\n # py3\n from shutil import which\nexcept ImportError:\n from .utils.shutil_which import which\n\nclass JupyterParser(argparse.ArgumentParser):\n \n @property\n def epilog(self):\n \"\"\"Add subcommands to epilog on request\n \n Avoids searching PATH for subcommands unless help output is requested.\n \"\"\"\n return 'Available subcommands: %s' % ' '.join(list_subcommands())\n \n @epilog.setter\n def epilog(self, x):\n \"\"\"Ignore epilog set in Parser.__init__\"\"\"\n pass\n\n\ndef jupyter_parser():\n parser = JupyterParser(\n description=\"Jupyter: Interactive Computing\"\n )\n parser.add_argument('subcommand', type=str, help='The subcommand to launch')\n return parser\n\ndef list_subcommands():\n \"\"\"List all jupyter subcommands\n \n searches PATH for `jupyter-name`\n \n Returns a list of jupyter's subcommand names, without the `jupyter-` prefix.\n Nested children (e.g. 
jupyter-sub-subsub) are not included.\n \"\"\"\n path = os.environ.get('PATH') or os.defpath\n subcommands = set()\n for d in path.split(os.pathsep):\n try:\n names = os.listdir(d)\n except OSError:\n continue\n for name in names:\n if name.startswith('jupyter-'):\n subcommands.add(name.split('-')[1])\n return subcommands\n\ndef main():\n if len(sys.argv) > 1 and not sys.argv[1].startswith('-'):\n # Don't parse if a subcommand is given\n # Avoids argparse gobbling up args passed to subcommand, such as `-h`.\n subcommand = sys.argv[1]\n else:\n parser = jupyter_parser()\n args, opts = parser.parse_known_args()\n subcommand = args.subcommand\n \n command = 'jupyter-' + subcommand\n try:\n os.execvp(command, sys.argv[1:])\n except OSError:\n print(\"jupyter: %r is not a Jupyter command\" % subcommand, file=sys.stderr)\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n" } ]
2
ranmengyuan/PredictFlow
https://github.com/ranmengyuan/PredictFlow
08d9d551a60c94b16fcac8e3599c60f6a5b16834
6fb06912998c7fb9ef17a531a823ada85d43ddb9
6bc27634db10baf268f02712d19cd24ae6b7b324
refs/heads/master
2021-04-26T23:56:42.533298
2018-03-06T02:38:10
2018-03-06T02:38:10
123,883,960
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.45621126890182495, "alphanum_fraction": 0.4729183614253998, "avg_line_length": 29.170732498168945, "blob_id": "8f55ee6bc928f6a3de0a502624231e44a8b52ca3", "content_id": "5e97b01d42f4c33ea8b9e74f53a194f03926fb16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4101, "license_type": "no_license", "max_line_length": 117, "num_lines": 123, "path": "/analyze/forecast.py", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "from dataBase.sql_helper import conn_db, exe_query, conn_close\nfrom sklearn.linear_model import Lasso\nimport random\n\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n# import time\n# from sklearn.metrics import r2_score\n\n\ndef get_data():\n \"\"\"\n 从数据库中获得数据\n :return:\n \"\"\"\n x_train = []\n y_train = []\n x_test = []\n try:\n conn = conn_db()\n cur = conn.cursor()\n for i in range(2000):\n sql = \"SELECT shop_id, location_id, per_pay, shop_level FROM ShopInfo WHERE shop_id='\" + str(i + 1) + \"'\"\n shop_infos = exe_query(cur, sql)\n infos = shop_infos\n for m in range(14):\n for info in infos:\n temp = []\n temp.append(int(info[0]))\n temp.append(int(info[1]))\n temp.append(int(info[2]))\n temp.append(int(info[3]))\n temp.append(int(m + 489))\n x_test.append(temp)\n\n for j in range(489):\n for shop_info in shop_infos:\n temp = []\n temp.append(int(shop_info[0]))\n temp.append(int(shop_info[1]))\n temp.append(int(shop_info[2]))\n temp.append(int(shop_info[3]))\n pay = [0, 0]\n # view = 0\n sql = \"SELECT date,sum FROM UserPay WHERE shop_id='\" + str(i + 1) + \"' AND date='\" + str(j) + \"'\"\n user_pays = exe_query(cur, sql)\n for user_pay in user_pays:\n pay[0] += int(user_pay[0])\n pay[1] += int(user_pay[1])\n temp.append(int(pay[0]))\n x_train.append(temp)\n y_train.append(int(pay[1]))\n # sql = \"SELECT date,sum FROM UserPay WHERE shop_id='\" + str(i + 1) + \"' AND date='\" + str(j) + \"'\"\n # user_views = exe_query(cur, 
sql)\n # for user_view in user_views:\n # view += int(user_view[0])\n # x_train[5].append(int(view))\n # print(shop_info[0], user_pay[0])\n except Exception as e:\n print(e)\n finally:\n # conn_close(conn, cur)\n return x_train, y_train, x_test\n\n\ndef lasso():\n # Lasso 回归的参数\n\n alpha = 0.1\n\n lasso = Lasso(max_iter=10000, alpha=alpha)\n\n # 基于训练数据,得到的模型的测试结果\n\n # 这里使用的是坐标轴下降算法(coordinate descent)\n x_train, y_train, x_test = get_data()\n print(len(x_train), len(y_train), len(x_test))\n y_pred_lasso = lasso.fit(x_train, y_train).predict(x_test)\n i = 0\n n = 1\n try:\n f = open(\"prediction.csv\", \"w+\")\n li = str(n) + \",\"\n for result in y_pred_lasso:\n ran = random.randint(-20, 20) # 随机误差\n if i == 13:\n i = 0\n li += str(int(result+ran)) + \"\\n\"\n f.writelines(li)\n n += 1\n li = str(n) + \",\"\n else:\n i += 1\n li += str(int(result+ran)) + \",\"\n except Exception as e:\n print(e)\n finally:\n f.close()\n\n # 这里是R2可决系数(coefficient of determination)\n\n # 回归平方和(RSS)在总变差(TSS)中所占的比重称为可决系数\n\n # 可决系数可以作为综合度量回归模型对样本观测值拟合优度的度量指标。\n\n # 可决系数越大,说明在总变差中由模型作出了解释的部分占的比重越大,模型拟合优度越好。\n\n # 反之可决系数小,说明模型对样本观测值的拟合程度越差。\n\n # R2可决系数最好的效果是1。\n\n # r2_score_lasso = r2_score(y_test, y_pred_lasso)\n #\n # print(\"测试集上的R2可决系数 : %f\" % r2_score_lasso)\n #\n # plt.plot(lasso.coef_, label='Lasso coefficients')\n #\n # plt.plot(coef, '--', label='original coefficients')\n #\n # plt.legend(loc='best')\n #\n # plt.show()\n" }, { "alpha_fraction": 0.4147249460220337, "alphanum_fraction": 0.44133463501930237, "avg_line_length": 32.73488235473633, "blob_id": "a3bb99d79b6d671e16bc5b68cd981ccab03b3c4a", "content_id": "ba8b5cb5e0c6196dcdd8dae979a262a844c95296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7435, "license_type": "no_license", "max_line_length": 123, "num_lines": 215, "path": "/file/file_processing.py", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "from dataBase.sql_helper import 
conn_db, exe_table, exe_update, conn_close\n\n\ndef read_by_line(address):\n \"\"\"\n 带缓存的文件读取一行数据\n :param address:\n :return:\n \"\"\"\n file = open(address)\n file_content = []\n while 1:\n lines = file.readlines(100000)\n if not lines:\n break\n for line in lines:\n file_content.append(line)\n return file_content\n\n\ndef shop_info_database(file_content):\n \"\"\"\n 将文本文件处理并存入数据库中\n :param file_content:\n :return:\n \"\"\"\n try:\n conn = conn_db()\n cur = conn.cursor()\n sql = \"DROP TABLE if EXISTS ShopInfo\"\n exe_table(cur, sql)\n sql = \"CREATE TABLE ShopInfo(shop_id INT NOT NULL AUTO_INCREMENT,city_name VARCHAR (255),location_id INT ,\" \\\n \"per_pay INT ,score INT ,comment_cnt INT ,shop_level INT ,cate_1_name VARCHAR (255) ,cate_2_name\" \\\n \" VARCHAR (255) ,cate_3_name VARCHAR (255) ,PRIMARY KEY (shop_id)) ENGINE = InnoDB DEFAULT CHARSET = UTF8\"\n exe_table(cur, sql)\n for i in range(0, len(file_content)):\n contents = file_content[i].split(\",\")\n if len(contents) == 10:\n sql = \"INSERT INTO ShopInfo VALUES ('\" + contents[0] + \"','\" + contents[1] + \"','\" + contents[2] + \"'\" \\\n \",'\" + \\\n contents[3] + \"','\" + contents[4] + \"','\" + contents[5] + \"','\" + contents[6] + \"',\" \\\n \"'\" + contents[\n 7] + \"','\" + contents[8] + \"','\" + contents[9] + \"')\"\n elif len(contents) == 9:\n sql = \"INSERT INTO ShopInfo VALUES ('\" + contents[0] + \"','\" + contents[1] + \"','\" + contents[2] + \"'\" \\\n \",'\" + \\\n contents[3] + \"','\" + contents[4] + \"','\" + contents[5] + \"','\" + contents[6] + \"',\" \\\n \"'\" + contents[\n 7] + \"','\" + contents[8] + \"',NULL)\"\n exe_update(conn, cur, sql)\n\n except Exception as e:\n print(e)\n print(contents[0])\n finally:\n conn_close(conn, cur)\n\n\ndef get_user_pay(address):\n \"\"\"\n 获得用户的购物信息并存入数据库\n :param address:\n :return:\n \"\"\"\n try:\n conn = conn_db()\n cur = conn.cursor()\n sql = \"DROP TABLE if EXISTS UserPay\"\n exe_table(cur, sql)\n sql = \"CREATE TABLE UserPay(id 
INT NOT NULL AUTO_INCREMENT,shop_id INT NOT NULL ,date INT,sum INT ,\" \\\n \"PRIMARY KEY (id) ,FOREIGN KEY (shop_id) REFERENCES ShopInfo(shop_id)) \" \\\n \"ENGINE = InnoDB DEFAULT CHARSET = UTF8\"\n exe_table(cur, sql)\n\n file = open(address)\n shop = ''\n num = []\n for i in range(489):\n num.append(0)\n while 1:\n lines = file.readlines(10000)\n if not lines:\n user_pay_database(conn, cur, num, shop)\n break\n for line in lines:\n file_content = line.split(\",\")\n if shop == '':\n shop = file_content[1]\n dates = file_content[2].split(\" \")\n num[cal_day(dates[0])] += 1\n elif shop == file_content[1]:\n dates = file_content[2].split(\" \")\n num[cal_day(dates[0])] += 1\n elif shop != file_content[1]:\n user_pay_database(conn, cur, num, shop)\n shop = file_content[1]\n for i in range(489):\n num[i] = 0\n dates = file_content[2].split(\" \")\n num[cal_day(dates[0])] += 1\n except Exception as e:\n print(e)\n finally:\n conn_close(conn, cur)\n\n\ndef cal_day(date):\n \"\"\"\n 计算与2015年7月01日相差的天数\n :param date:\n :return:\n \"\"\"\n day1 = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n day2 = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n start = 0\n for i in range(6):\n start += day1[i]\n deal_date = date.split(\"-\")\n num = 0\n if deal_date[0] == \"2015\":\n for i in range(int(deal_date[1]) - 1):\n num += day1[i]\n num += int(deal_date[2]) - 1\n return num - start\n elif deal_date[0] == \"2016\":\n for i in range(int(deal_date[1]) - 1):\n num += day2[i]\n num += int(deal_date[2]) - 1\n return 365 - start + num\n\n\ndef user_pay_database(conn, cur, num, shop):\n \"\"\"\n 将用户购买信息存入数据库\n :param conn:\n :param cur:\n :param num:\n :param shop:\n :return:\n \"\"\"\n try:\n for i in range(489):\n sql = \"INSERT INTO UserPay(shop_id ,date ,sum) VALUES ('\" + shop + \"','\" + str(i) + \"','\" + str(\n num[i]) + \"')\"\n exe_update(conn, cur, sql)\n except Exception as e:\n print(e)\n print(shop, i, num[i])\n\n\ndef get_user_view(address):\n \"\"\"\n 
获得用户的浏览信息并存入数据库\n :param address:\n :return:\n \"\"\"\n try:\n conn = conn_db()\n cur = conn.cursor()\n sql = \"DROP TABLE if EXISTS UserView\"\n exe_table(cur, sql)\n sql = \"CREATE TABLE UserView(id INT NOT NULL AUTO_INCREMENT,shop_id INT NOT NULL ,date INT,sum INT ,\" \\\n \"PRIMARY KEY (id) ,FOREIGN KEY (shop_id) REFERENCES ShopInfo(shop_id)) \" \\\n \"ENGINE = InnoDB DEFAULT CHARSET = UTF8\"\n exe_table(cur, sql)\n\n file = open(address)\n shop = ''\n num = []\n for i in range(489):\n num.append(0)\n while 1:\n lines = file.readlines(10000)\n if not lines:\n user_view_database(conn, cur, num, shop)\n break\n for line in lines:\n file_content = line.split(\",\")\n if shop == '':\n shop = file_content[1]\n dates = file_content[2].split(\" \")\n num[cal_day(dates[0])] += 1\n elif shop == file_content[1]:\n dates = file_content[2].split(\" \")\n num[cal_day(dates[0])] += 1\n elif shop != file_content[1]:\n user_view_database(conn, cur, num, shop)\n shop = file_content[1]\n for i in range(489):\n num[i] = 0\n dates = file_content[2].split(\" \")\n num[cal_day(dates[0])] += 1\n except Exception as e:\n print(e)\n finally:\n conn_close(conn, cur)\n\n\ndef user_view_database(conn, cur, num, shop):\n \"\"\"\n 将用户浏览信息存入数据库\n :param conn:\n :param cur:\n :param num:\n :param shop:\n :return:\n \"\"\"\n try:\n for i in range(489):\n sql = \"INSERT INTO UserView(shop_id ,date ,sum) VALUES ('\" + shop + \"','\" + str(i) + \"','\" + str(\n num[i]) + \"')\"\n exe_update(conn, cur, sql)\n except Exception as e:\n print(e)\n print(shop, i, num[i])\n" }, { "alpha_fraction": 0.4826478064060211, "alphanum_fraction": 0.5019280314445496, "avg_line_length": 26.280702590942383, "blob_id": "329989c573e99875cdd776e76c9f93984fd5b37f", "content_id": "297bbb729b9aa72e5c83b9c5e550580bc450fd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 109, "num_lines": 57, "path": 
"/bean/shop_info.py", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "class ShopInfo:\n \"\"\"\n 商家信息\n \"\"\"\n shop_id = 0 # 商家id\n city_name = '' # 市名\n location_id = 0 # 所在位置编号,位置接近的商家具有相同的编号\n per_pay = 0 # 人均消费(数值越大消费越高)\n score = 0 # 评分(数值越大评分越高)\n comment_cnt = 0 # 评论数(数值越大评论数越多)\n shop_level = 0 # 门店等级(数值越大门店等级越高)\n cate_1_name = '' # 一级品类名称\n cate_2_name = '' # 二级分类名称\n cate_3_name = '' # 三级分类名称\n\n def __init__(self):\n \"\"\"\n 构造函数\n :return:\n \"\"\"\n self.shop_id = 0\n self.city_name = ''\n self.location_id = 0\n self.per_pay = 0\n self.score = 0\n self.comment_cnt = 0\n self.shop_level = 0\n self.cate_1_name = ''\n self.cate_2_name = ''\n self.cate_3_name = ''\n\n def __init__(self, shop_id, city_name, location_id, per_pay, score, comment_cnt, shop_level, cate_1_name,\n cate_2_name, cate_3_name):\n \"\"\"\n 构造函数重置\n :param shop_id:\n :param city_name:\n :param location_id:\n :param per_pay:\n :param score:\n :param comment_cnt:\n :param shop_level:\n :param cate_1_name:\n :param cate_2_name:\n :param cate_3_name:\n :return:\n \"\"\"\n self.shop_id = shop_id\n self.city_name = city_name\n self.location_id = location_id\n self.per_pay = per_pay\n self.score = score\n self.comment_cnt = comment_cnt\n self.shop_level = shop_level\n self.cate_1_name = cate_1_name\n self.cate_2_name = cate_2_name\n self.cate_3_name = cate_3_name\n\n" }, { "alpha_fraction": 0.84375, "alphanum_fraction": 0.848437488079071, "avg_line_length": 15.815789222717285, "blob_id": "ef95f17f3aba925d473d896e2116a9ce06b17ab8", "content_id": "f8dc871ac49183e80b9eb330b1b6059c2dac9470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 101, "num_lines": 38, "path": "/README.md", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "# 
PredictFlow\n\nPredictFlow是一个预测口碑商家流量的工具,通过已知的口碑商家流量预测各个口碑商家未来一个月的流量。主要是运用TF-IDF算法对特征信息进行提取,再通过朴素贝叶斯算法对未来一个月的流量进行预测。\n\n# 入门\n\nPredictFlow包括analyze、bean、dataBase、file和main。\n\nfile主要是文件操作,对给定的已知数据进行读取,建立训练集。\n\nbean和dataBase主要是对数据库进行操作。\n\nanalyze主要是通过算法对数据进行预测。\n\nmain主要是对整个程序进行控制。\n\n# 文件结构\n\nfile主要是文件操作,对给定的已知数据进行读取,建立训练集。\n\nbean和dataBase主要是对数据库进行操作。\n\nanalyze主要是通过算法对数据进行预测。\n\nmanage.py主要是对整个程序进行控制。\n\nprediction.csv是预测结果。\n\n# 支持平台\n\nPredictFlow基于Python3.5。如果想要运行PredictFlow推荐下载Python3.x解析器,并且需要pymysql,sklearn等包的支持。同时,需要注意处理文件和网页的格式。\n\n# 疑问\n\n如果您发现了诸如崩溃、意外行为或类似的问题,请访问[issue tracker](https://github.com/ranmengyuan/PredictFlow/issues)方便交流。\n\n谢谢!\nmengyuan\n\n" }, { "alpha_fraction": 0.6797520518302917, "alphanum_fraction": 0.6797520518302917, "avg_line_length": 52.77777862548828, "blob_id": "8bc95ef8cf54b93899fff13f7bba6dd33f2ab8b0", "content_id": "4eb5048380456466378caea9b2dc75c5d8ca6332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 100, "num_lines": 9, "path": "/manage.py", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "# from file.file_processing import read_by_line, shop_info_database, get_user_pay, get_user_view\nfrom analyze.forecast import lasso\n\nif __name__ == \"__main__\":\n # file_content = read_by_line(\"//Volumes//Transcend//文件//实验室//口碑商家流量预测//dataset//shop_info.txt\")\n # shop_info_database(file_content)\n # get_user_pay(\"//Volumes//Transcend//文件//实验室//口碑商家流量预测//dataset//user_pay.txt\")\n # get_user_view(\"//Volumes//Transcend//文件//实验室//口碑商家流量预测//dataset//user_view.txt\")\n lasso()\n" }, { "alpha_fraction": 0.3954372704029083, "alphanum_fraction": 0.4220532178878784, "avg_line_length": 17.785715103149414, "blob_id": "e9dc370130526dfc638bc6922f171a0366bec405", "content_id": "d28deb37f9cd64e84b11887b3663a8ff03e2f09d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/bean/user.py", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "class User:\n \"\"\"\n 用户行为\n \"\"\"\n shop_id = 0 # 商家id,与shop_info对应\n date = 0 # 距离2015年07月01号的天数\n sum = 0 # 用户当天某种行为的次数\n\n def __init__(self):\n \"\"\"\n 构造函数\n :return:\n \"\"\"\n self.shop_id = 0\n self.date = 0\n self.sum = 0\n\n def __init__(self, shop_id, date, sum):\n \"\"\"\n 构造函数重置\n :param shop_id:\n :param date:\n :param sum:\n :return:\n \"\"\"\n self.shop_id = shop_id\n self.date = date\n self.sum = sum\n" }, { "alpha_fraction": 0.5403128862380981, "alphanum_fraction": 0.5463297367095947, "avg_line_length": 13.839285850524902, "blob_id": "2ba6359e76abef4eb7a9f13d0417bfa450a857eb", "content_id": "0d70409cf5cc27b26c23914dc04b1a71d6064296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 905, "license_type": "no_license", "max_line_length": 119, "num_lines": 56, "path": "/dataBase/sql_helper.py", "repo_name": "ranmengyuan/PredictFlow", "src_encoding": "UTF-8", "text": "import pymysql\n\n\n# from builtins import int\n\n\ndef conn_db():\n \"\"\"\n 连接数据库\n \"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='B221gt12345', db='Tianchi', charset='UTF8')\n return conn\n\n\ndef exe_table(cur, sql):\n \"\"\"\n 创建表格或删除\n :param cur:\n :param sql:\n :return:\n \"\"\"\n sta = cur.execute(sql)\n return sta\n\n\ndef exe_update(conn, cur, sql):\n \"\"\"\n 更新或插入操作或删除操作\n :param conn\n :param cur\n :param sql\n \"\"\"\n sta = cur.execute(sql)\n # \"delete from exe where Id=%d\" % (int(eachID))\n conn.commit()\n return sta\n\n\ndef exe_query(cur, sql):\n \"\"\"\n 查找操作\n :param cur\n :param sql\n \"\"\"\n cur.execute(sql)\n return cur\n\n\ndef conn_close(conn, cur):\n \"\"\"\n 关闭连接,释放资源\n :param conn\n :param cur\n \"\"\"\n cur.close()\n conn.close()\n" } ]
7
ponygirl123/pythonUI
https://github.com/ponygirl123/pythonUI
794b5da7603f9fc79afca622bc7d75c844fe014d
4bcc744a863b6808a952ce0d14fb21b0c0a5aa02
4414ed7e1b42a6b36792f22bf5c1fb53680a44af
refs/heads/master
2021-05-09T13:23:16.668943
2018-01-27T12:41:02
2018-01-27T12:41:02
119,032,805
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.65101158618927, "alphanum_fraction": 0.6907514333724976, "avg_line_length": 23.73214340209961, "blob_id": "b53872b967db244559cbf4338d7425d3807a6b53", "content_id": "81b109605f1fb3036ca1c607f290534e2c765049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/extrafeatures.py", "repo_name": "ponygirl123/pythonUI", "src_encoding": "UTF-8", "text": "'''\nMay 25, 2015\nRecipe: B04829_01_08\n@author: Burkhard Meier\n'''\n#======================\n# imports\n#======================\nimport tkinter as tk\nfrom tkinter import ttk\n\nwin = tk.Tk()\n\nwin.title(\"Python GUI\")\n\naLabel = ttk.Label(win, text=\"A Label\")\naLabel.grid(column=0, row=0)\n\ndef clickMe():\n action.configure(text='Hello ' + name.get())\n\nttk.Label(win, text=\"Enter a name:\").grid(column=0, row=0)\n\nname = tk.StringVar()\nnameEntered = ttk.Entry(win, width=12, textvariable=name)\nnameEntered.grid(column=0, row=1)\n\naction = ttk.Button(win, text=\"Click Me!\", command=clickMe)\naction.grid(column=2, row=1)\naction.configure(state='disabled')\n\nttk.Label(win, text=\"Choose a Number:\").grid(column=1, row=0)\nnumber = tk.StringVar()\nnumberChosen = ttk.Combobox(win, width = 12, textvariable = number)\nnumberChosen[\"values\"] = (1,2,4,42,100)\nnumberChosen.grid(column=1, row = 1)\nnumberChosen.current(0)\n\nchVardis = tk.IntVar()\ncheck1 = tk.Checkbutton(win, text=\"Disabled\", variable=chVardis, state='disabled')\ncheck1.select()\ncheck1.grid(column=0,row=4, sticky=tk.W)\n\nchVarUn = tk.IntVar()\ncheck2 = tk.Checkbutton(win, text=\"Unchecked\", variable=chVarUn)\ncheck2.deselect()\ncheck2.grid(column=1,row=4, sticky=tk.W)\n\nchVarEn = tk.IntVar()\ncheck3 = tk.Checkbutton(win, text=\"Enabled\", variable=chVarEn)\ncheck3.select()\ncheck3.grid(column=2,row=4, sticky=tk.W)\n\nnameEntered.focus()\n\nwin.mainloop()" } ]
1
SRLKilling/interactive-rubiks
https://github.com/SRLKilling/interactive-rubiks
5c0aca6d9f282c25357d157e1197a2b96096e9ba
836b0249f56a1af576d15202bd678fa5de39f0df
74fad59d1c4b1f1b6dcdb68dad788bc4e714c90c
refs/heads/master
2021-01-17T14:23:14.095026
2017-03-09T07:59:48
2017-03-09T07:59:48
84,088,349
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46806764602661133, "alphanum_fraction": 0.5157355070114136, "avg_line_length": 23.148515701293945, "blob_id": "1d425fb9641c3a6743f15b0af9c77943e4091007", "content_id": "0cefe726ef489ed53332631edc4f8cd135dfcf27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9755, "license_type": "no_license", "max_line_length": 313, "num_lines": 404, "path": "/cube.py", "repo_name": "SRLKilling/interactive-rubiks", "src_encoding": "UTF-8", "text": "from algorithm import Algorithm\nimport colorama\n\nclass Cube:\n\tdef __init__(self):\n\t\tself.reset()\n\t\tself.algo = {}\n\t\tself.options = {}\n\t\tself.options[\"verbose\"] = 2\n\t\tself.options[\"interactive\"] = True\n\t\tself.options[\"rotateMode\"] = True\n\t\tself.options[\"coloredOutput\"] = True\n\t\t\n\tdef reset(self):\n\t\tself.faces = [[i for j in range(9)] for i in range(6)]\n\t\t\n\tdef pause(self):\n\t\tif self.options[\"interactive\"]:\n\t\t\tinput('')\n\t\t\t\n\tdef pr(self, str):\n\t\tif self.options[\"verbose\"]:\n\t\t\tprint(str)\n\t\n\t# Primary moves\n\t\n\tdef rotateLeft(self, n=1):\n\t\tfor i in range(n):\n\t\t\tself.faces[0], self.faces[1], self.faces[2], self.faces[3] = self.faces[3], self.faces[0], self.faces[1], self.faces[2]\n\t\t\tf = self.faces[4]; f[0], f[1], f[2], f[3], f[5], f[6], f[7], f[8] = f[2], f[5], f[8], f[1], f[7], f[0], f[3], f[6]\n\t\t\tf = self.faces[5]; f[0], f[1], f[2], f[3], f[5], f[6], f[7], f[8] = f[6], f[3], f[0], f[7], f[1], f[8], f[5], f[2]\t\t\n\tdef rotateDown(self, n=1):\n\t\tfor i in range(n):\n\t\t\tf1, f2 = self.faces[5][::], self.faces[3][::]\n\t\t\tself.faces[1], self.faces[5] = self.faces[4][::], self.faces[1][::]\n\t\t\tf = self.faces[3]; f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8] = f1[8], f1[7], f1[6], f1[5], f1[4], f1[3], f1[2], f1[1], f1[0]\n\t\t\tf = self.faces[4]; f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8] = f2[8], f2[7], f2[6], f2[5], f2[4], f2[3], f2[2], f2[1], 
f2[0]\n\t\t\tf = self.faces[2]; f[0], f[1], f[2], f[3], f[5], f[6], f[7], f[8] = f[2], f[5], f[8], f[1], f[7], f[0], f[3], f[6]\n\t\t\tf = self.faces[0]; f[0], f[1], f[2], f[3], f[5], f[6], f[7], f[8] = f[6], f[3], f[0], f[7], f[1], f[8], f[5], f[2]\t\t\n\tdef turnFace(self, n=1):\n\t\tfor i in range(n):\n\t\t\tf = self.faces[1]; f[0], f[1], f[2], f[3], f[5], f[6], f[7], f[8] = f[6], f[3], f[0], f[7], f[1], f[8], f[5], f[2]\n\t\t\tf1, f2, f3, f4 = self.faces[0], self.faces[4], self.faces[2], self.faces[5]\n\t\t\tf1[8], f1[5], f1[2], f2[6], f2[7], f2[8], f3[0], f3[3], f3[6], f4[2], f4[1], f4[0] = f4[2], f4[1], f4[0], f1[8], f1[5], f1[2], f2[6], f2[7], f2[8], f3[0], f3[3], f3[6]\n\t\t\n\t\t\n\t# Simple moves\n\t\t\n\tdef F(self, n=1):\n\t\tself.turnFace(n)\n\t\treturn self\n\tdef F2(self):\n\t\tself.turnFace(2)\n\t\treturn self\n\tdef F_(self):\n\t\tself.turnFace(3)\n\t\treturn self\n\t\t\n\tdef B(self, n=1):\n\t\tself.rotateLeft(2)\n\t\tself.turnFace(n)\n\t\tself.rotateLeft(2)\n\t\treturn self\n\tdef B2(self):\n\t\treturn self.B(2)\n\tdef B_(self):\n\t\treturn self.B(3)\n\t\t\n\tdef U(self, n=1):\n\t\tself.rotateDown()\n\t\tself.turnFace(n)\n\t\tself.rotateDown(3)\n\t\treturn self\n\tdef U2(self):\n\t\tself.U(2)\n\t\treturn self\n\tdef U_(self):\n\t\tself.U(3)\n\t\treturn self\n\t\t\n\tdef D(self, n=1):\n\t\tself.rotateDown(3)\n\t\tself.turnFace(n)\n\t\tself.rotateDown()\n\t\treturn self\n\tdef D2(self):\n\t\tself.D(2)\n\t\treturn self\n\tdef D_(self):\n\t\tself.D(3)\n\t\treturn self\n\t\t\n\tdef R(self, n=1):\n\t\tself.rotateLeft(3)\n\t\tself.turnFace(n)\n\t\tself.rotateLeft()\n\t\treturn self\n\tdef R2(self):\n\t\tself.R(2)\n\t\treturn self\n\tdef R_(self):\n\t\tself.R(3)\n\t\treturn self\n\t\t\n\tdef L(self, n=1):\n\t\tself.rotateLeft()\n\t\tself.turnFace(n)\n\t\tself.rotateLeft(3)\n\t\treturn self\n\tdef L2(self):\n\t\tself.L(2)\n\t\treturn self\n\tdef L_(self):\n\t\tself.L(3)\n\t\treturn self\n\t\t\n\t## Double moves\n\t\t\n\tdef f(self, 
n=1):\n\t\tself.B(n)\n\t\tself.z(n)\n\t\treturn self\n\tdef f2(self):\n\t\treturn self.f(2)\n\tdef f_(self):\n\t\treturn self.f(3)\n\t\n\tdef b(self, n=1):\n\t\tself.F(n)\n\t\tself.z(3*n)\n\t\treturn self\n\tdef b2(self):\n\t\treturn this.b(2)\n\tdef b_(self):\n\t\treturn this.b(3)\n\t\n\tdef u(self, n=1):\n\t\tself.D(n)\n\t\tself.y(n)\n\t\treturn self\n\tdef u2(self):\n\t\treturn self.u(2)\n\tdef u_(self):\n\t\treturn self.u(3)\n\t\t\n\tdef d(self, n=1):\n\t\tself.U(n)\n\t\tself.y(3*n)\n\t\treturn self\n\tdef d2(self):\n\t\treturn self.d(2)\n\tdef d_(self):\n\t\treturn self.d(3)\n\t\n\tdef r(self, n=1):\n\t\tself.L(n)\n\t\tself.x(n)\n\t\treturn self\n\tdef r2(self):\n\t\treturn self.r(2)\n\tdef r_(self):\n\t\treturn self.r(3)\n\t\t\n\tdef l(self, n=1):\n\t\tself.R(n)\n\t\tself.x(3*n)\n\t\treturn self\n\tdef l2(self):\n\t\treturn self.l(2)\n\tdef l_(self):\n\t\treturn self.l(3)\n\t\n\t## Middle moves\n\t\n\tdef M(self, n=1):\n\t\tself.R(n)\n\t\tself.L(3*n)\n\t\tself.x(3*n)\n\tdef m(self, n=1):\n\t\tself.L(n)\n\t\tself.R(3*n)\n\t\t\n\tdef E(self, n=1):\n\t\tself.U(n)\n\t\tself.D(3*n)\n\t\tself.y(3*n)\n\tdef e(self, n=1):\n\t\tself.U(3*n)\n\t\tself.D(n)\n\t\n\tdef S(self, n=1):\n\t\tself.F(3*n)\n\t\tself.B(n)\n\t\tself.z(n)\n\tdef s(self, n=1):\n\t\tself.F(n)\n\t\tself.B(3*n)\n\t\n\t## Cube rotations\n\t\n\tdef x(self, n=1):\n\t\tself.rotateDown(3*n)\n\t\treturn self\n\tdef x2(self):\n\t\treturn self.x(2)\n\tdef x_(self):\n\t\treturn self.x(3)\n\n\tdef y(self, n=1):\n\t\tself.rotateLeft(3*n)\n\t\treturn self\n\tdef y2(self):\n\t\treturn self.y(2)\n\tdef y_(self):\n\t\treturn self.y(3)\n\t\t\n\tdef z(self, n=1):\n\t\tself.rotateLeft(3)\n\t\tself.rotateDown(n)\n\t\tself.rotateLeft()\n\t\treturn self\n\tdef z2(self):\n\t\treturn self.z(2)\n\tdef z_(self):\n\t\treturn self.z(3)\n\t\t\n\t\n\t## Eval and algorithms\n\t\t\n\tdef eval(self, str):\n\t\talgo = Algorithm()\n\t\tchanger = algo.parseLine(str)\n\t\talgo.do(self)\n\t\tif changer: self.printCube()\n\t\t\t\n\tdef 
do(self, str, m=0, silent=False):\n\t\tself.z(m)\n\t\ti = 0\n\t\ts = \"\"\n\t\tsuffix = ['', \"2\", \"'\"]\n\t\twhile i < len(str):\n\t\t\tn, c = 1, str[i]\n\t\t\tif i+1 < len(str) and str[i+1] == \"'\":\n\t\t\t\tn = 3\n\t\t\t\ti += 2\n\t\t\telif i+1 < len(str) and str[i+1] == '2':\n\t\t\t\tn = 2\n\t\t\t\ti += 2\n\t\t\telif i+1 < len(str) and c == '2':\n\t\t\t\tn = 2\n\t\t\t\tc = str[i+1]\n\t\t\t\ti += 2\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\n\t\t\tif(c == 'F'): self.F(n)\n\t\t\telif(c == 'B'): self.B(n)\n\t\t\telif(c == 'U'): self.U(n)\n\t\t\telif(c == 'D'): self.D(n)\n\t\t\telif(c == 'R'): self.R(n)\n\t\t\telif(c == 'L'): self.L(n)\n\t\t\t\n\t\t\telif(c == 'f'): self.f(n)\n\t\t\telif(c == 'b'): self.b(n)\n\t\t\telif(c == 'u'): self.u(n)\n\t\t\telif(c == 'd'): self.d(n)\n\t\t\telif(c == 'r'): self.r(n)\n\t\t\telif(c == 'l'): self.l(n)\n\t\t\t\n\t\t\telif(c == 'M'): self.M(n)\n\t\t\telif(c == 'm'): self.m(n)\n\t\t\telif(c == 'E'): self.E(n)\n\t\t\telif(c == 'e'): self.e(n)\n\t\t\telif(c == 'S'): self.S(n)\n\t\t\telif(c == 's'): self.s(n)\n\t\t\t\n\t\t\telif(c == 'x'): self.x(n)\n\t\t\telif(c == 'y'): self.y(n)\n\t\t\telif(c == 'z'): self.z(n)\n\t\t\t\n\t\t\telse: continue\n\t\t\t\n\t\t\ts += self.zDecal(c, m, n) + \" \"\n\t\t\t\n\t\tself.z(3*m)\n\t\t\n\t\tif self.options[\"verbose\"] > 1 and silent == False:\n\t\t\tif self.options[\"rotateMode\"]: print(s)\n\t\t\telse:\n\t\t\t\tif m%4 == 0: print(s)\n\t\t\t\tif m%4 == 1: print('z -', s, \"- z'\")\n\t\t\t\telif m%4 == 2: print('z2 -', s, '- z2')\n\t\t\t\telif m%4 == 3: print(\"z' -\", s, '- z')\n\t\n\tdef addAlgo(self, name, algo):\n\t\tself.algo[name] = algo\n \n\tdef doAlgo(self, str):\n\t\tif str in self.algo:\n\t\t\treturn self.algo[str].do(self)\n\t\telse: return \"Error: '\" + str + \"' has not been declared\"\n\t\n\tdef imp(self, file):\n\t\ta = Algorithm()\n\t\ta.loadFromFile(file + \".algo\")\n\t\tself.addAlgo(file, a)\n\t\t\n\tdef setOption(self, name, value):\n\t\tif name not in 
self.options:\n\t\t\tprint(\"Error: '\", name, \"' is not an option\", sep='')\n\t\telse:\n\t\t\tif type(self.options[name]) == int:\n\t\t\t\tself.options[name] = int(value)\n\t\t\telif type(self.options[name]) == float:\n\t\t\t\tself.options[name] = float(value)\n\t\t\telif type(self.options[name]) == bool:\n\t\t\t\tself.options[name] = (value.lower() in [\"true\", \"yes\", \"y\", \"t\", \"1\"])\n\t\t\telse:\n\t\t\t\tself.options[name] = value\n\t\t\n\t\n\t## Printing\n\t\n\tdef zDecal(self, c, i, n):\n\t\tsuffix = ['', \"2\", \"'\"]\n\t\tif self.options[\"rotateMode\"]: \n\t\t\tmoves = [['U', 'L', 'D', 'R'], ['u', 'l', 'd', 'r']]\n\t\t\tfor move in moves:\n\t\t\t\tif c in move:\n\t\t\t\t\treturn move[(move.index(c) + i) % len(move)] + suffix[n-1]\n\t\t\t\n\t\t\tmoves = [['M', 'E'], ['m', 'e']]\n\t\t\tfor move in moves:\n\t\t\t\tif c in move:\n\t\t\t\t\tif i%4 == 0: return c + suffix[n-1]\n\t\t\t\t\telif i%4 == 1: return move[(move.index(c) + i) % len(move)] + suffix[n-1]\n\t\t\t\t\telif i%4 == 2: return c + suffix[-1*(n-2)+1]\n\t\t\t\t\telif i%4 == 3: return move[(move.index(c) + i) % len(move)] + suffix[-1*(n-2)+1]\n\t\t\t\t\t\n\t\treturn c + suffix[n-1]\n\t\t\n\tdef __str__(self):\n\t\ts = ''\n\t\tf = self.faces\n\t\t\n\t\ts += \" |-------|\\n\"\n\t\tfor i in range(3):\n\t\t\ts += \" | \" + str(f[4][3*i]) + ' ' + str(f[4][3*i+1]) + ' ' + str(f[4][3*i+2]) + ' |\\n'\n\t\ts += \"|-------|-------|-------|-------|\\n\"\n\t\tfor i in range(3):\n\t\t\ts += '| ' + str(f[0][3*i]) + ' ' + str(f[0][3*i+1]) + ' ' + str(f[0][3*i+2]) + ' | ' + str(f[1][3*i]) + ' ' + str(f[1][3*i+1]) + ' ' + str(f[1][3*i+2]) + ' | ' + str(f[2][3*i]) + ' ' + str(f[2][3*i+1]) + ' ' + str(f[2][3*i+2]) + ' | ' + str(f[3][3*i]) + ' ' + str(f[3][3*i+1]) + ' ' + str(f[3][3*i+2]) + ' |\\n'\n\t\ts += \"|-------|-------|-------|-------|\\n\"\n\t\tfor i in range(3):\n\t\t\ts += \" | \" + str(f[5][3*i]) + ' ' + str(f[5][3*i+1]) + ' ' + str(f[5][3*i+2]) + ' |\\n'\n\t\ts += \" |-------|\\n\"\n\t\treturn 
s\n\tdef printCube(self):\n\t\tif not self.options[\"verbose\"] > 0:\n\t\t\treturn\n\t\n\t\tif not self.options[\"coloredOutput\"]:\n\t\t\tprint(self)\n\t\t\treturn\n\t\t\t\n\t\tprint('')\n\t\tf = self.faces\n\t\t\n\t\tfor i in range(3):\n\t\t\tprint(\" \", end='')\n\t\t\tself.printCase(4, 3*i)\n\t\t\tself.printCase(4, 3*i+1)\n\t\t\tself.printCase(4, 3*i+2)\n\t\t\tprint('')\n\t\t\t\n\t\tfor i in range(3):\n\t\t\tself.printCase(0, 3*i)\n\t\t\tself.printCase(0, 3*i+1)\n\t\t\tself.printCase(0, 3*i+2)\n\t\t\tself.printCase(1, 3*i)\n\t\t\tself.printCase(1, 3*i+1)\n\t\t\tself.printCase(1, 3*i+2)\n\t\t\tself.printCase(2, 3*i)\n\t\t\tself.printCase(2, 3*i+1)\n\t\t\tself.printCase(2, 3*i+2)\n\t\t\tself.printCase(3, 3*i)\n\t\t\tself.printCase(3, 3*i+1)\n\t\t\tself.printCase(3, 3*i+2)\n\t\t\tprint('')\n\t\t\t\n\t\tfor i in range(3):\n\t\t\tprint(\" \", end='')\n\t\t\tself.printCase(5, 3*i)\n\t\t\tself.printCase(5, 3*i+1)\n\t\t\tself.printCase(5, 3*i+2)\n\t\t\tprint('')\n\t\tprint('')\n \n\tdef printCase(self, f, i):\n\t\tif self.faces[f][i] == 0: print(colorama.Back.GREEN, \" \", colorama.Back.RESET, sep='', end='')\n\t\tif self.faces[f][i] == 1: print(colorama.Back.WHITE, \" \", colorama.Back.RESET, sep='', end='')\n\t\tif self.faces[f][i] == 2: print(colorama.Back.BLUE, \" \", colorama.Back.RESET, sep='', end='')\n\t\tif self.faces[f][i] == 3: print(colorama.Back.YELLOW, \" \", colorama.Back.RESET, sep='', end='')\n\t\tif self.faces[f][i] == 4: print(colorama.Back.MAGENTA, \" \", colorama.Back.RESET, sep='', end='')\n\t\tif self.faces[f][i] == 5: print(colorama.Back.RED, \" \", colorama.Back.RESET, sep='', end='')" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 19.636363983154297, "blob_id": "feb9f74592139dbf36c00ebc0b427f7056b1902c", "content_id": "51c0d78b6b5d5901d05a94b99c8e8b5611b6fba6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": 
"no_license", "max_line_length": 81, "num_lines": 22, "path": "/interactive-rubiks.py", "repo_name": "SRLKilling/interactive-rubiks", "src_encoding": "UTF-8", "text": "from cube import Cube\nfrom algorithm import Algorithm\nimport colorama\n\ncolorama.init()\n\nc = Cube()\n\nfiles = [\"firstcross\", \"firstface\", \"middle\", \"lastcross\", \"lastface\", \"resolve\"]\nfor s in files:\n\ta = Algorithm()\n\ta.loadFromFile(s + \".algo\")\n\tc.addAlgo(s, a)\n\nprint(\"Welcome to Rubick's Cube player ! :)\");\nprint(\"Type 'help' to get a list of usable command\");\nc.printCube();\n\ninStr = \"\"\nwhile inStr != \"exit\":\n\ts = c.eval(inStr)\n\tinStr = input(\">> \")\n\t" }, { "alpha_fraction": 0.49633362889289856, "alphanum_fraction": 0.5467460751533508, "avg_line_length": 29.690141677856445, "blob_id": "de3a025b03b5ffde1695820c278e4170447e4008", "content_id": "d3c77049dc6b0d2f7eece7107317a1d93113725a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2182, "license_type": "no_license", "max_line_length": 113, "num_lines": 71, "path": "/pattern.py", "repo_name": "SRLKilling/interactive-rubiks", "src_encoding": "UTF-8", "text": "class Pattern:\n\n\tdef __init__(self, str=None):\n\t\tself.faces = [['_' for j in range(9)] for i in range(6)]\n\t\tif str != None: self.load(str)\n\t\t\n\tdef load(self, str):\n\t\tlines = str.split('\\n')\n\t\tfor i in range(3):\n\t\t\tlines[i] = lines[i].strip().split()\n\t\t\tself.faces[4][i*3], self.faces[4][i*3+1], self.faces[4][i*3+2] = lines[i][0], lines[i][1], lines[i][2]\n\t\tfor i in range(3):\n\t\t\tlines[3+i] = lines[3+i].strip().split()\n\t\t\tself.faces[0][i*3], self.faces[0][i*3+1], self.faces[0][i*3+2] = lines[3+i][0], lines[3+i][1], lines[3+i][2]\n\t\t\tself.faces[1][i*3], self.faces[1][i*3+1], self.faces[1][i*3+2] = lines[3+i][3], lines[3+i][4], lines[3+i][5]\n\t\t\tself.faces[2][i*3], self.faces[2][i*3+1], self.faces[2][i*3+2] = lines[3+i][6], lines[3+i][7], 
lines[3+i][8]\n\t\t\tself.faces[3][i*3], self.faces[3][i*3+1], self.faces[3][i*3+2] = lines[3+i][9], lines[3+i][10], lines[3+i][11]\n\t\tfor i in range(3):\n\t\t\tlines[6+i] = lines[6+i].strip().split()\n\t\t\tself.faces[5][i*3], self.faces[5][i*3+1], self.faces[5][i*3+2] = lines[6+i][0], lines[6+i][1], lines[6+i][2]\n\t\n\tdef match(self, cube):\n\t\tfor z in range(4):\n\t\t\tcolors, matched = [], True\n\t\t\tfor f in range(6):\n\t\t\t\tif matched:\n\t\t\t\t\tfor i in range(9):\n\t\t\t\t\t\tif self.matchCase(cube, colors, f, i) == False:\n\t\t\t\t\t\t\tcube.z()\n\t\t\t\t\t\t\tmatched = False\n\t\t\t\t\t\t\tbreak\n\t\t\tif matched:\n\t\t\t\tcube.z(3*z)\n\t\t\t\treturn z\n\t\t\n\t\treturn -1\n\n\tdef matchOnly(self, cube, c):\n\t\tfor z in range(4):\n\t\t\tcolor, matched = -1, True\n\t\t\tfor f in range(6):\n\t\t\t\tif matched:\n\t\t\t\t\tfor i in range(9):\n\t\t\t\t\t\tif color == -1 and self.faces[f][i] == c:\n\t\t\t\t\t\t\tcolor = cube.faces[f][i]\n\t\t\t\t\t\telif color != -1 and self.faces[f][i] == c and cube.faces[f][i] != color:\n\t\t\t\t\t\t\tcube.z()\n\t\t\t\t\t\t\tmatched = False\n\t\t\t\t\t\t\tbreak\n\t\t\tif matched:\n\t\t\t\tcube.z(3*z)\n\t\t\t\treturn z\n\t\treturn -1\n\t\t\n\tdef matchCase(self, cube, colors, f, i):\n\t\tif(self.faces[f][i] == '_'):\n\t\t\treturn True\n\t\telse:\n\t\t\tchar, color = int(self.faces[f][i]), -1\n\t\t\t# print(colors, char)\n\t\t\tfor c in colors:\n\t\t\t\tif c[0] == char:\n\t\t\t\t\tcolor = c[1]\n\t\t\t\t\tbreak\n\t\t\t\telif c[1] == cube.faces[f][i]:\n\t\t\t\t\treturn False\n\t\t\tif color == -1:\n\t\t\t\tcolors.append( (char, cube.faces[f][i]) )\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn color == cube.faces[f][i]\n\t\t\t" }, { "alpha_fraction": 0.6017844080924988, "alphanum_fraction": 0.6109117269515991, "avg_line_length": 25.20161247253418, "blob_id": "f20e73f2aa665e4057e03311130bc6790dc5dbc0", "content_id": "17ea53f8132d355701d7aa1862fb554f5d8b0f8f", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 9751, "license_type": "no_license", "max_line_length": 136, "num_lines": 372, "path": "/algorithm.py", "repo_name": "SRLKilling/interactive-rubiks", "src_encoding": "UTF-8", "text": "from pattern import Pattern\nfrom random import randint\n\nclass AlgoAction:\n\tdef __init__(self, code, silent=False):\n\t\tself.code = code\n\t\tself.silent = silent\n\t\t\n\tdef do(self, cube, m):\n\t\tcube.do(self.code, m, self.silent)\n\t\treturn False\n\t\t\nclass AlgoPrint:\n\tdef __init__(self, str):\n\t\tself.str = str\n\t\t\n\tdef do(self, cube):\n\t\tcube.pr(self.str)\n\t\treturn False\n\t\t\nclass AlgoImport:\n\tdef __init__(self, file):\n\t\tself.file = file\n\t\t\n\tdef do(self, cube):\n\t\tcube.imp(self.file)\n\t\treturn False\n\t\t\nclass AlgoDoAlgo:\n\tdef __init__(self, str):\n\t\tself.str = str\n\t\t\n\tdef do(self, cube):\n\t\treturn cube.doAlgo(self.str)\n\t\t\nclass AlgoPrintCube:\t\t\n\tdef do(self, cube):\n\t\tcube.printCube()\n\t\treturn False\n\nclass AlgoPause:\n\tdef do(self, cube):\n\t\tcube.pause()\n\t\treturn False\n\t\nclass AlgoReset:\n\tdef do(self, cube):\n\t\tcube.reset()\n\t\treturn False\n\t\t\nclass AlgoSetOption:\n\tdef __init__(self, optname, optval):\n\t\tself.name = optname\n\t\tself.val = optval\n\t\t\n\tdef do(self, cube):\n\t\tcube.setOption(self.name, self.val)\n\t\treturn False\n\t\nclass AlgoRandomize:\n\tdef __init__(self, silent=False):\n\t\tself.silent = silent\n\t\t\n\tdef do(self, cube):\n\t\ts = \"\"\n\t\tfor j in range(20):\n\t\t\tr = randint(0, 18)\n\t\t\tt, n = r//3, r%3\n\t\t\tif(t == 0):\n\t\t\t\ts += \"F\"\n\t\t\tif(t == 1):\n\t\t\t\ts += \"B\"\n\t\t\tif(t == 2):\n\t\t\t\ts += \"U\"\n\t\t\tif(t == 3):\n\t\t\t\ts += \"D\"\n\t\t\tif(t == 4):\n\t\t\t\ts += \"R\"\n\t\t\tif(t == 5):\n\t\t\t\ts += \"L\"\n\t\t\t\n\t\t\tif(n == 1): s+=\"2\"\n\t\t\telif(n == 2): s+=\"_\"\n\t\t\ts += \" \"\n\t\t\t\n\t\tcube.do(s, 0, self.silent)\n\t\treturn False\n\t\n\t\n\nclass Algorithm:\n\t\n\tIF = 
0\n\tWHILE = 1\n\tUNTIL = 2\n\n\tdef __init__(self, patterns=[Pattern()], parent=None, conditionType=IF):\n\t\tself.step = []\n\t\tself.patterns = patterns\n\t\tself.conditionType = conditionType\n\t\tself.parent = parent\n\t\tself.elseAlgo = None\n\t\t\n\tdef newAlgo(self, patterns, cond):\n\t\tstep = Algorithm(patterns, self, cond)\n\t\tself.step.append(step)\n\t\treturn step\n\t\t\n\tdef newDoAlgo(self, str):\n\t\tself.step.append( AlgoDoAlgo(str) )\n\t\t\n\tdef newMatchdoing(self, pattern, c, action):\n\t\tstep = AlgoMatch(self, [pattern], c, action)\n\t\tself.step.append(step)\n\t\treturn step\n\t\t\n\tdef newElseMatchdoing(self, pattern, c, action):\n\t\tself.elseAlgo = AlgoMatch(self.parent, [pattern], c, action)\n\t\treturn self.elseAlgo\n\t\t\n\tdef newElse(self, patterns):\n\t\tself.elseAlgo = Algorithm(patterns, self.parent, Algorithm.IF)\n\t\treturn self.elseAlgo\n\t\t\n\tdef newAction(self, str, silent=False):\n\t\tself.step.append( AlgoAction(str, silent) )\n\t\t\n\tdef newPrint(self, str):\n\t\tself.step.append( AlgoPrint(str) )\n\t\t\n\tdef newPrintCube(self):\n\t\tself.step.append( AlgoPrintCube() )\n\t\t\n\tdef newPause(self):\n\t\tself.step.append( AlgoPause() )\n\t\t\n\tdef newRandomize(self, silent=False):\n\t\tself.step.append( AlgoRandomize(silent) )\n\t\t\n\tdef newReset(self):\n\t\tself.step.append( AlgoReset() )\n\t\t\n\tdef newImport(self, str):\n\t\tself.step.append( AlgoImport(str) )\n\t\t\n\tdef newSetOption(self, optname, optval):\n\t\tself.step.append( AlgoSetOption(optname, optval) )\n\t\t\n\t\t\n\tdef loadFromFile(self, filepath):\n\t\tfile = None\n\t\ttry:\n\t\t\tfile = open(filepath, \"r\")\n\t\texcept IOError:\n\t\t\tprint(\"Error: '\", filepath, \"' no such file\", sep='')\n\t\t\treturn\n\t\t\n\t\tline = file.readline()\n\t\tlineno = 1\n\t\tstep = self\n\t\twhile line != '':\n\t\t\tline = line.strip(' \\t\\n\\r')\n\t\t\t\n\t\t\t\n\t\t\tif line.startswith(\"match \"):\n\t\t\t\tparam = line[6:].split(\"doing\")\n\t\t\t\tparam[0] 
= param[0].strip(); param[1] = param[1].strip()\n\t\t\t\tstr = ''\n\t\t\t\tfor i in range(9): str += file.readline()\n\t\t\t\tlineno += 9\n\t\t\t\tstep = step.newMatchdoing(Pattern(str), param[0], param[1])\n\t\t\t\t\n\t\t\telif line.startswith(\"elseif-match \"):\n\t\t\t\tif self.conditionType != Algorithm.IF:\n\t\t\t\t\tprint(\"Error in \",filepath,\":\", lineno,\": 'elseif-match' must follow an 'if', 'elseif', 'match', or 'elseif-match' clause\", sep='')\n\t\t\t\t\treturn\n\t\t\t\tparam = line[13:].split(\"doing\")\n\t\t\t\tparam[0] = param[0].strip(); param[1] = param[1].strip()\n\t\t\t\tstr = ''\n\t\t\t\tfor i in range(9): str += file.readline()\n\t\t\t\tlineno += 9\n\t\t\t\tstep = step.newElseMatchdoing(Pattern(str), param[0], param[1])\n\t\t\t\t\n\t\t\telif line.startswith(\"if\"):\n\t\t\t\tl = self.parsePatterns(file)\n\t\t\t\tlineno += l[0]\n\t\t\t\tstep = step.newAlgo(l[1], Algorithm.IF)\n\t\t\t\t\n\t\t\telif line == \"elseif\":\n\t\t\t\tif self.conditionType != Algorithm.IF:\n\t\t\t\t\tprint(\"Error in \",filepath,\":\", lineno,\": 'elseif' must follow an 'if', 'elseif', 'match', or 'elseif-match' clause\", sep='')\n\t\t\t\t\treturn\n\t\t\t\tl = self.parsePatterns(file)\n\t\t\t\tlineno += l[0]\n\t\t\t\tstep = step.newElse(l[1])\n\t\t\t\t\n\t\t\telif line == \"else\":\n\t\t\t\tif self.conditionType != Algorithm.IF:\n\t\t\t\t\tprint(\"Error in \",filepath,\":\", lineno,\": 'else' must follow an 'if', 'elseif', 'match', or 'elseif-match' clause\", sep='')\n\t\t\t\t\treturn\n\t\t\t\tstep = step.newElse([Pattern()])\n\t\t\t\t\n\t\t\telif line.startswith(\"while\"):\n\t\t\t\tl = self.parsePatterns(file)\n\t\t\t\tlineno += l[0]\n\t\t\t\tstep = step.newAlgo(l[1], Algorithm.WHILE)\n\t\t\t\t\n\t\t\telif line.startswith(\"until\"):\n\t\t\t\tl = self.parsePatterns(file)\n\t\t\t\tlineno += l[0]\n\t\t\t\tstep = step.newAlgo(l[1], Algorithm.UNTIL)\n\t\t\t\t\n\t\t\telif line == \"end\":\n\t\t\t\tif step.parent == None:\n\t\t\t\t\tprint(\"Error in \",filepath,\":\", 
lineno,\": Too much 'end'\", sep='')\n\t\t\t\t\treturn\n\t\t\t\tstep = step.parent\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.parseLine(line, step, filepath, lineno)\n\t\t\t\t\n\t\t\tline = file.readline()\n\t\t\tlineno += 1\n\t\t\t\n\tdef parseLine(self, line, step=None, filename='<input>', lineno=1):\n\t\tif(step == None): step = self\n\t\tline = line.strip()\n\t\t\n\t\tif line.lower() == \"help\":\n\t\t\tprint(\"Here is a list of command :\")\n\t\t\tprint(\" - 'randomize' to get a random rubick's cube (if you don't want to print the random moves, use 'randomize silent')\")\n\t\t\tprint(\" - 'do' succeeded by a move sequence (moves are L, R, U, D, B, F, l, r, u, d, b, f, M, E, S, m, e, s, x, y, z)\")\n\t\t\tprint(\" - 'doalgo <name>' will do the algorithm named <name>, previously loaded with import\")\n\t\t\tprint(\" Startup loaded algo: firstcross, firstface, middle, lastcross, lastface, resolve\")\n\t\t\tprint(\" - 'import <path>' so that <path>.algo will by imported into the algorithm list\")\n\t\t\tprint(\" - 'reset' to start with a new fresh cube\")\n\t\t\tprint(\" - 'set <name> = <value>' to set one of the options\")\n\t\t\tprint(\" current options are : 'interactive' to enable ('true') or disable ('false') pause during algorithm execution\")\n\t\t\tprint(\" 'coloredOutput' to enable a colored output ('true') vs a numbered output ('false')\")\n\t\t\tprint(\" 'verbose' to enable all output ('2'), only cube printing ('1'), or no output at all ('0')\")\n\t\t\tprint(\" - 'exit' to quit this command prompt\")\n\t\t\tprint(\"\")\n\t\t\tprint(\"Basically, if you want to have fun with this software, juste use :\")\n\t\t\tprint(\" >> randomize\")\n\t\t\tprint(\" >> doalgo resolve\")\n\t\t\tprint(\"\")\n\t\t\t\n\t\t\t\n\t\telif line.startswith(\"do \"):\n\t\t\tif(line[3:].strip().startswith(\"silent \")): step.newAction(line[3:].strip()[6:], True)\n\t\t\telse: step.newAction(line[3:].strip())\n\t\t\treturn True\n\t\t\t\n\t\telif line.startswith(\"doalgo 
\"):\n\t\t\tstep.newDoAlgo(line[7:].strip())\n\t\t\treturn False\n\t\t\t\n\t\telif line.startswith(\"print \"):\n\t\t\tstep.newPrint(line[6:].strip())\n\t\t\treturn False\n\t\t\t\n\t\telif line.lower() == \"printcube\":\n\t\t\tstep.newPrintCube()\n\t\t\treturn False\n\t\t\t\n\t\telif line.lower() == \"reset\":\n\t\t\tstep.newReset()\n\t\t\treturn True\n\t\t\t\n\t\telif line.lower().startswith(\"import \"):\n\t\t\tstep.newImport(line[7:].strip())\n\t\t\treturn False\n\t\t\t\n\t\telif line.lower() == \"pause\":\n\t\t\tstep.newPause()\n\t\t\treturn False\n\t\t\t\n\t\telif line.lower().startswith(\"randomize\"):\n\t\t\tif(line[10:].strip().startswith(\"silent\")): step.newRandomize(True)\n\t\t\telse: step.newRandomize()\n\t\t\treturn True\n\t\t\t\n\t\telif line.lower().startswith(\"set\"):\n\t\t\topt = line[3:].split(\"=\")\n\t\t\tstep.newSetOption(opt[0].strip(), opt[1].strip())\n\t\t\t\t\n\t\telif line != '' and line.startswith('#') == False:\n\t\t\tprint(\"Error in \",filename,\":\", lineno,\": Unrecognized syntax\", sep='')\n\t\t\t# print(line)\n\t\t\treturn False\n\t\t\t\n\tdef parsePatterns(self, file):\n\t\tlineno, pat, cont = 0, [], True\n\t\twhile cont:\n\t\t\tstr = ''\n\t\t\tfor i in range(9): str += file.readline()\n\t\t\tlineno += 9\n\t\t\tpat.append(Pattern(str))\n\t\t\ts = file.readline()\n\t\t\tif(s.strip().lower() != \"or\"):\n\t\t\t\tlineno += 1\n\t\t\t\tcont = False\n\t\treturn [lineno, pat]\n\t\t\t\n\tdef do(self, cube):\n\t\tif(self.conditionType == Algorithm.IF):\n\t\t\tm = self.match(cube)\n\t\t\tif m >= 0:\n\t\t\t\tif self.doSteps(cube, m): return True\n\t\t\telif self.elseAlgo != None:\n\t\t\t\tif self.elseAlgo.do(cube): return True\n\t\telif(self.conditionType == Algorithm.WHILE):\n\t\t\tm = self.match(cube)\n\t\t\twhile m >= 0:\n\t\t\t\tif self.doSteps(cube, m): return True\n\t\t\t\tm = self.match(cube)\n\t\telif(self.conditionType == Algorithm.UNTIL):\n\t\t\tm = self.match(cube)\n\t\t\tj = 0\n\t\t\twhile m < 0 and j <= 50:\n\t\t\t\tif 
self.doSteps(cube, 0): return True\n\t\t\t\tm = self.match(cube)\n\t\t\t\tj += 1\n\t\t\tif j >= 50:\n\t\t\t\tprint(\"Erreur - boucle infinie detectee\")\n\t\t\t\treturn True\n\t\treturn False\n\t\t\t\t\n\tdef doSteps(self, cube, m):\n\t\t\tfor step in self.step:\n\t\t\t\tif isinstance(step, AlgoAction):\n\t\t\t\t\tif step.do(cube, m): return True\n\t\t\t\telse:\n\t\t\t\t\tif step.do(cube): return True\n\t\t\treturn False\n\t\t\t\t\n\tdef match(self, cube):\n\t\tfor p in self.patterns:\n\t\t\tm = p.match(cube)\n\t\t\tif m >= 0: return m\n\t\treturn -1\n\t\t\t\t\nclass AlgoMatch(Algorithm):\n\n\tdef __init__(self, parent, patterns, c, action):\n\t\tself.patterns = patterns\n\t\tself.action = action\n\t\tself.c = c\n\t\t\n\t\tself.parent = parent\n\t\tself.conditionType = Algorithm.IF\n\t\tself.step = []\n\t\tself.elseAlgo = None\n\t\t\n\tdef do(self, cube):\n\t\tm = self.patterns[0].matchOnly(cube, self.c)\n\t\tif m >= 0:\n\t\t\tn = 0\n\t\t\tm = self.patterns[0].match(cube)\n\t\t\twhile m < 0 and n < 4:\n\t\t\t\tcube.do(self.action, m, True)\n\t\t\t\tm = self.match(cube)\n\t\t\t\tn += 1\n\t\t\t\t\n\t\t\tif n != 4:\n\t\t\t\tprint( cube.zDecal(self.action, m, n), ' - ', sep='', end='')\n\t\t\t\tif self.doSteps(cube, m): return True\n\t\t\telif self.elseAlgo != None:\n\t\t\t\tif self.elseAlgo.do(cube): return True\n\t\t\n\t\telif self.elseAlgo != None:\n\t\t\tif self.elseAlgo.do(cube): return True\n\t\t\n\t" }, { "alpha_fraction": 0.7209101319313049, "alphanum_fraction": 0.7353110313415527, "avg_line_length": 35.9361686706543, "blob_id": "60b82f1354fe43a0510decc3b45751a124bea240", "content_id": "aecdb3a3070b0332d81427169de14e83a35136c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3472, "license_type": "no_license", "max_line_length": 492, "num_lines": 94, "path": "/README.md", "repo_name": "SRLKilling/interactive-rubiks", "src_encoding": "UTF-8", "text": "# Interactive Rubik's\n\nThis project is an old 
project I made when I was learning Python.\nIt is basically a shell that allows playing with a rubik's cube.\nIt is patched with an homemade algorithm language that allows easily creating solving algorithms that adapt to the cube.\nHere is a little demo :\n\n![Demo](https://media.giphy.com/media/srcwknEXv8SOY/giphy.gif)\n\n\n## Usage\n\nThe setup is fairly easy, you just need to install colorama\n```\npip install colorama\n```\n\nAnd then, start the program using\n```\npython interactive-rubiks.py\n```\n\n## Shell commands\n\nHere is the list of commands the shell accepts :\n\n* `help` will display the list of commands.\n* `exit` will quit the prompt\n* `do` followed by a space-separated sequence of moves, will update the cube according to the given sequence. \nThe moves are defined as in standard rubiks algorithms.\n* `import` followed by a name such that `name.algo` will be loaded into the algorithm set.\n* `doalgo` followed by the name of an already-imported algorithm.\n* `reset` will refresh the cube to its initial state\n* `set <name> = <val>` to set one of the following option :\n * `interactive = true/false`. Will make algorithm pause during the process, so you can do moves on you're own cube.\n * `coloredOutput = true/false`. Will display colors instead of numbers.\n * `verbose = 2/1/0`. Will respectively display moves and cube, only cube, or nothing.\n* `randomize` to generate a random cube. If not followed by `silent`, it will display the move sequence so you can reproduce it on your own cube.\n\n## Algorithm language\n\nThe heart of the project is its algorithm language. 
It allows easily defining scalable algorithms depending on the state of the cube.\nThe language is a one command per line language.\n\nBasic commands are :\n\n* `print` followed by a string to be printed\n* `printCube` to print the cube in its current state\n* `pause` will pause the algorithm, waiting for the user output\n* `import` and `doalgo`\n* `do` (can be followed by `silent` if you don't want to output the sequence)\n\nBut most importantly, there are conditionnal blocks.\nConditions are expressed using cube patterns.\nFor exemple, this is a pattern :\n```\n\t\t\t\t\t\t1 1 1\n\t\t\t\t\t\t1 1 1\n\t\t\t\t\t\t1 1 _\n\t\t\t\t4 4 4\t5 5 _\t_ 2 2\t0 0 0\n\t\t\t\t4 4 4\t5 5 5\t2 2 2\t0 0 0\n\t\t\t\t4 4 _\t_ 5 _\t_ 2 2\t0 0 0\n\t\t\t\t\t\t_ 3 _\n\t\t\t\t\t\t3 3 3\n\t\t\t\t\t\t3 3 3\n```\nNumbers are used to represent cells that have the same color. Underscore are used to describe any color.\n\nPatterns are rotation insensitive. When we're trying to match a pattern, we try it 4 times rotating the cube around the z axis.\nThen, when matched, the rotation is remembered and every action that follows is re-adapted to the rotation.\n\nYou can also mix multiple patterns separating them with `or` line.\n\nNow this is how you use a conditionnal block :\n```\nif/match x doing y\n condition\n commands\n[elseif/elseif-match doing y\n commands]*\n[else\n commands]?\nend\n```\n\nThe difference between match and if, is that `match x doing y` will first try matching only the number `x` doing repetedly action `y`. Each time it happens, we will then check that the whole pattern is matched. This allows syncing different faces. For exemple, if you know how to move a square on the back face to the front face, maybe you'd like to first move the square from the back face so it is at a good position for you to execute your algorithm, and move it to the right front square.\n\nYou can also use loops :\n```\nuntil/while\n condition\n commands\nend\n```\n" } ]
5
ofBits/pyAesCrypt
https://github.com/ofBits/pyAesCrypt
4055a3ecdb714bf993251e2cdf81a7c57f877789
45633ceacbafb489d0a03fd37d963956afd5edc2
4a1e8cdc164c50af6e888bcb019f0d2deb30751d
refs/heads/master
2020-03-19T04:20:37.219394
2018-05-26T10:00:16
2018-05-26T10:00:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8444444537162781, "alphanum_fraction": 0.8444444537162781, "avg_line_length": 44, "blob_id": "fc6cbfad0d1cbaf6c2dc8f01444ca80eebfa48fb", "content_id": "ad3217a91e080e5cc1fde4ab9d52a64a6c699d0f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "permissive", "max_line_length": 44, "num_lines": 1, "path": "/pyAesCrypt/__init__.py", "repo_name": "ofBits/pyAesCrypt", "src_encoding": "UTF-8", "text": "from .crypto import encryptFile, decryptFile\n" }, { "alpha_fraction": 0.7000521421432495, "alphanum_fraction": 0.7146583199501038, "avg_line_length": 33.23214340209961, "blob_id": "16d5cf711361c9d318d0fbea4b6fb7d14ff2dd7c", "content_id": "4c5774c60d519cb3a93579e077f620919da08e59", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1917, "license_type": "permissive", "max_line_length": 149, "num_lines": 56, "path": "/README.rst", "repo_name": "ofBits/pyAesCrypt", "src_encoding": "UTF-8", "text": "pyAesCrypt\n===============\n.. image:: https://travis-ci.org/marcobellaccini/pyAesCrypt.svg?branch=master\n :target: https://travis-ci.org/marcobellaccini/pyAesCrypt\n\nAbout pyAesCrypt\n--------------------------\npyAesCrypt is a Python 3 file-encryption module and script that uses AES256-CBC to encrypt/decrypt files.\n\npyAesCrypt is compatible with the `AES Crypt`_ `file format`_ (version 2).\n\nThe script is Free Software, released under the `Apache License, Version 2.0`_.\n\npyAesCrypt is brought to you by Marco Bellaccini - marco.bellaccini(at!)gmail.com.\n \nIMPORTANT SECURITY NOTE: version 2 of the AES Crypt file format does not authenticate the \"file size modulo 16\" byte. 
This implies that an attacker \nwith write access to the encrypted file may alter the corresponding plaintext file size by up to 15 bytes.\n\nNOTE: there is no low-level memory management in Python, hence it is not possible to wipe memory areas were sensitive information was stored.\n\nModule usage example\n------------------------\nHere is an example showing encryption and decryption of a file:\n\n.. code:: python\n\n import pyAesCrypt\n # encryption/decryption buffer size - 64K\n bufferSize = 64 * 1024\n password = \"foopassword\"\n # encrypt\n pyAesCrypt.encryptFile(\"data.txt\", \"data.txt.aes\", password, bufferSize)\n # decrypt\n pyAesCrypt.decryptFile(\"data.txt.aes\", \"dataout.txt\", password, bufferSize)\n\nScript usage examples\n------------------------\nEncrypt file test.txt in test.txt.aes:\n\n\tpyAesCrypt -e test.txt\n\nDecrypt file test.txt.aes in test.txt:\n\n\tpyAesCrypt -d test.txt.aes\n\t\nEncrypt file test.txt in test2.txt.aes:\n\n\tpyAesCrypt -e test.txt -o test2.txt.aes\n\nDecrypt file test.txt.aes in test2.txt:\n\n\tpyAesCrypt -d test.txt.aes -o test2.txt\n\n.. _AES Crypt: https://www.aescrypt.com\n.. _file format: https://www.aescrypt.com/aes_file_format.html\n.. _Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0\n" } ]
2
l0ka/SmithWaterman
https://github.com/l0ka/SmithWaterman
f157814b445cdd7d3bd3b03c877ff980cbfcccc2
26afc024f49f2de2b748f6f3d0e386aa6ed6cf8e
84ebff494e4df02323b076f42bbe127a735b6d5a
refs/heads/master
2021-01-19T15:18:04.880472
2017-08-21T14:06:32
2017-08-21T14:06:32
100,958,615
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.43041321635246277, "alphanum_fraction": 0.4558677673339844, "avg_line_length": 34.17441940307617, "blob_id": "b57254e43da662dac2de352f9b26885956909f67", "content_id": "2dfd90400f12d9b926b4fe9c27c0d266c37de46e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3025, "license_type": "no_license", "max_line_length": 83, "num_lines": 86, "path": "/s_w.py", "repo_name": "l0ka/SmithWaterman", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# [email protected]\n\n###############################################################################\n## Building blocks\n# seqA, seqB --> 1. input sequences\n# match, mismatch, gap --> 2. scoring function\n# rows, columns --> 3. two matrices: scores and paths \n# highest score --> 4. starting position for traceback\n# traceback --> 5. alignment(s)\n\n###############################################################################\n## Get input sequences\nimport sys\nif len(sys.argv) == 3:\n seq1, seq2 = sys.argv[1], sys.argv[2]\nelse:\n print 'Please provide exactly 2 DNA sequences as input.\\n', 'Exit!\\n'\n sys.exit()\n\n## Check if the input strings are valid DNA sequences\nvalid = 'ATCG'\nif any(i not in valid for i in seq1 + seq2):\n print 'Please provide 2 valid DNA sequences as input.\\n', 'Exit!\\n'\n sys.exit()\n\n## Define scoring function \nmatch = 3\nmismatch = -3\ngap = -2\n\n###############################################################################\n## Implement Smith-Waterman algorithm\ndef s_w(seqA, seqB):\n ## Initialize variables and matrices\n cols = len(seqA)\n rows = len(seqB)\n matrix = [[0 for row in range(rows+1)] for col in range(cols+1)]\n paths = [[0 for row in range(rows+1)] for col in range(cols+1)]\n max_score = 0\n s1, s2 = [], []\n ## Fill the scoring matrix\n for i in range(cols):\n for j in range(rows):\n if seqA[i] == seqB[j]:\n diag = matrix[i][j] + match\n else:\n diag = matrix[i][j] + mismatch\n up = 
matrix[i + 1][j] + gap\n left = matrix[i][j + 1] + gap\n score = max(0,diag, up, left)\n matrix[i+1][j+1] = score\n if score > max_score:\n max_score = score\n start_pos = [i+1, j+1]\n ## Fill the paths matrix\n if matrix[i+1][j+1] == diag and matrix[i+1][j+1] != 0:\n paths[i+1][j+1] = 'diag'\n elif matrix[i+1][j+1] == up and matrix[i+1][j+1] != 0:\n paths[i+1][j+1] = 'up'\n elif matrix[i+1][j+1] == left and matrix[i+1][j+1] != 0:\n paths[i+1][j+1] = 'left'\n ## Traceback\n i, j = start_pos\n start_path = paths[i][j]\n while start_path != 0:\n if start_path == 'diag':\n s1.append(seqA[i-1])\n s2.append(seqB[j-1])\n i, j = i-1, j-1\n elif start_path == 'up':\n s1.append('-')\n s2.append(seqB[j-1])\n j = j-1\n else:\n s1.append(seqA[i-1])\n s2.append('-')\n i = i-1\n start_path = paths[i][j]\n ## Return the optimal local alignment\n return ''.join(list(reversed(s1))), ''.join(list(reversed(s2)))\n\n############################################################################### \n## Run the function and print the result\naln1, aln2 = s_w(seq1, seq2)\nprint 'Optimal local alignment\\n', aln1, '\\n', aln2, '\\n'\n" }, { "alpha_fraction": 0.8455284833908081, "alphanum_fraction": 0.8455284833908081, "avg_line_length": 60.5, "blob_id": "47b7af860d3a57705db773be003222d78e8ebdf9", "content_id": "1be2ff33e9c53ef0f60f964d29bb5fdbd0e473cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 106, "num_lines": 2, "path": "/README.md", "repo_name": "l0ka/SmithWaterman", "src_encoding": "UTF-8", "text": "# SmithWaterman\nA basic Python implementation of the Smith-Waterman algorithm for local alignment of nucleotide sequences.\n" } ]
2
twiskle/iris-statistical-analysis
https://github.com/twiskle/iris-statistical-analysis
4df00430fa0f5882d6ac4661f549bddbd75fed51
880429b134154cfcc8ccb2bfa22b181c19ce54a1
7980c7f7b9bdac1e9eb5bc7b157f7e67a511e38b
refs/heads/master
2020-08-05T18:23:27.851422
2019-10-03T19:11:18
2019-10-03T19:11:18
212,653,547
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6303805112838745, "alphanum_fraction": 0.663755476474762, "avg_line_length": 26.51502227783203, "blob_id": "5094e56240cffa4cef56db9f873fb319970a5950", "content_id": "ec6cfcd98d208a447bf192758dbb3e2cf3117dce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6412, "license_type": "no_license", "max_line_length": 94, "num_lines": 233, "path": "/iris_data_analysis.py", "repo_name": "twiskle/iris-statistical-analysis", "src_encoding": "UTF-8", "text": "## Python3\n## Statistical Analysis on Iris dataset\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nimport scipy.stats as stats\nimport plotnine as p9\n#import statsmodels as sm\n\n\n\n# Load some data\n# iris: 3 different species: Setosa, Versicolour, and Virginica\n\niris = datasets.load_iris()\n\n\n# transform to dataframe\niris_df = pd.DataFrame(iris['data'], columns=iris['feature_names'])\n\n#print (iris_df.head(5))\n#print (iris_df.shape)\n#print (iris_df.info())\n#print (iris_df.describe())\n\niris_df['species'] = iris['target']\n\n\n## Scatter matrix\n#pd.scatter_matrix(iris_df, alpha=0.2, figsize=(10, 10))\n#plt.show()\n\nprint(\"unique labels in the dataset: {}\".format(iris_df.species.unique()))\n\n\n## If need to re-label the target variable into numerical values\n'''\nmapping_dict = {0: 1, 1: 2, 2: 3}\niris_df['species'].map(mapping_dict)\nprint(\"unique labels in the dataset after mapping: {}\".format(iris_df.species.unique()))\nprint('done mapping')\ninput()\n'''\n\n\n# Print density plot, mean, median, and mode\nprint(p9.ggplot(iris_df)+ p9.aes(x='sepal length (cm)')+ p9.geom_density(alpha=0.2))\nprint(iris_df.mean())\nprint(iris_df.median())\nprint(iris_df.mode())\n\n\n# Calculate theoretical quantiles\ntq = stats.probplot(iris_df['sepal length (cm)'], dist=\"norm\")\n\n# Create Dataframe\ndf_temp = pd.DataFrame(data= {'Theoretical Quantiles': tq[0][0], \n \"Ordered Values\": 
iris_df['sepal length (cm)'].sort_values() })\n\n# Create Q-Q plot\nprint(p9.ggplot(df_temp)+ p9.aes('Theoretical Quantiles','Ordered Values') +p9.geom_point())\n\n\n\n# Extract data with particular label (target)\n#a = iris_df[['sepal length (cm)','species']]\na_sl = iris_df.loc[iris_df['species']== 0,'sepal length (cm)']\na_pl = iris_df.loc[iris_df['species']== 0,'petal length (cm)']\n\n\nprint(iris_df['sepal length (cm)'])\n\n#Extract entire dataframe based on one type of flower\nnum_species = 0\na_df = iris_df.loc[iris_df['species']== num_species,]\n\n\nprint(\"dataframe info of species {}\".format(num_species))\na_df.info()\n\n\n## Scatter plot\nimport matplotlib.pyplot as plt\n\ncdict = {0: 'red', 1: 'blue', 2: 'green'}\nfig, axs = plt.subplots(1,2)\nfor g in iris_df.species.unique():\n\ta_sl = iris_df.loc[iris_df['species']== g,'sepal length (cm)']\n\ta_pl = iris_df.loc[iris_df['species']== g,'petal length (cm)']\n\taxs[0].scatter(a_sl, a_pl, c=cdict[g], label=g, s = 50)\naxs[0].set_xlabel(\"sepal length (cm)\")\naxs[0].set_ylabel(\"petal length (cm)\")\naxs[0].legend()\n\nfor g in iris_df.species.unique():\n\ta_sl = iris_df.loc[iris_df['species']== g,'sepal length (cm)']\n\ta_sw = iris_df.loc[iris_df['species']== g,'sepal width (cm)']\n\taxs[1].scatter(a_sl, a_sw, c=cdict[g], label=g, s = 50)\naxs[1].set_xlabel(\"sepal length (cm)\")\naxs[1].set_ylabel(\"sepal width (cm)\")\naxs[1].legend()\n\n\n## Check to see if there's a linear correlation between...\n\n# Sepal and Petal Legnth\na_sl = iris_df.loc[iris_df['species']== 0,'sepal length (cm)']\na_pl = iris_df.loc[iris_df['species']== 0,'petal length (cm)']\n\npearcorr_1 = stats.pearsonr(a_sl,a_pl)\nprint(\"Flower 0: Correlation between Sepal Length and Petal Length: {}\".format(pearcorr_1[0]))\nprint(\"p-value is: {}\".format(pearcorr_1[1]))\n\n# Sepal Legnth and Width\na_sl = iris_df.loc[iris_df['species']== 0,'sepal length (cm)']\na_sw = iris_df.loc[iris_df['species']== 0,'sepal width (cm)']\n\npearcorr_2 
= stats.pearsonr(a_sl, a_sw)\nprint(\"Flower 0: Correlation between Sepal Length and Sepal Width: {}\".format(pearcorr_2[0]))\nprint(\"p-value is: {}\".format(pearcorr_2[1]))\n\n\n\n## Classifications:\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ny = iris_df['species'].values\nX = iris_df[['sepal length (cm)','petal length (cm)']].values\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, \n\trandom_state=7, stratify=y)\n\n\n## Support Vector Classifier\nprint(\"Support Vector Classifier:\")\n\nfrom sklearn.svm import SVC\nmodel1 = SVC(kernel='linear', C=0.5, decision_function_shape='ovr')\nmodel1.fit(X_train,y_train)\nprint('Accuracy: {}'.format(model1.score(X_test,y_test)))\n\ny_pred = model1.predict(X_test)\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nprint(\"Confusion Matrix: \\n{}\\n\".format(confusion_matrix(y_test, y_pred)))\nprint(\"Classification Report: \\n{}\\n\".format(classification_report(y_test, y_pred)))\n\n'''output:\nConfusion Matrix:\n[[10 0 0]\n [ 0 10 0]\n [ 0 1 9]]\nClassification Report:\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 10\n 1 0.91 1.00 0.95 10\n 2 1.00 0.90 0.95 10\n\n accuracy 0.97 30\n macro avg 0.97 0.97 0.97 30\nweighted avg 0.97 0.97 0.97 30\n'''\n\n\n## Cross-validation\nfrom sklearn.model_selection import cross_val_score\n\n# Compute 5-fold cross-validation scores: cv_scores\ncv_scores = cross_val_score(model1,X_train,y_train,cv=5)\n\n# Print the 5-fold cross-validation scores\nprint(cv_scores)\n\nprint(\"Average 5-Fold CV Score: {}\".format(np.mean(cv_scores)))\n'''output:\nAverage 5-Fold CV Score: 0.9666666666666668\n'''\n\n\n## KMeans Clustering\nprint(\"KMeans Clustering, using sepal length and petal length:\")\n\npoints = np.concatenate( (iris_df['sepal length (cm)'].values.reshape(-1,1), \n\tiris_df['petal length (cm)'].values.reshape(-1,1)),axis=1 )\n\nfrom sklearn.cluster import 
KMeans\nmodel2 = KMeans(n_clusters=3)\nmodel2.fit(points)\n\n# Determine the cluster labels of new_points: labels\nlabels = model2.predict(points)\n\n\n# Assign the cluster centers: centroids\ncentroids = model2.cluster_centers_\n\n# Assign the columns of centroids: centroids_x, centroids_y\ncentroids_x = centroids[:,0]\ncentroids_y = centroids[:,1]\n\n\n## Scatter plot\nimport matplotlib.pyplot as plt\n\nfig2, ax2 = plt.subplots()\nax2.scatter(points[:,0],points[:,1],c=iris_df.species.values,alpha=0.5)\nax2.scatter(centroids_x,centroids_y,marker='D',s=100)\nax2.set_xlabel(\"sepal length (cm)\")\nax2.set_ylabel(\"petal length (cm)\")\n#ax2.legend()\n\nplt.show()\n\n\n# Create a DataFrame with labels and iris_df['species'] as columns\ndf = pd.DataFrame({'labels': labels, 'targets': iris_df['species'] })\n\n# Create crosstab: ct\nct = pd.crosstab(df['labels'],df['targets'])\nprint(ct)\n\n'''output:\ntargets 0 1 2\nlabels \n0 0 45 13\n1 50 1 0\n2 0 4 37\n'''\n\n" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 27, "blob_id": "bcaf3fb9ce9a5ea95299720ccb847d7ef2dd71de", "content_id": "ad7250e097839b3f9f6c1256389ad6e8ff7b224b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/README.md", "repo_name": "twiskle/iris-statistical-analysis", "src_encoding": "UTF-8", "text": "# iris-statistical-analysis\n" } ]
2
joseflauzino/management_files
https://github.com/joseflauzino/management_files
675a67dab35c4c29ba33ada868e7dd46d4df4f05
25bb21afddf7ad715fc61a0d0df33ac141e091df
a5558087a5d9e574ee6e53f845f8511fc305156a
refs/heads/master
2020-05-16T16:15:57.593839
2019-04-30T13:18:18
2019-04-30T13:18:18
183,155,401
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 18.5, "blob_id": "37ce752c3f1bce1028e3a76a4a60d04f9ee1f483", "content_id": "348e990bf065c9e463a983f7a3e95102490daed9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 38, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/run.sh", "repo_name": "joseflauzino/management_files", "src_encoding": "UTF-8", "text": "#/bin/bash\npython /management_api.py &" }, { "alpha_fraction": 0.6122850179672241, "alphanum_fraction": 0.6203930974006653, "avg_line_length": 29.601503372192383, "blob_id": "9e715abfa206fdce5f959af9dc964f739a62f486", "content_id": "eaa804b52e90dfdf75a36b2cccea39f4a48aefd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4070, "license_type": "no_license", "max_line_length": 267, "num_lines": 133, "path": "/management_api.py", "repo_name": "joseflauzino/management_files", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport json\nfrom flask import jsonify\nfrom flask import request\nfrom eve import Eve\nfrom eve.auth import BasicAuth, requires_auth\nfrom subprocess import check_output, call\nimport time\n\n# verify status of os.system call\nverify_status = lambda status: True if status == 0 else False\nos.system('touch status | chmod +x status') # Create status file\n\nFENDE_PATH = '/opt/fende/'\napp = Eve()\n\[email protected]('/click_plugin/event/<event_name>', methods=['POST'])\ndef function_event(event_name):\n \"\"\"Execute lifecycle events\"\"\"\n with open('Management/management.json') as f:\n data = json.load(f)\n\n for i in range(0,len(data['lifecycle_events'])):\n if(event_name == data['lifecycle_events'][i]['event']):\n event_path = data['lifecycle_events'][i]['lifecycle_events'][0] # Return the path of the event requested\n\n status = os.system(\"./Management/%s\" % event_path)\n 
return str(verify_status(status))\n\n\[email protected]('/click_plugin/install', methods=['POST'])\ndef install_function():\n \"\"\"Execute INSTALL script\"\"\"\n status = os.system(\"./Management/Scripts/install.sh\")\n return str(verify_status(status))\n\[email protected]('/click_plugin/start', methods=['POST'])\ndef start_function():\n \"\"\"Execute START script\"\"\"\n status = os.system(\"./Management/Scripts/start.sh\")\n \n if verify_status(status):\n\tis_running = open('status','w')\n\tis_running.write(\"1\")\n\tis_running.close()\n\n return str(verify_status(status))\n \[email protected]('/click_plugin/stop', methods=['POST'])\ndef stop_function():\n \"\"\"Execute STOP script\"\"\"\n status = os.system(\"./Management/Scripts/stop.sh\")\n \n if verify_status(status):\n\tis_running = open('status','w')\n\tis_running.write(\"0\")\n\tis_running.close()\n\t\n return str(verify_status(status))\n\[email protected]('/click_plugin/log', methods=['GET'])\ndef get_log():\n \"\"\"Return VNF logs\"\"\"\n with open('Management/management.json') as f:\n data = json.load(f)\n \n log_path = \"/var/log/syslog\" # Default log path\n if 'log_file' in data:\n\tlog_path = data['log_file'] # Log path defined by developer\n\n content = os.popen(\"tail -n 100 %s\" % log_path).read() # Return last 100 log lines\n return content\n \n\[email protected]('/click_plugin/running', methods=['GET'])\ndef get_running():\t\n \"\"\"Return if VNF is running\"\"\"\n is_running = open(\"status\", \"r\")\n status = is_running.read().rstrip()\n if status == '1': status = 0\n return str(verify_status(status))\n\n# Require tests\[email protected]('/click_plugin/write_file', methods=['POST'])\ndef write_file():\n repo = 'repo.zip'\n with open(repo, 'wb') as f:\n\tf.write(request.data)\n status = os.system('unzip repo.zip')\n return str(verify_status(status))\n\[email protected]('/click_plugin/metrics', methods=['GET'])\ndef get_metrics():\n memory = get_memory_usage()\n cpu = get_cpu_usage()\n rx = 
get_bandwidth_usage('rx')\n tx = get_bandwidth_usage('tx')\n return jsonify({\"time_ms\": 0, \"list\": [{\"id\": 0, \"value\": cpu, \"name\": \"CPU Usage\"},{\"id\": 1, \"value\": 0, \"name\": \"Disk Usage\"},{\"id\": 2, \"value\": memory, \"name\": \"Memory Usage\"},{\"id\": 3, \"value\": tx, \"name\": \"Net TX\"},{\"id\": 4, \"value\": rx, \"name\": \"Net RX\"}]})\n\ndef get_memory_usage():\n try:\n # execute 'free' command to get memory usage\n cmd = check_output(['free', '-t', '-m'])\n # get unused memory value (without swap)\n memory_usage_values = int(cmd.split('\\n')[1].split()[1:][2])\n return memory_usage_values\n except:\n return None\n\ndef get_cpu_usage():\n try:\n with open('/resources/cpu_usage', 'r') as f_cpu:\n cpu_usage_values = f_cpu.readlines()[-1].replace('\\n', '')\n return cpu_usage_values\n except:\n return None\n\ndef get_bandwidth_usage(output):\n try:\n with open('/resources/bandwidth_usage', 'r') as f_bw:\n rx, tx = f_bw.readlines()[-1].replace('\\n', '').split(' ')\n\tif (output == 'rx'):\n\t return rx\n\telse:\n\t return tx\n except:\n return None\n\n\nif __name__=='__main__':\n app.run(host='0.0.0.0', port=8000)\n" }, { "alpha_fraction": 0.7061994671821594, "alphanum_fraction": 0.7088949084281921, "avg_line_length": 32.818180084228516, "blob_id": "6e1c6e049268a849077518bb91c7abc101f652ac", "content_id": "cd956d37ac2e8a33e2ee460e931659e51a628417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 371, "license_type": "no_license", "max_line_length": 98, "num_lines": 11, "path": "/install.sh", "repo_name": "joseflauzino/management_files", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Instalando dependencias\napt-get -y install net-tools openssl zip unzip python python-pip && pip install flask eve requests\n\n# Criando usuario fende\n#name=\"fende\"\n#password=\"fende\"\n#useradd -m -d /home/$name -p $(openssl passwd -1 $password) -s /bin/bash -g sudo $name\n#chown -R $name 
/home/$name\n#sed -i \"/^root/ a $name ALL=NOPASSWD:ALL\" /etc/sudoers" } ]
3
SDSS-Computing-Studies/005a-tuples-vs-lists-karbstar
https://github.com/SDSS-Computing-Studies/005a-tuples-vs-lists-karbstar
ec67c8225a1b6ed87b26ab8a93d7f83e39ab415a
0b4d0a56e65f1b79e6a4c8d04425bbe4fe74363d
21cc5157395e509c15ad8b192413f72933e711b2
refs/heads/master
2023-08-23T04:14:49.867174
2021-10-12T21:48:15
2021-10-12T21:48:15
414,753,202
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6694915294647217, "alphanum_fraction": 0.6935028433799744, "avg_line_length": 18.63888931274414, "blob_id": "4d4bbfd435013ac0dcc4208d35d7a3a377539f32", "content_id": "8c1181be96f66c6c9b4ded885abafdf9aa57243d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 708, "license_type": "no_license", "max_line_length": 55, "num_lines": 36, "path": "/problem3.py", "repo_name": "SDSS-Computing-Studies/005a-tuples-vs-lists-karbstar", "src_encoding": "UTF-8", "text": "#! python3\n\"\"\"\nAsk the user to enter positive integers.\nAfter each entry, add the number to a list\nIf the entry is -1 then stop adding numbers to the list\nSort the list and display the highest number added\n\ninputs:\nas many integers as needed\n\noutputs:\nDisplay the largest number:\n\nexamples:\nEnter an integer:3\nEnter an integer:2\nEnter an integer:8\nEnter an integer:92\nEnter an integer:48\nEnter an integer:13\nEnter an integer:24\nEnter an integer:-1\n\nThe largest number you entered is 92\n\"\"\"\ntim=True\nlst=[]\ny=0\nwhile tim == True:\n x=int(input(\"enter a number=>\"))\n if x >= y :\n lst.append(x)\n else:\n tim=False\n d=max(lst)\n print(f\"The largest number you entered is {d}\")\n\n" } ]
1
Jerommaas/Thunderstruck
https://github.com/Jerommaas/Thunderstruck
f2dc727b0fc82069781db7dc604059e875eee301
d4fa1a096a2c8eb8d5374196f3ad2e9b46c6c1b4
d24de4a3597defcdd7a9cc71f13e82c490cad415
refs/heads/master
2020-03-25T11:52:33.856451
2018-11-05T22:52:17
2018-11-05T22:52:17
143,752,094
0
1
null
2018-08-06T16:02:39
2018-11-04T22:18:47
2018-11-05T22:52:18
Python
[ { "alpha_fraction": 0.5313289761543274, "alphanum_fraction": 0.5373926162719727, "avg_line_length": 28.08823585510254, "blob_id": "5f031c4ef54210d56f39d5731a33e0b00b67d6cb", "content_id": "775284cc9660ccf59f76970f29148e38e8aadfd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3958, "license_type": "no_license", "max_line_length": 79, "num_lines": 136, "path": "/Tools/terrain_generator/pandaWorld.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\n\nimport sys\nimport inspect\n\n# Panda \nfrom panda3d.core import *\nfrom direct.showbase.ShowBase import ShowBase\n\n# local\nimport main\nfrom data_object import *\nfrom camera import Camera\n\n\nP3D_WIN_WIDTH = 720\nP3D_WIN_HEIGHT = 560\n\n#\n# This class manages all objects in the scene, loads/saves, etc.\n#\n\nimport json\n\n\ndef str_to_class(str):\n return reduce(getattr, str.split(\".\"), sys.modules[__name__])\n\nclass PandaLoader(object):\n def __init__(self, world):\n self.world = world \n self.objects = []\n self.events = [] # list of user actions (add, move, edit object etc.)\n\n def load_scene(self, file):\n print( \"PandaLoader.load_scene():\\n\\t{}\".format( file ) )\n self.world.loadInitialEnv() # TEMP\n world = self.world # shorthand\n with open(file) as f:\n data = json.load(f)\n self.json_data = data\n world.name = data.get('name', '<world name>') \n world.version = data.get('version', 0) \n json_objects = data[\"objects\"]\n for obj in json_objects:\n # try to find this type\n try: \n subtype = globals()[obj[\"type\"] ] \n except:\n print( \"unknown type: {}\".format(obj[\"type\"]) )\n continue\n\n # check if this class is a subclass of data_object\n if issubclass(subtype, data_object):\n instance = subtype( obj[\"data\"] )\n instance.name = obj[\"name\"] \n self.objects.append(instance)\n else:\n print(\"type is not a subclass of data_type!\")\n \n # TODO(victor): super hacky, los dit goed op\n for obj in 
self.objects: \n if hasattr( obj, \"model\"):\n obj_file = obj.model\n if obj_file:\n print( \"loading model: {}\".format(obj_file) )\n model = loader.loadModel(obj_file) \n model.reparentTo(self.world.render)\n\n\n def save_scene(self, file):\n print( \"PandaLoader.save_scene():\\n\\t{}\".format( file ) ) \n world = self.world\n data = self.json_data\n data[\"version\"] = world.version+1\n data[\"name\"] = world.name\n data[\"objects\"] = []\n\n print( \"saving objects\")\n for obj in self.objects:\n json_data = dict()\n json_data[\"name\"] = obj.name\n json_data[\"type\"] = type(obj).__name__\n json_data[\"data\"] = obj.save()\n data[\"objects\"].append( json_data )\n \n with open(file, \"w\") as f:\n json.dump(data, f, indent=4)\n\n def load_object(self, file):\n obj = data_object.data_object()\n self.objects.append( obj )\n return obj\n\n\n \n#\n# this class constructs the panda frame, used for visualizing the current world\n# \n\nclass World(ShowBase): \n def __init__(self):\n ShowBase.__init__(self) \n self.loader = PandaLoader(self) \n self.accept(\"escape\", sys.exit)\n\n # add camera object\n self.cam = Camera(self)\n \n # fields from save file\n self.name = \"<world name>\"\n self.version = 0\n \n\n def loadInitialEnv(self):\n # Load the environment model.\n self.scene = loader.loadModel(\"environment\")\n # Reparent the model to render.\n self.scene.reparentTo(self.render)\n # Apply scale and position transforms on the model.\n s = 0.02\n self.scene.setScale(s,s,s)\n # self.scene.setPos(-8, 42, 0)\n \n def step(self):\n taskMgr.step()\n \n def bindToWindow(self, windowHandle):\n wp = WindowProperties().getDefault()\n wp.setOrigin(0,0)\n wp.setSize(P3D_WIN_WIDTH, P3D_WIN_HEIGHT)\n wp.setParentWindow(windowHandle)\n base.openDefaultWindow(props=wp )\n self.wp = wp\n\n\nif __name__ == \"__main__\":\n main.main()" }, { "alpha_fraction": 0.6313174962997437, "alphanum_fraction": 0.6352052092552185, "avg_line_length": 31.605634689331055, "blob_id": 
"2ad2509e6591300a4f4c41c00f134a65c151ab90", "content_id": "3a3708906970eb46110ef3b44de668cdd60d1de7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4630, "license_type": "no_license", "max_line_length": 113, "num_lines": 142, "path": "/Tests/victor_client_server/NetworkObject.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n'''\nClass containing all functions for sharing an object state between client and server, in either direction\n\nThis class has no knowledge of weither it is client or server side, \nnor in which direction the info is flowing\n'''\n\nimport json\n\n\nclass NetworkObject(object):\n \n MAX_NETWORK_OBJECTS_COUNT = 1000\n\n objectList = dict() # all objects, sending and receiving\n\n pendingObjectList = [] # newly created objects, will be removed by network manager \n networkObjectId = 0 # hold the id of any new NetworkObject\n\n def __init__(self): \n self.class_name = self.__class__.__name__ \n # NOTE: make sure derived objects can be default constructed! 
(no arguments in init except optional ones)\n NetworkObject.addObject(self)\n\n @staticmethod\n def addObject(obj):\n # TODO(victor): looping over all objects is not efficient, but will work for now \n NetworkObject.pendingObjectList.append(obj)\n for i in range( NetworkObject.MAX_NETWORK_OBJECTS_COUNT ):\n if not ( i in NetworkObject.objectList ):\n # id is not yet used\n NetworkObject.objectList[i] = obj\n break\n else:\n print(\"NetworkObject.addObject(): no index available!\")\n\n def toConstructMessage(self):\n # convert this object to some string representation\n return \"\"\n\n def fromConstructMessage(self, datastring):\n # convert a string representation into an object\n pass\n\n def toUpdate(self):\n # returns a string if an update was requried, \n # this string contains the update data\n # otherwise, returns None\n return None\n\n def fromUpdate( self, datastring ):\n # overwrite in derived class, apply updates to variables\n # by default, simply apply the json data to the dict\n # objects which are sending lots of updates might need a more effcient method\n self.__dict__.update( json.loads(datastring) ) \n\n\n def remove(self):\n # TODO(vicdie): alert the NetworkManager that this object needs to be removed\n raise NotImplementedError(\"NetworkObject.remove() not implemented!\")\n\n\nclass NetworkEvent(object):\n def __init__(self, data=None): \n self.class_name = self.__class__.__name__\n if data:\n self.fromMessage(data)\n def toMessage(self):\n return self.__dict__ \n def fromMessage(self, data):\n self.__dict__.update( data )\n\n'''\nTestclasses for NetworkObject and NetworkEvent\n'''\n \nclass NetworkPerson(NetworkObject):\n def __init__(self, name=\"<geen idee>\", age=0, posx=0, posy=0 ):\n # NOTE: this object is default constructable, none of the arguments is required\n super().__init__()\n \n self.name = name \n self.age = age\n self.posx = posx\n self.posy = posy\n \n def toConstructMessage(self):\n # convert this object to some dict 
representation \n return self.__dict__ \n\n def fromConstructMessage(self, data):\n # convert a dict representation into an object\n self.__dict__.update(data) \n\n def toUpdateMessage(self):\n # returns a dict containing the data if an update for this object was requried, \n # otherwise, returns None\n return { \"age\": self.age}\n\n def fromUpdateMessage( self, data ):\n # overwrite in derived class, apply updates to variables\n self.__dict__.update( data )\n\n def __repr__(self):\n return \"<networkPerson: name:{}, age:{}, x:{}, y:{}>\".format(self.name, self.age, self.posx, self.posy)\n\nclass eventTextmessage(NetworkEvent):\n def __init__(self, message=None, data=None):\n super().__init__(data=data)\n if data==None:\n # only set variables if no message data was used\n self.message = message\n def __repr__(self):\n return \"<eventTextMessage: {}>\".format(self.message)\n def toMessage(self):\n return self.__dict__ \n def fromMessage(self, data):\n self.__dict__.update( data )\n\n\n\n\nif __name__ == \"__main__\":\n\n serverPerson = NetworkPerson( name=\"Henk\", age=27, posx=3.14, posy=2.71 )\n message = serverPerson.toConstructMessage()\n clientPerson = NetworkPerson()\n clientPerson.fromConstructMessage( message )\n\n print( \"\\n=== construct ===\" )\n print( serverPerson )\n print( message )\n print( clientPerson )\n\n serverPerson.age = 28\n updateMessage = serverPerson.toUpdateMessage() \n clientPerson.fromUpdateMessage( updateMessage )\n\n print( \"\\n=== update ===\" )\n print( serverPerson )\n print( updateMessage )\n print( clientPerson )" }, { "alpha_fraction": 0.6310257911682129, "alphanum_fraction": 0.6328475475311279, "avg_line_length": 34.14285659790039, "blob_id": "aa74fed3ceaef4cc93a6f5c672cb933816e86d7f", "content_id": "dd5e4184a47ba42854cb9025f02499675086d34a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7136, "license_type": "no_license", "max_line_length": 131, "num_lines": 203, 
"path": "/Tools/terrain_generator/gui.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\n\n#\n# This class contains the user interface.\n# It also contains code to handle the panda frame\n#\n\nimport os \nimport sys\n\nimport main\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import * \n\n\nclass tab_file(QWidget):\n '''\n Handles loading, saving(, exporting?) of the scene \n '''\n def __init__(self, parent, pandaWorld=None): \n super(QWidget, self).__init__(parent) \n self.pandaWorld = pandaWorld \n self.layout = QVBoxLayout(self)\n\n # save button \n self.saveButton = QPushButton(\"save button\") \n self.saveButton.clicked.connect(self.saveDialog)\n self.layout.addWidget(self.saveButton)\n\n # load button\n self.loadButton = QPushButton(\"load button\") \n self.loadButton.clicked.connect(self.loadDialog)\n self.layout.addWidget(self.loadButton)\n\n # Export button\n self.exportButton = QPushButton(\"export button\") \n #self.exportButton.clicked.connect(self.exportButton)\n self.layout.addWidget(self.exportButton)\n\n # layout\n self.setLayout(self.layout)\n\n def saveDialog(self): \n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName = QFileDialog.getSaveFileName(self, 'Save File', \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n if fileName:\n print(\"save file as: {}\".format(fileName) )\n self.pandaWorld.loader.save_scene(fileName)\n\n def loadDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self,\"Load File\", \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n if fileName:\n print(\"load file: {}\".format(fileName) )\n print( self.pandaWorld )\n self.pandaWorld.loader.load_scene(fileName)\n\n def saveDialog(self): \n # TODO: export options \n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = 
QFileDialog.getSaveFileName(self, 'Export To ...', \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n if fileName:\n self.pandaWorld.loader.save_scene(fileName)\n \nclass tab_terrain(QWidget):\n '''\n Generation of terrain, including \n - heightmaps, \n - texture blending, \n - skyboxes, skybox objects (distant buildings/mountains etc.)\n '''\n def __init__(self, parent, pandaWorld=None): \n super(QWidget, self).__init__(parent) \n self.layout = QVBoxLayout(self)\n label = QLabel(self)\n label.setText(\"Terrain tab\")\n self.layout.addWidget(label)\n\ndef populateTree( tree, parent ):\n # TODO(victor): move to some util class\n for child in sorted(tree):\n child_item = QStandardItem(child)\n parent.appendRow(child_item)\n if isinstance(tree,dict):\n populateTree(tree[child], child_item)\n\n \nclass tab_object(QWidget):\n '''\n adding / editing / removing / animating objects\n '''\n def __init__(self, parent, pandaWorld=None): \n super(QWidget, self).__init__(parent) \n self.layout = QVBoxLayout(self)\n self.pandaWorld = pandaWorld\n\n # tree \n self.objectTreeView = QTreeView(self)\n\n # tree model\n self.treeview_model = QDirModel() \n self.treeview_model.setFilter( QDir.NoSymLinks | QDir.AllDirs | QDir.NoDotAndDotDot | QDir.Files | QDir.DirsFirst) \n self.treeview_model.setNameFilters( [\"*.egg\"] ) \n self.treeview_model.setSorting( QDir.Reversed)\n \n # TODO(victor): no hardcoded paths, get this at startup\n folder = \"C:/Users/Victor/Desktop/thunderstruck/Thunderstruck/Entities/\"\n print( \"opening treeview in: {}\".format(folder) )\n\n # \n self.objectTreeView.setModel(self.treeview_model)\n self.objectTreeView.setColumnHidden(1, True)\n self.objectTreeView.setColumnHidden(2, True)\n self.objectTreeView.setColumnHidden(3, True) \n self.objectTreeView.setRootIndex(self.treeview_model.index(folder))\n self.objectTreeView.setSortingEnabled(True) \n self.objectTreeView.setAnimated(False) \n 
self.objectTreeView.setSelectionMode(QAbstractItemView.SingleSelection) \n self.objectTreeView.selectionModel().selectionChanged.connect(self.treeSelectionChange)\n\n # button\n qdir = QDir(path=folder) \n self.pointlessButton = QPushButton(qdir.absolutePath()) \n\n # label\n self.label = QLabel(self)\n self.label.setText(\"<file path>\")\n\n # finalize\n self.layout.addWidget(self.pointlessButton)\n self.layout.addWidget(self.objectTreeView)\n self.layout.addWidget(self.label)\n self.setLayout(self.layout)\n \n def treeSelectionChange(self, index): \n print(\"Selection changed:\")\n for idx in self.objectTreeView.selectedIndexes(): \n indexItem = self.treeview_model.index(idx.row(), 0, idx.parent()) \n fileName = self.treeview_model.fileName(indexItem)\n filePath = self.treeview_model.filePath(indexItem)\n print( \"full path:\\t{}\\nfile: \\t\\t{}\".format(filePath, fileName))\n self.label.setText(filePath) \n # TODO(victor): detect if this is a file or a folder\n # TODO(victor): disable selecting folders\n\n\n\n\nclass tab_texture(QWidget):\n def __init__(self, parent, pandaWorld=None): \n super(QWidget, self).__init__(parent)\n self.layout = QVBoxLayout(self)\n label = QLabel(self)\n label.setText(\"Texture tab\")\n self.layout.addWidget(label)\n\n\nclass tab_game_elements(QWidget):\n '''\n In this window, things like finish lines, invisible walls, event triggers, etc. 
can be edited\n '''\n def __init__(self, parent, pandaWorld=None): \n super(QWidget, self).__init__(parent)\n self.layout = QVBoxLayout(self)\n self.pandaWorld = pandaWorld\n label = QLabel(self)\n label.setText(\"Game elements tab\")\n self.layout.addWidget(label)\n\nclass Gui(QWidget): \n def __init__(self, parent, pandaWorld=None): \n super(QWidget, self).__init__(parent)\n self.pandaWorld = pandaWorld\n\n self.layout = QVBoxLayout(self)\n \n # Initialize tab screen\n self.tabs = QTabWidget()\n\n self.tab_file = tab_file(self, pandaWorld=pandaWorld)\t\n self.tab_terrain = tab_terrain(self, pandaWorld=pandaWorld)\n self.tab_object = tab_object(self, pandaWorld=pandaWorld)\n self.tab_game_elements = tab_game_elements(self, pandaWorld=pandaWorld) \n \n self.tabs.resize(300,200) \n \n # Add tabs\n self.tabs.addTab(self.tab_file,\"File\")\n self.tabs.addTab(self.tab_terrain,\"Terrain\")\n self.tabs.addTab(self.tab_object,\"Objects\")\n self.tabs.addTab(self.tab_game_elements, \"Game elements\") \n \n # Add tabs to widget \n self.layout.addWidget(self.tabs)\n self.setLayout(self.layout)\n\n\nif __name__ == \"__main__\":\n import main\n main.main()" }, { "alpha_fraction": 0.5858747959136963, "alphanum_fraction": 0.591225266456604, "avg_line_length": 26.5, "blob_id": "b0fae481cd307bbc2eecc8b0db91ccb6b173135d", "content_id": "a91e862c5be0416b27598e2441fa6bec0f204477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1869, "license_type": "no_license", "max_line_length": 57, "num_lines": 68, "path": "/Controls/Keyboard.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "from direct.showbase.DirectObject import DirectObject\n\nclass Arrows(DirectObject):\n def __init__(self,ControlManager):\n self.CM = ControlManager\n self.LeftPressed = 0\n self.RightPressed = 0\n self.KeyBindings()\n\n def KeyBindings(self):\n self.accept('arrow_left',self.Left_press)\n 
self.accept('arrow_left-up',self.Left_rel)\n self.accept('arrow_right',self.Right_press)\n self.accept('arrow_right-up',self.Right_rel)\n\n self.accept('arrow_up',self.Up_press)\n self.accept('arrow_up-up',self.Up_rel)\n\n self.accept('arrow_down',self.Down_press)\n self.accept('arrow_down-up',self.Down_rel)\n\n # Remind:\n # self.Steer()\n # self.Throttle()\n # self.Brake()\n # are methods from superclass\n\n # Steering controls\n def Left_press(self):\n self.LeftPressed = 1\n self.LR_arrows()\n def Left_rel(self):\n self.LeftPressed = 0\n self.LR_arrows()\n def Right_press(self):\n self.RightPressed = 1\n self.LR_arrows()\n def Right_rel(self):\n self.RightPressed = 0\n self.LR_arrows()\n\n def LR_arrows(self):\n self.CM.Steer(self.LeftPressed-self.RightPressed)\n\n # Throttle controls\n def Up_press(self):\n self.CM.Throttle(1)\n def Up_rel(self):\n self.CM.Throttle(0)\n\n # Brake controls\n def Down_press(self):\n self.CM.Brake(1)\n def Down_rel(self):\n self.CM.Brake(0)\n\nclass WASD(Arrows):\n def KeyBindings(self):\n self.accept('a',self.Left_press)\n self.accept('a-up',self.Left_rel)\n self.accept('d',self.Right_press)\n self.accept('d-up',self.Right_rel)\n\n self.accept('w',self.Up_press)\n self.accept('w-up',self.Up_rel)\n\n self.accept('s',self.Down_press)\n self.accept('s-up',self.Down_rel)" }, { "alpha_fraction": 0.6459669470787048, "alphanum_fraction": 0.6537293195724487, "avg_line_length": 28.610000610351562, "blob_id": "23820d475fe3308a29e02e9fd13c670512f4b268", "content_id": "323f6e4423ecc9f7e187b2de823b56dc2990554e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2963, "license_type": "no_license", "max_line_length": 99, "num_lines": 100, "path": "/Tests/victor_client_server/client.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\n'''\nClient test class\n'''\nfrom sys import path\nfrom os import getcwd\nimport time\n\npath.append(getcwd() + 
\"\\\\..\\\\..\\\\Tools\\\\game_config\\\\\") # TODO(victor): check if indeed windows\nfrom config import Config\n\nfrom panda3d.core import QueuedConnectionManager\nfrom panda3d.core import QueuedConnectionListener\nfrom panda3d.core import QueuedConnectionReader\nfrom panda3d.core import ConnectionWriter\n\n# \nfrom panda3d.core import PointerToConnection\nfrom panda3d.core import NetAddress\n\nfrom panda3d.core import loadPrcFileData\nloadPrcFileData(\"\", \"window-type none\")\n# from direct.directbase import DirectStart\n# from direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task \n \nfrom panda3d.core import NetDatagram\n\nclass Client(object):\n def __init__( self, host=\"localhost\", port=5001, name=\"client\"): \n self.name = name\n \n self.cManager = QueuedConnectionManager()\n self.cReader = QueuedConnectionReader(self.cManager, 0)\n self.cWriter = ConnectionWriter(self.cManager,0)\n \n self.readerCallbacks = []\n\n taskMgr = Task.TaskManager()\n \n # how long until we give up trying to reach the server?\n timeout_in_miliseconds=3000 # 3 seconds\n \n self.myConnection = self.cManager.openTCPClientConnection(host,port,timeout_in_miliseconds)\n if not self.myConnection:\n print(\"{}: Failed to connect to server!\".format(self.name) )\n return\n\n self.cReader.addConnection(self.myConnection) # receive messages from server\n taskMgr.add(self.tskReaderPolling,\"Poll the connection reader\",-40)\n print(\"{}: Successfully connected to server {} at {}!\".format(self.name,port,host) )\n \n \n def tskReaderPolling(self,taskdata):\n # reader callback \n if not self.cReader.dataAvailable():\n return Task.cont\n \n # catch the incoming data in this instance\n # Check the return value; if we were threaded, someone else could have\n # snagged this data before we did\n datagram=NetDatagram() \n if not self.cReader.getData(datagram): \n return Task.cont\n\n for callback in self.readerCallbacks:\n callback( datagram ) \n \n return Task.cont\n\n def 
addReaderCallback( self, callbackFunction ):\n self.readerCallbacks.append( callbackFunction )\n\n def ProcessReaderData( self, data ):\n # TODO(vicdie): overwrite in derived classes \n pass\n\n def Close( self ):\n # close connection if it exists\n if self.myConnection:\n self.cManager.closeConnection(self.myConnection)\n\n\n\n\n\nif __name__ == \"__main__\":\n\n print(\"=== Start ===\")\n\n config = Config()\n\n client = Client( port=config[\"server\"][\"port\"], host=config[\"server\"][\"host\"] )\n\n tStart = time.time()\n while time.time() < tStart + 10:\n pass\n\n # close\n client.Close()\n print(\"=== Done! ===\")\n" }, { "alpha_fraction": 0.33037474751472473, "alphanum_fraction": 0.34812623262405396, "avg_line_length": 41.29166793823242, "blob_id": "5022b5eeb641e38975188696461654f198939f7e", "content_id": "f707e81ee4108fb1613e1ec63fd99d67611a6715", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 68, "num_lines": 24, "path": "/Tools/EulerAngles.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\n# Calculates Rotation Matrix given euler angles\ndef RotMatDeg(z,y,x):\n return RotMat(np.deg2rad(z),np.deg2rad(y),np.deg2rad(x))\ndef RotMat(ang_z, ang_y, ang_x) :\n R_x = np.array([[1, 0, 0 ],\n [0, math.cos(ang_x), -math.sin(ang_x) ],\n [0, math.sin(ang_x), math.cos(ang_x) ]\n ])\n \n R_y = np.array([[math.cos(ang_y), 0, math.sin(ang_y) ],\n [0, 1, 0 ],\n [-math.sin(ang_y), 0, math.cos(ang_y) ]\n ])\n \n R_z = np.array([[math.cos(ang_z), -math.sin(ang_z), 0],\n [math.sin(ang_z), math.cos(ang_z), 0],\n [0, 0, 1]\n ]) \n \n R = np.dot(R_z, np.dot( R_y, R_x ))\n \n return R, np.linalg.inv(R)" }, { "alpha_fraction": 0.6563574075698853, "alphanum_fraction": 0.6639665961265564, "avg_line_length": 32.6694221496582, "blob_id": "7d7e7b89785fb6e5aa988b2eda391917c30f747e", "content_id": 
"3a504aa65e536b34c6fec3af5263003037759dfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4074, "license_type": "no_license", "max_line_length": 97, "num_lines": 121, "path": "/Tests/victor_client_server/server.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "'''\nServer test class\n'''\nfrom sys import path\nfrom os import getcwd\nimport time\n\npath.append(getcwd() + \"\\\\..\\\\..\\\\Tools\\\\game_config\\\\\") # TODO(victor): check if indeed windows\nfrom config import Config\n\nfrom panda3d.core import QueuedConnectionManager\nfrom panda3d.core import QueuedConnectionListener\nfrom panda3d.core import QueuedConnectionReader\nfrom panda3d.core import ConnectionWriter\n\n# \nfrom panda3d.core import PointerToConnection\nfrom panda3d.core import NetAddress\n\nfrom panda3d.core import loadPrcFileData\nloadPrcFileData(\"\", \"window-type none\")\n# from direct.directbase import DirectStart\n# from direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task \n \nfrom panda3d.core import NetDatagram\nfrom panda3d.core import Datagram\n \nclass Server(object):\n \n # https://www.panda3d.org/manual/index.php/Client-Server_Connection\n \n def __init__( self, host=\"localhost\", port=5001 ):\n taskMgr = Task.TaskManager()\n\n self.cManager = QueuedConnectionManager()\n self.cListener = QueuedConnectionListener(self.cManager, 0)\n self.cReader = QueuedConnectionReader(self.cManager, 0)\n self.cWriter = ConnectionWriter(self.cManager,0) \n self.activeConnections = [] # We'll want to keep track of these later\n self.readerCallbacks = []\n\n backlog=1000 #If we ignore 1,000 connection attempts, something is wrong!\n self.tcpSocket = self.cManager.openTCPServerRendezvous(port,backlog)\n self.cListener.addConnection(self.tcpSocket)\n\n taskMgr.add(self.tskListenerPolling,\"Poll the connection listener\",-39)\n taskMgr.add(self.tskReaderPolling,\"Poll the connection 
reader\",-40)\n print(\"started server! ({} at {})\".format(port,host) )\n\n def Start( self ):\n # derived servers can overwrite this function if needed\n pass\n\n def tskListenerPolling(self,taskdata):\n # listen for new connections\n # TODO(victor): what happens if a client shuts down?\n # print(\"server.tskListenerPolling()\")\n if self.cListener.newConnectionAvailable(): \n rendezvous = PointerToConnection()\n netAddress = NetAddress()\n newConnection = PointerToConnection()\n if self.cListener.getNewConnection(rendezvous,netAddress,newConnection):\n newConnection = newConnection.p()\n self.activeConnections.append(newConnection) # Remember connection\n self.cReader.addConnection(newConnection) # Begin reading connection\n print(\"server: received new connection!\")\n return Task.cont\n \n def tskReaderPolling(self,taskdata):\n # reader callback \n if not self.cReader.dataAvailable():\n return Task.cont\n \n # catch the incoming data in this instance\n # Check the return value; if we were threaded, someone else could have\n # snagged this data before we did\n datagram=NetDatagram() \n if not self.cReader.getData(datagram): \n return Task.cont\n\n for callback in self.readerCallbacks:\n callback( datagram ) \n \n return Task.cont\n \n def addReaderCallback( self, callbackFunction ):\n self.readerCallbacks.append( callbackFunction )\n\n def BroadcastMessage(self, datagram):\n # send the same message to all clients\n for client in self.activeConnections:\n self.cWriter.send(datagram,client)\n\n def Close( self ):\n # remove all clients\n for client in self.activeConnections:\n self.cReader.removeConnection(client)\n self.activeConnections=[]\n \n # close down our listener\n self.cManager.closeConnection(self.tcpSocket)\n\n\n\nif __name__ == \"__main__\":\n\n print(\"=== Start ===\")\n\n config = Config()\n\n server = Server( port=config[\"server\"][\"port\"], host=config[\"server\"][\"host\"] )\n server.Start()\n\n tStart = time.time()\n while time.time() < 
tStart + 10:\n pass\n\n # close\n server.Close()\n print(\"=== Done! ===\")\n" }, { "alpha_fraction": 0.6151696443557739, "alphanum_fraction": 0.6227545142173767, "avg_line_length": 27.12359619140625, "blob_id": "9c2170490295faa1be6b839555b51845b206f9f2", "content_id": "738d407a0283297880151477c4762214c2a9beea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2505, "license_type": "no_license", "max_line_length": 94, "num_lines": 89, "path": "/Tests/victor_client_server/test.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\n'''\nTest using client and server\n\nstart up server, than 2 clients\nlet them both send and receive some stuff\n'''\n\nfrom sys import path\nfrom os import getcwd\nimport time\n\npath.append(getcwd() + \"\\\\..\\\\..\\\\Tools\\\\game_config\\\\\") \nfrom config import Config\n\nfrom client import Client\nfrom server import Server\nfrom panda3d.core import NetDatagram \nfrom panda3d.core import Datagram\n\nfrom direct.task import Task \n\nclass TestServer(Server):\n ''' Test sending a heartbeat to clients '''\n def __init__(self, host=\"localhost\", port=5001 ):\n super().__init__(host=host, port=port )\n\n def heartbeat(self ): \n myPyDatagram=Datagram() \n print(\"server: sending heartbeat to {} clients\".format(len(self.activeConnections) ) )\n self.BroadcastMessage( myPyDatagram )\n\n def ProcessReaderData(self, data): \n # Todo: figure out who sent it\n print(\"Server: receiving data\")\n\n\nclass TestClient(Client): \n ''' Test receiving heartbeat from server ''' \n def ProcessReaderData( self, data ):\n print(\"{}: reading data!\".format(self.name) )\n pass\n\n def SendMessage(self): \n print( \"{}: sending message to server\".format(self.name) )\n myPyDatagram=Datagram() \n self.cWriter.send(myPyDatagram,self.myConnection)\n\n\nif __name__ == \"__main__\":\n\n\n config = Config()\n\n port = config[\"server\"][\"port\"]\n host = 
config[\"server\"][\"host\"]\n\n # start server and clients\n server = TestServer( port=port, host=host )\n client1 = TestClient(port=port, host=host, name=\"Henk\" )\n client2 = TestClient(port=port, host=host, name=\"Bert\" )\n\n # run test\n # TODO(vicdie): run server and clients in separate threads, \n # move Task.TaskManager().step() stuff \n print(\"======= Server->Client =======\")\n\n tStart = time.time() \n tLastHearbeat = tStart\n while time.time() < tStart + 10:\n Task.TaskManager().step() # perform a step as often as possible\n if tLastHearbeat + 1 < time.time():\n server.heartbeat()\n tLastHearbeat = time.time()\n \n print(\"======= Client->Server =======\")\n\n tStart = time.time() \n tLastHearbeat = tStart\n while time.time() < tStart + 10:\n Task.TaskManager().step() # perform a step as often as possible\n if tLastHearbeat + 1 < time.time():\n client1.SendMessage()\n client2.SendMessage()\n tLastHearbeat = time.time()\n\n # close\n client1.Close()\n client2.Close() \n server.Close()\n" }, { "alpha_fraction": 0.5672754645347595, "alphanum_fraction": 0.5941110849380493, "avg_line_length": 29.146066665649414, "blob_id": "b9d647d64abe4d5a035acd4621608a8e262954be", "content_id": "12aeb11910c7c643b052d09cc293bccad155be57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2683, "license_type": "no_license", "max_line_length": 111, "num_lines": 89, "path": "/Entities/Objects/Trucks.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom Tools import *\n\nmodelfolder = \"Entities/Objects/Models/\"\n\nclass Basic:\n eggname = modelfolder+\"Truck01/truck_01.egg\"\n #eggname = modelfolder+\"Environment/pine_tree_01.egg\"\n\n # Static Class Properties\n turnradius = 15 #[m]\n sideacc = 10 #[m/s2] acceleration when steering max\n forwardacc = 10 #[m/s2] without air drag\n topspeed = 40 #[m/s]\n brakespeed = 1 #[s] from topspeed to full stop\n mass = 1000 
#[kg]\n\n # World Properties\n rho = 1.225 #[kg/m3] air density\n gravity = 9.81 #[m/s2]\n\n def __init__(self, World):\n self.Model()\n self.StartLocation()\n self.TruckParams()\n self.RotationMatrices()\n World.Clock.UpdateMe(self)\n\n def Model(self):\n self.m = loader.loadModel(self.eggname)\n self.m.reparentTo(render)\n\n def StartLocation(self):\n self.m.setPos(0,0,20)\n self.m.setHpr(0,0,0)\n\n def TruckParams(self):\n # User input\n self.Steer = 0\n self.Throttle = 0\n self.Brake = 0\n\n # Forces\n self.Fengine = self.mass * self.forwardacc\n self.Fbrake = self.mass * self.topspeed/self.brakespeed\n\n # Aerodynamics\n # At top speed: Fengine = 1/2 rho v2 Cd\n self.Cd = self.Fengine * 2 / self.rho / (self.topspeed**2)\n\n # Velocity\n self.Vbody = np.array([0.,0.,0.]) #[m/s]\n self.Vworld = np.array([0.,0.,0.]) #[m/s]\n\n def RotationMatrices(self):\n # Use attitude to compute Euler Transformation Matrices\n self.Truck2World, self.World2Truck = EulerAngles.RotMatDeg(self.m.getH(), self.m.getP(), self.m.getR())\n\n def Update(self,dt):\n # Get Euler Rotation Matrices\n Truck2World, World2Truck = EulerAngles.RotMatDeg(self.m.getH(), self.m.getP(), self.m.getR())\n\n # Perform turning\n Yaw = self.m.getH()\n turnrate = self.Steer * 360/4 # Hardcoded turnrate for now\n newYaw = Yaw + turnrate*dt\n self.m.setH(newYaw)\n\n # Only horizontal driving now, foeck gravity and terrain!\n Fdrag = 0.5 * self.rho * self.Vbody[1]**2 * self.Cd\n frontacc = (self.Throttle*self.Fengine - self.Brake*self.Fbrake - Fdrag)/self.mass\n # New velocity\n self.Vbody[1] = self.Vbody[1]+frontacc*dt\n self.Vbody[1] = max(self.Vbody[1],0)\n \n # Change frame of reference\n self.Vworld = np.dot(self.Vbody,self.World2Truck)\n\n\n # Update Position\n p = self.m.getPos()\n newP = np.array(p) + self.Vworld * dt\n self.m.setX(newP[0])\n self.m.setY(newP[1])\n\n self.m.setZ(newP[2])\n\n # New Rotation Matrices\n self.RotationMatrices()\n" }, { "alpha_fraction": 0.5833333134651184, 
"alphanum_fraction": 0.5833333134651184, "avg_line_length": 48, "blob_id": "0036d86e93c7a02127c51dad22a76fd8766fe84f", "content_id": "b22832da2335cb3568d910ecc32af0a51f677812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/Entities/__init__.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "__all__ = ['Terrain','skyDome','Clock','Camera']" }, { "alpha_fraction": 0.7215189933776855, "alphanum_fraction": 0.7215189933776855, "avg_line_length": 39, "blob_id": "32b66667430550f05c09070b8f94148fab981819", "content_id": "09eb1e0b84cade33a8a82a064ba6f3f76c9bb987", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 57, "num_lines": 2, "path": "/Controls/__init__.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "__all__ = ['Manager']\n# Keyboard, Controller and xinput are imported in Manager" }, { "alpha_fraction": 0.7594339847564697, "alphanum_fraction": 0.7665094137191772, "avg_line_length": 51.75, "blob_id": "5b849ad41845e74e2f083f9b8a94368dbdcfa170", "content_id": "e901e2451d22ebfcaa1970c2190d22663ee43c10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 424, "license_type": "no_license", "max_line_length": 127, "num_lines": 8, "path": "/README.md", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "# Thunderstruck\npanda3d blender\n\nHow to get model with texture exported to .egg; https://github.com/09th/YABEE/blob/master/YABEE_HowTo.pdf\n\n * make sure the game renderer is on in blender\n * under Default blender layout, (or at least in the properties tab on the right in this view), open the material pane, add one\n * under texture pane, on material, add texture. 
select desired texture below. make sure mapping uses uv. \n\n" }, { "alpha_fraction": 0.6375266313552856, "alphanum_fraction": 0.6492537260055542, "avg_line_length": 31.379310607910156, "blob_id": "f43b3cb1b63a8ce38c67eb0be514d11cc3686232", "content_id": "8cda5ca37ac0519af281c9d9ce5b691895e55bf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 938, "license_type": "no_license", "max_line_length": 86, "num_lines": 29, "path": "/Entities/Camera.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom direct.task import Task\n\nclass Camera:\n Dist2Truck = 35 #[m]\n Azimuth = 8 #[deg] position of camera wrt truck\n def __init__(self,World):\n # Stiekem bestaat er al een world.camera object\n self.World = World\n\n # Reference to the controlled truck!\n # Of course, must be replaced by networkcommunication\n self.Truck = World.Truck1\n\n # Relative position wrt Truck\n self.ComputePosition()\n\n def ComputePosition(self):\n AzRad = np.deg2rad(self.Azimuth)\n self.Position = self.Dist2Truck * np.array([0, -np.cos(AzRad), np.sin(AzRad)])\n\n def Update(self):\n TruckPosition = np.array(self.Truck.m.getPos())\n CamDistance = np.dot(self.Position,self.Truck.World2Truck)\n CamPosition = TruckPosition + CamDistance\n\n self.World.camera.setPos(tuple(CamPosition))\n\n self.World.camera.setHpr(self.Truck.m.getH(), 0, 0)" }, { "alpha_fraction": 0.6506943702697754, "alphanum_fraction": 0.6554860472679138, "avg_line_length": 34.796512603759766, "blob_id": "63d2b55929dacd025abd872361cd4e5cfefdf814", "content_id": "cba14b127c186a55a8baec017332d53e28241020", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12313, "license_type": "no_license", "max_line_length": 129, "num_lines": 344, "path": "/Tests/victor_client_server/testNetworkObject.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "# 
python packages\nfrom sys import path\nfrom os import getcwd\nimport time\nimport json \n\n# client / server\nfrom panda3d.core import QueuedConnectionManager\nfrom panda3d.core import QueuedConnectionListener\nfrom panda3d.core import QueuedConnectionReader\nfrom panda3d.core import ConnectionWriter\n\n# other panda stuff\nfrom panda3d.core import *\nfrom panda3d.direct import *\nfrom panda3d.core import NetDatagram\nfrom panda3d.core import Datagram\nfrom panda3d.core import PointerToConnection\nfrom panda3d.core import NetAddress\nfrom panda3d.core import loadPrcFileData\nloadPrcFileData(\"\", \"window-type none\")\nfrom direct.task import Task \n \n# self made packages\nfrom NetworkObject import NetworkObject, NetworkEvent, NetworkPerson, eventTextmessage\npath.append(getcwd() + \"\\\\..\\\\..\\\\Tools\\\\game_config\\\\\")\n# from config import Config\n\n\nclass ClientServerBase(object):\n '''\n Baseclass for client and server, \n these two have almost all functionality similar, except for who is responsible for opening / closing connections\n and the fact that server can have multiple connections\n '''\n def __init__(self, host=\"localhost\", port=5001, name=\"client or server\" ):\n self.name = name\n self.port = port\n self.host = host\n \n self.cManager = QueuedConnectionManager()\n self.cReader = QueuedConnectionReader(self.cManager, 0)\n self.cWriter = ConnectionWriter(self.cManager,0)\n \n self.connections = [] # list of connections, contains 1 item for client, multiple for server\n \n self.readerCallback = None # will be called when a new message arrives\n self.writerCallback = None # will be called when a message needs to be constructed\n\n self.taskMgr = Task.TaskManager()\n self.taskMgr.add(self.tskReaderPolling,\"Poll the connection reader\",-40)\n self.taskMgr.add(self.tskWriterPolling,\"Send data package\",-39)\n\n\n def setReaderCallback( self, callbackFunction ):\n self.readerCallback = callbackFunction \n\n def setWriterCallback( self, 
callbackFunction ):\n self.writerCallback = callbackFunction \n \n def tskReaderPolling(self,taskdata):\n\n # reader callback \n if not self.cReader.dataAvailable():\n # print( \"tskReaderPolling(): no data available!\" )\n return Task.cont\n\n datagram=NetDatagram() \n if not self.cReader.getData(datagram): \n print( \"tskReaderPolling(): cannot claim data!\" )\n return Task.cont\n\n if self.readerCallback:\n print( \"tskReaderPolling():readerCallback()\" )\n self.readerCallback( datagram ) \n \n return Task.cont\n\n def tskWriterPolling( self , data ): \n \n if not self.writerCallback:\n return Task.cont\n\n data = self.writerCallback()\n if data == None:\n return Task.cont\n \n\n assert(isinstance(data,Datagram))\n\n print( \"tskWriterPolling() sending to : {}\".format(len(self.connections) ))\n\n for con in self.connections:\n if con:\n print( \"tskWriterPolling() sending\" )\n self.cWriter.send(data,con)\n\n return Task.cont\n \n\n def Close( self ):\n # close each of the connections\n for c in self.connections:\n self.cManager.closeConnection(c) \n\n def ProcessReaderData( self, data ): \n raise NotImplementedError(\"overwrite ProcessReaderData() in client/server implementation\")\n\n\nclass Client(ClientServerBase):\n def __init__( self, host=\"localhost\", port=5001, name=\"client\"): \n super().__init__( host=host, port=port, name=name)\n # self.setReaderCallback( sel.ProcessReaderData )\n\n timeout_in_miliseconds=3000 # 3 seconds\n self.connections.append( self.cManager.openTCPClientConnection(self.host,self.port,timeout_in_miliseconds) )\n if not self.connections:\n print(\"{}: Failed to connect to server!\".format(self.name) )\n return\n \n for con in self.connections:\n if con:\n self.cReader.addConnection(con) \n else:\n print( \"failed to add connection!\" )\n\n self.taskMgr.add(self.tskReaderPolling,\"Poll the connection reader\",-40)\n\n\nclass Server(ClientServerBase): \n def __init__( self, host=\"localhost\", port=5001 , name=\"server\" ):\n 
super().__init__( host=host, port=port, name=name)\n # self.setReaderCallback( sel.ProcessReaderData )\n\n backlog=1000 \n self.tcpSocket = self.cManager.openTCPServerRendezvous(port,backlog)\n self.cListener = QueuedConnectionListener(self.cManager, 0)\n self.cListener.addConnection(self.tcpSocket)\n\n self.taskMgr.add(self.tskListenerPolling,\"Poll the connection listener\",-39) \n \n def tskListenerPolling(self,taskdata): \n if self.cListener.newConnectionAvailable(): \n rendezvous = PointerToConnection()\n netAddress = NetAddress()\n newConnection = PointerToConnection()\n if self.cListener.getNewConnection(rendezvous,netAddress,newConnection):\n newConnection = newConnection.p()\n self.connections.append(newConnection) # Remember connection\n self.cReader.addConnection(newConnection) # Begin reading connection\n print(\"server: received new connection!\")\n return Task.cont\n\n # TODO(victor): what happens when a connection drops away?\n\n def BroadcastMessage(self, datagram):\n # send the same message to all clients\n for con in self.connections:\n self.cWriter.send(datagram,con)\n \n # def ProcessReaderData( self, data ):\n # # TODO(vicdie): overwrite in derived classes \n # pass\n\n def Close( self ):\n # remove all clients\n for con in self.connections:\n self.cReader.removeConnection(con)\n self.connections=[]\n self.cManager.closeConnection(self.tcpSocket)\n\n\n'''\nNOTE: base network manager\ncontains functions for packing all managed objects in a string\nand extracting objects from this string\n\nThe functions for sending and receiving new objects are almost identical on server and client side\n'''\n\nclass NetworkManager(object):\n ''' \n Network manager baseclass, \n Defines the interface of derived objects, takes care of all generic stuff\n '''\n\n class MessageEnum:\n EVENT = 1\n CONSTRUCT = 2\n UPDATE = 3\n DESTRUCT = 4\n\n def __init__(self, client_or_server):\n self.client_or_server = client_or_server\n\n self.eventQueue = [] # events which will 
need to be sent in the next event update\n self.newObjectQueue = [] # objects for which a construction message needs to be sent\n self.destructObjectQueue = [] # objects for which a destruction message needs to be sent\n \n self.client_or_server.setReaderCallback( self.readerCallback )\n self.client_or_server.setWriterCallback( self.writerCallback )\n \n self.managedNetworkObjects = [] # objects owned by this NetworkManager\n self.sharedNetworkObjects = dict() # self.clientNetworkObjects[connection] = dict(id, object)\n self.receivedNetworkEvents = dict() \n\n def readerCallback( self, data ):\n print(\"reader callback!\" )\n # data contains two fields: 1 containing the message type (EVENT, CONSTRUCT, UPDATE, DESTRUCT)\n # the other is a string, containing the actual data (in json format for now)\n sender = data.getConnection()\n iterator = DatagramIterator(data)\n messageEnum = iterator.getUint8()\n\n if not sender in self.receivedNetworkEvents:\n self.receivedNetworkEvents[sender] = []\n\n if messageEnum == NetworkManager.MessageEnum.EVENT:\n # received an event message\n messageString = iterator.getString()\n messageJsonData = json.loads( messageString )\n for obj in messageJsonData: \n # class_type = globals()[ obj[\"class_name\"] ] #introspection\n # class_instance = class_type( data=obj ) \n class_instance = globals()[obj[\"class_name\"]]( data=obj ) \n print( \"received event: {}\".format(class_instance) )\n # Add new event to list, keep track of who sent it (sender)\n self.receivedNetworkEvents[sender].append( class_instance )\n\n\n def writerCallback( self ):\n\n # if there are no events queued, return None\n if not self.eventQueue:\n return None\n\n # collect data\n data = []\n for event in self.eventQueue: \n data.append( event.toMessage() )\n self.eventQueue = []\n\n # make a message for the events\n myPyDatagram=Datagram() \n myPyDatagram.add_uint8( NetworkManager.MessageEnum.EVENT )\n datastring = json.dumps(data) \n print( datastring )\n 
myPyDatagram.add_string(datastring)\n\n print(\"server: writer callback\")\n return myPyDatagram\n\n def add(self, newObject ):\n self.managedNetworkObjects.append( newObject )\n self.newObjectQueue.append( newObject)\n # self.sendNewObject(newObject)\n print(\"NetworkManager: added object! {}\".format(newObject) ) \n\n def remove( self, removeObject ):\n self.managedNetworkObjects.remove( removeObject )\n self.destructObjectQueue.append( removeObject )\n print(\"NetworkManager: removed object! {}\".format(removeObject) ) \n\n def addEvent( self, event ):\n assert(isinstance(event,NetworkEvent)) # check correct type\n self.eventQueue.append(event) \n print(\"NetworkManager: sending new event! {}\".format(event) ) \n \n\n'''\nNOTE: client and server network manager are almost identical. \nThe client implementation also gets a dictionary with connections, even though there is only one (server)\nThis is to keep the underlying code exactly the same, so that as much of the code as possible is shared between client and server\n'''\n\nclass ClientNetworkManager(NetworkManager):\n def __init__(self, client):\n super().__init__(client)\n\n def addEvent( self, event ):\n super().addEvent( event )\n # TODO(victor): Do client specific stuff when a new event is sent\n\n def readerCallback( self, data ):\n super().readerCallback( data )\n print(\"ClientNetworkManager: reader callback\" ) \n # TODO(victor): Do client specific stuff when new data arrives\n\nclass ServerNetworkManager(NetworkManager):\n def __init__(self, server):\n super().__init__(server)\n\n def addEvent( self, event ):\n super().addEvent( event )\n # TODO(victor): Do server specific stuff when a new event is sent\n\n def readerCallback( self, data ):\n super().readerCallback( data )\n print(\"ServerNetworkManager: reader callback\" ) \n # TODO(victor): Do server specific stuff when new data arrive\n\n\n\n\n'''\n'''\n\n\n\nif __name__ == \"__main__\":\n \n server = Server()\n serverManager = 
ServerNetworkManager(server)\n\n client = Client()\n clientManager = ClientNetworkManager(client)\n\n # TODO(victor): maybe we want to provide the networkobject with the sendingNetworkManager that it belongs to\n # This way, when the object is destructed, we don't need to tell the network manager about it\n person1 = NetworkPerson() \n\n # TODO(vicdie): this is needed for as long as client and server are not separately running \n # a static field can be added to the NetworkObject, which points to the NetworkManager where new ones need to register\n serverManager.add( person1 )\n # make some changes to person1\n serverManager.remove( person1 )\n\n # make sure the receiving side obtains a new NetworkPerson and a text message\n\n b_send = True\n\n tStart = time.time() \n while time.time() < tStart + 2:\n\n if time.time() > tStart + 1 and b_send:\n b_send = False \n textMessage = eventTextmessage( \"server says hoi\" )\n serverManager.addEvent( textMessage ) \n textMessage2 = eventTextmessage( \"server says dag\" )\n serverManager.addEvent( textMessage2 )\n\n Task.TaskManager().step() # perform a step as often as possible\n\n print(\"bla\")" }, { "alpha_fraction": 0.668973982334137, "alphanum_fraction": 0.6766651272773743, "avg_line_length": 40.67948532104492, "blob_id": "a824402688e8255541c14f709b6bff0d36cd5548", "content_id": "90356392144a157e9955ee5d30c4fe65e04f485e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6501, "license_type": "no_license", "max_line_length": 161, "num_lines": 156, "path": "/Entities/Terrain.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "from panda3d.core import GeoMipTerrain, Texture, TextureStage,SamplerState\nfrom direct.task import Task\nimport sys\n\nimport os\nclass Terrain:\n folder = os.path.dirname(os.path.abspath(__file__))\n subfolder = \"/Maps/\"\n file = \"simple.jpg\"\n filepath = folder+subfolder+file\n def __init__(self):\n fn = 
Filename.fromOsSpecific(self.filepath)\n self.terrain = GeoMipTerrain(\"mySimpleTerrain\")\n self.terrain.setHeightfield(\"Entities/Maps/heightmap.png\")\n self.terrain.getRoot().setSz(40)\n #terrain.setBruteforce(True)\n self.terrain.getRoot().reparentTo(render)\n\n # Set terrain properties\n self.terrain.setBlockSize(16)\n self.terrain.setNear(500)\n self.terrain.setFar(100)\n self.terrain.setFocalPoint(base.camera)\n \n # Store the root NodePath for convenience\n root = self.terrain.getRoot()\n root.reparentTo(render)\n\n # some tinkering\n \"\"\"\n # tell renderer to repeat texture when reading over the edge.\n texGrass.setWrapU(Texture.WM_repeat)\n texGrass.setWrapV(Texture.WM_repeat)\n # apply mipmapping: tell renderer how to handle multiple texture pixels being rendered t a single screen pixel (makes textures 30% larger in GPU mem.)\n texGrass.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n \"\"\"\n self.terrain.generate()\n \n\n\n\n\n \"\"\"\n new attempt to include blend mapping:\n \"\"\"\n # determine terrain size\n self.heightmap = self.terrain.heightfield()\n if self.heightmap.getXSize() > self.heightmap.getYSize():\n self.size = self.heightmap.getXSize()-1\n else:\n self.size = self.heightmap.getYSize()-1\n self.xsize = self.heightmap.getXSize()-1\n self.ysize = self.heightmap.getYSize()-1\n\n # Set multi texture\n # Source http://www.panda3d.org/phpbb2/viewtopic.php?t=4536\n self.generateSurfaceTextures()\n \n \n # load a blend texture from file:\n self.blendTexture = loader.loadTexture(\"Entities/Maps/blendMap.png\")\n \n self.blendTS = TextureStage('blend')\n self.blendTS.setSort(0)\n self.blendTS.setPriority(1)\n # apply textures to the terrain and connect custom shader for blend mapping:\n self.setSurfaceTextures() \n\n\n\n\n\n\n\n # Add a task to keep updating the terrain (for changing terrain, or synamic resolution)\n def updateTask(task):\n self.terrain.update()\n return task.cont\n taskMgr.add(updateTask, \"update\")\n\n # this is where 
we load the textures to be assigned to the terrain\n def generateSurfaceTextures(self):\n # Textureize\n self.grassTexture = loader.loadTexture(\"Entities/Maps/grassy2.png\")\n self.grassTexture.setWrapU(Texture.WMRepeat)\n self.grassTexture.setWrapV(Texture.WMRepeat)\n self.grassTexture.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n self.grassTexture.setAnisotropicDegree(8)\n self.grassTS = TextureStage('grass')\n self.grassTS.setSort(1) # sorting order is relevent for assigning textures to the four \n \n self.rockTexture = loader.loadTexture(\"Entities/Maps/simple.jpg\")\n self.rockTexture.setWrapU(Texture.WMRepeat)\n self.rockTexture.setWrapV(Texture.WMRepeat)\n self.rockTexture.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n #self.grassTexture.setAnisotropicDegree(8)\n self.rockTS = TextureStage('rock')\n self.rockTS.setSort(2)\n # self.rockTS.setCombineRgb(TextureStage.CMAdd, TextureStage.CSLastSavedResult, TextureStage.COSrcColor, TextureStage.CSTexture, TextureStage.COSrcColor)\n \n self.sandTexture = loader.loadTexture(\"Entities/Maps/stars.png\")\n self.sandTexture.setWrapU(Texture.WMRepeat)\n self.sandTexture.setWrapV(Texture.WMRepeat)\n self.sandTexture.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n #self.sandTexture.setAnisotropicDegree(8)\n self.sandTS = TextureStage('sand')\n self.sandTS.setSort(3)\n self.sandTS.setPriority(5) # TODO: figure out what this is for...\n \n self.snowTexture = loader.loadTexture(\"Entities/Maps/grass.png\")\n self.snowTexture.setWrapU(Texture.WMRepeat)\n self.snowTexture.setWrapV(Texture.WMRepeat)\n self.snowTexture.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n #self.snowTexture.setAnisotropicDegree(8)\n self.snowTS = TextureStage('snow')\n self.snowTS.setSort(4)\n self.snowTS.setPriority(0)\n \n # a background (or rather freground?) 
texture that will be present independently from the blend map (consider removal)\n self.overlayTexture = loader.loadTexture(\"Entities/Maps/heightmap.png\")\n self.overlayTexture.setWrapU(Texture.WMRepeat)\n self.overlayTexture.setWrapV(Texture.WMRepeat)\n self.overlayTexture.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n #self.overlayTexture.setAnisotropicDegree(8)\n self.overlayTS = TextureStage('overlay')\n self.overlayTS.setSort(5)\n self.overlayTS.setPriority(10)\n\n\n\n # this is where we assign loaded textures to be blended in the shader. \n def setSurfaceTextures(self):\n self.ownerview = False\n root = self.terrain.getRoot()\n root.clearTexture()\n #self.terrain.setTextureMap()\n root.setTexture( self.blendTS, self.snowTexture ) # this texture determines where the other textures are visible\n\n root.setTexture( self.grassTS, self.snowTexture )\n #root.setTexScale(self.grassTS, self.size*5, self.size*5) # I try to make the texture 20 times smaller then the blend map...\n\n root.setTexture( self.rockTS, self.snowTexture ) #rockTexture\n #root.setTexScale(self.rockTS, self.size*5, self.size*5) \n\n root.setTexture( self.sandTS, self.snowTexture) #sandTexture\n #root.setTexScale(self.sandTS, self.size*5, self.size*5) \n\n root.setTexture( self.snowTS, self.snowTexture ) #snowTexture\n #root.setTexScale(self.snowTS, self.size*5, self.size*5) \n\n #(consider removal)\n root.setTexture( self.overlayTS, self.overlayTexture ) #overlayTexture\n #root.setTexScale(self.overlayTS, self.xsize, self.ysize)\n\n root.setShaderInput('size', self.xsize, self.ysize, self.size, self.size)\n root.setShader(loader.loadShader('Entities/Maps/terrainblender.sha'))" }, { "alpha_fraction": 0.56548672914505, "alphanum_fraction": 0.569026529788971, "avg_line_length": 33.24242401123047, "blob_id": "6d84f561eba6538e255565644e263486fcb13c40", "content_id": "51c6bf02d20a2fcb82c81e5a3dad44262c2c382c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 82, "num_lines": 33, "path": "/Entities/Clock.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "from direct.task import Task\n\nFPS = 30\nclass Clock():\n def __init__(self,World):\n self.World = World\n self.dt = 1./FPS\n self.dtcounter = 0\n # List of objects that have an Update(dt) function that needs to be called\n self.UpdateList = []\n \n\n # The task for our simulation\n def simulationTask(task):\n # Add the deltaTime for the task to the accumulator\n self.dtcounter += globalClock.getDt()\n while self.dtcounter > self.dt:\n # Remove a stepSize from the accumulator until\n # the accumulated time is less than the stepsize\n self.dtcounter -= self.dt\n # Step the simulation\n for Obj in self.UpdateList:\n Obj.Update(self.dt)\n # Camera position is only updated once per frame!\n self.World.Camera.Update()\n return task.cont\n\n taskMgr.add(simulationTask, \"Physics Simulation\")\n \n def UpdateMe(self,Obj):\n # Add the Object to the list of appendables\n \n self.UpdateList.append(Obj)\n" }, { "alpha_fraction": 0.4859641194343567, "alphanum_fraction": 0.49516797065734863, "avg_line_length": 25.475608825683594, "blob_id": "dfba2b20525fbcd0c79cfe46c601344569eb0fcb", "content_id": "26f210a116b179d64699b92eff5a9a7a605be415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2173, "license_type": "no_license", "max_line_length": 109, "num_lines": 82, "path": "/Tools/terrain_generator/data_object.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\n\nimport main\nimport json\n\n\nclass data_object(object):\n def __init__(self, json_data):\n # check data\n self.first_field = 5\n self.second_field = 6\n self.load(json_data) \n\n def load( self , json_data ):\n self.__dict__.update(json_data) # by default, add all fields to dict\n\n def save( self ):\n # returns a json object of 
all relevant fields within this object\n json_data = { }\n return json_data\n\n\nclass terrain_object(data_object):\n \"\"\"\n Class for describing a terrain\n \"\"\"\n def __init__(self, json_data):\n self.textures = [\"<some texture file>\", \"<some other texture file>\"] # TODO(victor) multiple textures\n self.height = \"<some height map>\"\n self.blend = \"<some blend map>\"\n self.xyz = [0,0,0]\n self.rot = [0,0,0]\n return super().__init__(json_data)\n\n def save( self ):\n json_data = { \"textures\": self.textures, \n \"height\": self.height, \n \"blend\": self.blend, \n \"xyz\": self.xyz, \n \"rot\": self.rot}\n return json_data\n\n\nclass panda_object(data_object):\n \"\"\"\n Default object\n \"\"\"\n def __init__(self, json_data):\n # set default values\n self.model = \"<some .egg file>\"\n self.xyz = [0,0,0]\n self.rot = [0,0,0]\n # override with data in json\n return super().__init__(json_data)\n \n def save( self ): \n json_data = { \"model\": self.model, \n \"xyz\": self.xyz, \n \"rot\": self.rot}\n return json_data\n\n\n\nclass skybox_object(data_object):\n \"\"\"\n skybox object\n \"\"\"\n def __init__(self, json_data):\n # set default values\n self.model = \"<some .egg file>\"\n self.xyz = [0,0,0]\n self.rot = [0,0,0]\n # override with data in json\n return super().__init__(json_data)\n \n def save( self ): \n json_data = { \"model\": self.model, \n \"xyz\": self.xyz, \n \"rot\": self.rot}\n return json_data\n\n\nif __name__ == \"__main__\":\n main.main()" }, { "alpha_fraction": 0.5364496111869812, "alphanum_fraction": 0.5492487549781799, "avg_line_length": 33.57692337036133, "blob_id": "fe61e07d1295a334c78284f8684a80b40f3dc305", "content_id": "011757064b7f715a6f0372a0335f0792f2d63086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1797, "license_type": "no_license", "max_line_length": 68, "num_lines": 52, "path": "/Controls/Manager.py", "repo_name": "Jerommaas/Thunderstruck", 
"src_encoding": "UTF-8", "text": "from direct.showbase.DirectObject import DirectObject\nfrom direct.task import Task\nimport sys\nfrom . import Keyboard\nfrom . import Controller\n#from . import xinput\nfrom operator import attrgetter\n\nclass ControlManager(DirectObject):\n def __init__(self,World):\n self.accept('escape', sys.exit)\n\n # Reference to the controlled truck!\n # Of course, must be replaced by networkcommunication\n self.Truck = World.Truck1\n\n # Start different control inputs\n self.Arrows = Keyboard.Arrows(self)\n self.WASD = Keyboard.WASD(self)\n #self.SearchForControllers()\n\n def SearchForControllers(self):\n # Init the search for controllers\n def SearchTask(task):\n joys = xinput.XInputJoystick.enumerate_devices()\n if joys:\n # Joystick found: assume that it is X360\n self.X360 = Controller.X360(self,joys[0])\n return task.done\n else:\n task.delayTime = 3\n return task.again\n taskMgr.doMethodLater(0.1,SearchTask,'Search Controller')\n\n #########################################################\n # These functions must be replaced by handles to client comm\n #########################################################\n def Steer(self, value):\n # Set steering direction: [-1,1] [left, right]\n print(\"Steer\", value)\n self.Truck.Steer = value\n\n def Throttle(self, value):\n # Set throttle: [0,1] [idle, full]\n print(\"Throttle\", value*100,'%')\n self.Truck.Throttle = value\n\n def Brake(self, value):\n # Set Brake: [0,1] [none, full]\n print(\"Brake\", value)\n self.Truck.Brake = value\n #########################################################" }, { "alpha_fraction": 0.5810113549232483, "alphanum_fraction": 0.5810113549232483, "avg_line_length": 25.135135650634766, "blob_id": "efd6aacf177070fa639f49c10b9b8df683143087", "content_id": "2ca8e750940dc96b903567d5313fcffd9d040f9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "no_license", "max_line_length": 73, 
"num_lines": 37, "path": "/Tools/game_config/config.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\n'''\nRead config file, provide an interface for easily reading the data\n\nusage:\nconfig = Config(\"some_file.json\")\nclient_port = config[\"client\"][\"port\"]\n'''\nimport os\nimport json\n\n\nclass Config(object):\n def __init__( self, file=\"config_file.json\" ):\n # Read file\n here = os.path.dirname(os.path.abspath(__file__))\n full_path = os.path.normpath(os.path.join(here, file))\n with open( full_path, 'r') as f:\n self.json_config = json.load(f)\n\n def __getitem__(self, key):\n # get key\n try:\n return self.json_config[key]\n except:\n print( \"ERROR: attempting to obtain config[{}]\".format(key) )\n raise # propagate error\n\n\nif __name__ == \"__main__\":\n\n config_file = \"config_file.json\"\n config = Config( config_file )\n\n client_port = config[\"client\"][\"port\"]\n client_host = config[\"client\"][\"host\"]\n\n print( \"port: {}; host: {};\".format(client_port,client_host) )\n" }, { "alpha_fraction": 0.40909090638160706, "alphanum_fraction": 0.40909090638160706, "avg_line_length": 20, "blob_id": "8d34e3bd2920d5a1b8f8e41dc8ce0731d823dca5", "content_id": "1b01f969e4c59150d8d4404414d5de6ec2f82b5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/Entities/Objects/__init__.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "__all__ = ['Trucks']\n\n" }, { "alpha_fraction": 0.5574603080749512, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 34.818180084228516, "blob_id": "3f2dd2b7632740c439d8812d5042bdcaf5804d8e", "content_id": "5df0a8415c2dc8474f42fbb73ed7b9134d5890fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1575, "license_type": "no_license", "max_line_length": 88, 
"num_lines": 44, "path": "/Controls/Controller.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "from direct.task import Task\n\nclass X360():\n deadzone = 0.4\n def __init__(self, ControlManager,Controller):\n self.CM = ControlManager\n self.Controller = Controller\n self.Controller._last_state = self.Controller.get_state()\n self.KeyBindings()\n \n def CheckController(task):\n state = self.Controller.get_state()\n if not state:\n # Start searching again for the controller!\n self.CM.SearchForControllers()\n return task.done\n if state.packet_number != self.Controller._last_state.packet_number:\n self.Controller.handle_changed_state(state)\n self.Controller._last_state = state\n return task.cont\n\n taskMgr.add(CheckController)\n\n def KeyBindings(self):\n # Bind the X360 controller buttons to game scripts\n @self.Controller.event\n def on_axis(axis, value):\n if axis==\"l_thumb_x\": # Left stick, horizontal direction\n if abs(value)<self.deadzone:\n self.CM.Steer(0)\n else:\n self.CM.Steer(min(1.,value*-2)) # Axis values range from [-0.5, 0.5]\n\n @self.Controller.event\n def on_button(button, pressed):\n if button == 13: # A button\n self.CM.Throttle(pressed)\n elif button ==14: # B button\n self.CM.Brake(pressed)\n\nclass SteamController(X360):\n def KeyBindings(self):\n # TODO: Find the key bindings for the Steam Controller\n pass" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 25, "blob_id": "83d56d94cb5672c6180345d05bb936b19e3b615d", "content_id": "352d68a1f4a8b30d2ea2d354f5787eb6fe7ef3be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/Tools/__init__.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "__all__ = ['EulerAngles']" }, { "alpha_fraction": 0.5479204058647156, "alphanum_fraction": 
0.5485231876373291, "avg_line_length": 22.714284896850586, "blob_id": "f8159fb928ee5b25a3a3622dd893834288723948", "content_id": "2cf5273765671088eab2b996d18b0489c1c56e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1659, "license_type": "no_license", "max_line_length": 60, "num_lines": 70, "path": "/main.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "from direct.showbase.ShowBase import ShowBase\n\n# Als je nieuwe modules hebt voor bij *, voeg de verwijzing \n# toe in __init__.py van de module!\nfrom Controls import *\nfrom Entities import * \nfrom Entities.Objects import *\nimport sys\n\nprint(sys.version)\nclass Thunderstruck_server():\n def __init__(self):\n pass\n\n # Entities\n # Load terrain hgtmap\n # Load Objects \n \n # Game Logic\n # Receive control input from client\n # Game Goals/Rules\n\n # Physics\n # Define truck behavior\n\n # Output\n # Send data to clients\n\nclass Thunderstruck_client(ShowBase):\n def __init__(self,server):\n ShowBase.__init__(self)\n\n # Init the Global Clock\n self.Clock = Clock.Clock(self)\n \n # ----- ENTITIES -----\n # World\n self.Terrain = Terrain.Terrain()\n self.SkyDome = skyDome.skyDome()\n #Light Sources? 
(or weather, see also particles)\n \n # Objects\n self.Truck1 = Trucks.Basic(self)\n \n # Particles\n # Sparks, fire, LIGHTNING\n\n self.Camera = Camera.Camera(self)\n # User input\n self.CM = Manager.ControlManager(self)\n \n # Server communication\n # Send User controls\n # Receive Trucks locations\n\n # Graphics\n # Renderer\n # Camera behavior\n # GUI frontend\n\n # Sound\n # Music Player\n # Sound effects\n\nif __name__ == \"__main__\":\n server = Thunderstruck_server()\n #server.run()\n\n client = Thunderstruck_client(server)\n client.run()" }, { "alpha_fraction": 0.6897048354148865, "alphanum_fraction": 0.6994240283966064, "avg_line_length": 64.37647247314453, "blob_id": "8be3d292ac39519c0adcf981eaa8fbb9fedbb34d", "content_id": "2346251f55fe49fb8379a02f8219fbf955828979", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5556, "license_type": "no_license", "max_line_length": 218, "num_lines": 85, "path": "/Entities/skyDome.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "from panda3d.core import Texture, TextureStage, DirectionalLight, AmbientLight, TexGenAttrib, VBase4\nfrom panda3d.core import ColorBlendAttrib, LPoint3, LVector4\nfrom direct.filter.CommonFilters import CommonFilters\nfrom panda3d.core import PandaNode, NodePath\nimport sys\nimport os\n\nclass skyDome:\n def __init__(self):\n ##-- load a skydome from a bam file --##\n # this skydome is a small inverted sphere (inverted = back culling makes it transparent outside-in instead of inside-out)\n # that is wrapped around the camera (you can see what's happening by turning on base.oobe(), with togles out of body experience mode)\n # the camera is set as parent, such that the dome will stay centered around the camera.\n # compass makes sure that rotations of the camera are ignored, allowing you to look around the skydome.\n # the sphere is kept small, but disabling depth buffer and ensuring it is the first thing 
added to the render buffer alllows us to create the illusion that it is infinitely far away.\n # note: SkySphere.bam has to be be re-created for each Panda3D version. you can do so by running sky Sphere.py\n\n # load inverted sphere model, to texture\n #self.SkyDome = loader.loadModel(\"Entities/Maps/skydome1/InvertedSphere.egg\")\n self.SkyDome = loader.loadModel(\"Entities/Maps/skydome1/spacedome.egg\")\n # create 3D texture coordinates on sphere\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MWorldPosition) #* Copies the (x, y, z) position of each vertex, in world space, to the (u, v, w) texture coordinates.\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MEyePosition) # Copies the (x, y, z) position of each vertex, in camera space, to the (u, v, w) texture coordinates.\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MWorldNormal) # Copies the (x, y, z) lighting normal of each vertex, in world space, to the (u, v, w) texture coordinates.\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MEyeNormal) # Copies the (x, y, z) lighting normal of each vertex, in camera space, to the (u, v, w) texture coordinates.\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MEyeSphereMap) #* Generates (u, v) texture coordinates based on the lighting normal and the view vector to apply a standard reflection sphere map.\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MEyeCubeMap) # Generates (u, v, w) texture coordinates based on the lighting normal and the view vector to apply a standard reflection cube map.\n #self.SkyDome.setTexGen(TextureStage.getDefault(), TexGenAttrib.MWorldCubeMap) # Generates (u, v, w) texture coordinates based on the lighting normal and the view vector to apply a standard reflection cube map.\n\n\n\n #self.SkyDome.setTexProjector(TextureStage.getDefault(), render, self.SkyDome) # should only be needed when projectibf cube map to sphere...\n\n # create a 
cube map texture from 6 separate textures: (# should run 0-5)\n #tex = loader.loadCubeMap('Entities/Maps/skydome1/lakes_#.png')\n # or: get a pre-wrapped texture from the interwebs\n scene = loader.loadTexture('Entities/Maps/skydome1/14-Hamarikyu_Bridge_B_8k.jpg') #new\n \n\n #ts = TextureStage('ts')\n #self.SkyDome.setTexGen(ts, TexGenAttrib.MWorldPosition) # old\n #self.SkyDome.setTexGen(ts, TexGenAttrib.MEyeSphereMap) # new\n #self.SkyDome.setTexProjector(ts, render, self.SkyDome) # old\n # ts.setMode(TextureStage.MModulateGlow) # old\n\n # and give it to inverted sphere\n #self.SkyDome.setTexture(TextureStage.getDefault(),scene)\n #self.SkyDome.setTexture(ts,tex)\n\n #TODO: make sure that this cube map and .eeg model are loaded from a BAM file for faster loading. (and don't forget to re-set textProjector after loading!)\n # load model (sphere + texture)\n #self.SkyDome = loader.loadModel(\"SkySphere.bam\")\n # tell renderer how to project the texture to this sphere\n #self.SkyDome.setTexProjector(TextureStage.getDefault(), render, self.SkyDome) \n\n # origen of model is on the surface. Let's move to the centre \n # (and make it a little larger to prevent it from intersecting the camera's fustrum)\n self.SkyDome.setPos(0,0.5,0)\n self.SkyDome.setScale(2)\n # and slave it to the camera\n self.SkyDome.wrtReparentTo(camera) # note: cam vs. camera! (cam will make skydome look normal even in oobe mode)\n # altough parented by camera, tell to ignore camera rotations:\n self.SkyDome.setCompass()\n # tell renderer to use it as background (i.e. 
first to be rendered), and exclude it from depth buffer\n self.SkyDome.set_bin(\"background\", 0)\n self.SkyDome.set_depth_write(0)\n # ignore light effects?\n self.SkyDome.setLightOff()\n\n #base.oobe()\n #render.setShaderAuto()\n #filters = CommonFilters(base.win, base.cam)\n #filterok = filters.setBloom(blend=(0, 0, 0, 1), desat=-0.5, mintrigger =0.1, intensity=8.0, size=\"medium\")\n\n # add some light\n \n dlight = DirectionalLight('dlight')\n alight = AmbientLight('alight')\n dlnp = render.attachNewNode(dlight)\n alnp = render.attachNewNode(alight)\n dlight.setColor((0.2, 0.2, 0.2, 1))\n alight.setColor((0.7, 0.7, 0.7, 1))\n dlnp.setHpr(60, -60, 0)\n render.setLight(dlnp)\n render.setLight(alnp)" }, { "alpha_fraction": 0.6544041633605957, "alphanum_fraction": 0.6683937907218933, "avg_line_length": 25.397260665893555, "blob_id": "8967c45437690008c814f9e709affe030589b7d9", "content_id": "7e26289ffe4d886fed37326ba3fb6e1ed5ebdfc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1930, "license_type": "no_license", "max_line_length": 102, "num_lines": 73, "path": "/Tools/terrain_generator/main.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": " \n\n\nimport sys\n\n# Panda\nfrom panda3d.core import * \nloadPrcFileData(\"\", \"window-type none\") \nfrom direct.showbase.DirectObject import DirectObject\nfrom panda3d.core import WindowProperties\n\n# QT\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n# local\nimport gui\nfrom pandaWorld import World\n\ngetModelPath().appendDirectory('/c/Panda3D-1.9.4-x64/models/') \n\n\n\nclass QTPandaWidget(QWidget):\n\tdef __init__(self, parent=None):\n\t\tsuper(QWidget, self).__init__(parent)\n\t\tself.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))\n\t\t\n\tdef resizeEvent(self, evt):\n\t\twp = WindowProperties()\n\t\twp.setSize(self.width(), 
self.height())\n\t\twp.setOrigin(self.x(),self.y())\n\t\tbase.win.requestProperties(wp)\n\t\n\tdef minimumSizeHint(self):\n\t\treturn QSize(400,300)\n\nclass QTMainWindow(QDialog):\n def __init__(self, pandaWorld=None, parent=None):\n super(QDialog, self).__init__(parent)\n self.setWindowTitle(\"Test\")\n s = 80\n self.setGeometry(0,0,21*s,9*s)\n \n self.pandaContainer = QTPandaWidget(self) \n \n layout = QHBoxLayout()\n layout.addWidget(self.pandaContainer) \n user_interface = gui.Gui(self, pandaWorld=pandaWorld)\n layout.addWidget(user_interface)\n \n self.setLayout(layout) \n self.pandaWorld = pandaWorld\n pandaWorld.bindToWindow(int(self.winId())) # window.pandaContainer.winId() or window.winId()? \n # this basically creates an idle task\n # TODO(victor): run panda in separate thread if possible\n self.timer = QTimer(self)\n self.timer.timeout.connect( pandaWorld.step )\n self.timer.start(0.01)\n\n\n\ndef main():\n pandaWorld = World()\n\n app = QApplication(sys.argv)\n window = QTMainWindow(pandaWorld=pandaWorld) \n window.show()\n\n # ensure both qt and panda close\n sys.exit(app.exec_())\n \n \nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.49675944447517395, "alphanum_fraction": 0.5158215761184692, "avg_line_length": 26.030927658081055, "blob_id": "0f33ac4e075c407e6a99f5a33dc980be0fc5f933", "content_id": "5417944babea76f7a070c17880566f825a354804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2623, "license_type": "no_license", "max_line_length": 101, "num_lines": 97, "path": "/Tools/terrain_generator/camera.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n\nimport time \nimport numpy as np\n\nfrom direct.task import Task\n\nimport main\n\n#\n# Camera\n#\n\nclass Camera(object):\n def __init__(self, world):\n self.world = world\n self.world.accept( \"w\", self.move_y, [1] )\n self.world.accept( \"s\", self.move_y, [-1])\n self.world.accept( \"a\", 
self.move_x, [-1] )\n self.world.accept( \"d\", self.move_x, [1] )\n\n self.world.accept( \"w-up\", self.move_y,[ 0])\n self.world.accept( \"s-up\", self.move_y, [0])\n self.world.accept( \"a-up\", self.move_x, [0])\n self.world.accept( \"d-up\", self.move_x, [0])\n\n self.time = time.time()\n self.pos = np.array( [0,0,1.8] )\n self.rot = np.array( [0,0,0] )\n\n self.control = np.array( [0,0] )\n\n # print( self.world.camera) \n # self.world.camera.setFov(120) \n\n # camera control\n taskMgr.add(self.tick, 'TickCameraMovement')\n\n def move_x(self, x):\n self.control[0] = x\n\n def move_y(self, y):\n self.control[1] = y\n\n def get_forward_vector(self):\n cam = self.world.camera\n yaw = np.deg2rad( cam.get_h() )\n # pitch = np.deg2rad( cam.get_p() )\n # return np.array( [np.cos(yaw) , np.sin(yaw), 0 ])\n return self.world.camera.getQuat().getForward()\n\n def get_right_vector(self):\n forward = self.get_forward_vector()\n right = np.cross( forward, np.array([0,0,1]) )\n return right / np.linalg.norm( right )\n\n\n\n\n def tick(self, task ):\n t_cur = time.time()\n dt = t_cur - self.time\n cam = self.world.camera\n\n vmax = 3.6\n\n yaw_deg = cam.get_h()\n pitch_deg = cam.get_p()\n roll_deg = cam.get_r()\n\n yaw = np.pi * yaw_deg / 180.\n pitch = np.pi * pitch_deg / 180.\n roll = np.pi * roll_deg / 180.\n\n # cam.set_p( t_cur )\n f = np.pi / 2\n forward = np.array( [np.cos(yaw), np.sin(yaw), 0 ]) # TODO(victor): calculate correct vectors\n right = np.array( [np.cos(yaw+f), np.sin(yaw+f), 0])\n \n # self.pos = self.pos + (dt * self.control[0] * right )\n # self.pos = self.pos + (dt * self.control[1] * forward)\n\n self.pos = self.pos + (dt * self.control[0] * self.get_right_vector() )\n self.pos = self.pos + (dt * self.control[1] * self.get_forward_vector() )\n \n # cam.setPos( *self.pos ) \n cam.set_x( self.pos[0] )\n cam.set_y( self.pos[1] )\n cam.set_z( self.pos[2] )\n \n # cam.set_p( )\n cam.set_r( 0 )\n\n self.time = t_cur\n return task.cont\n\n\nif __name__ == 
\"__main__\":\n main.main()" }, { "alpha_fraction": 0.6446043252944946, "alphanum_fraction": 0.6482014656066895, "avg_line_length": 35.394737243652344, "blob_id": "11f1edd3ea294c5613fa2a70330c7ed85d048a19", "content_id": "e77cff8a9286001d0c6bd4c6a90e85a0cd8884cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 95, "num_lines": 38, "path": "/Tools/terrain_generator/picker.py", "repo_name": "Jerommaas/Thunderstruck", "src_encoding": "UTF-8", "text": "\n#\n# this class contains the picker node, which handles everything related to selecting 3d objects\n#\n#\n\n\nclass picker(object):\n def __init__(self, mouse, render, camera):\n self.mouse = mouse\n self.render = render \n self.camera = camera\n # see:\n # http://www.panda3d.org/manual/index.php/Clicking_on_3D_Objects\n \n self.pickerNode = CollisionNode('mouseRay')\n pickerNP = camera.attachNewNode(self.pickerNode)\n self.pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask())\n self.pickerRay = CollisionRay()\n self.pickerNode.addSolid(self.pickerRay)\n self.traverser.addCollider(self.pickerNP, self.handle_picker() )\n\n \n def handle_picker(self):\n self.set_picker_ray()\n self.traverser.traverse(self.render)\n # Assume for simplicity's sake that myHandler is a CollisionHandlerQueue.\n if myHandler.getNumEntries() > 0:\n # This is so we get the closest object\n myHandler.sortEntries()\n pickedObj = myHandler.getEntry(0).getIntoNodePath()\n\n def set_picker_ray(self):\n # First we check that the mouse is not outside the screen.\n if not base.mouseWatcherNode.hasMouse():\n return\n\n mpos = base.mouseWatcherNode.getMouse()\n self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())\n\n\n " } ]
27
umtkas/yumidoDictionary
https://github.com/umtkas/yumidoDictionary
0a890dfe77b04fef2ea0c952dbd1a674ed0b4ab8
632ca4766ecf1fe873e23d134d08652c430e5927
0bf79b74c7516a02e1a701b6290ef1749cd5c9ca
refs/heads/master
2016-04-13T23:12:21.076349
2015-10-27T00:27:56
2015-10-27T00:27:56
41,797,434
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.609166145324707, "alphanum_fraction": 0.6136218905448914, "avg_line_length": 30.420000076293945, "blob_id": "13d69e792bde7491784bb4a3bc8d38b694ee1842", "content_id": "e2291366b03abb2ea3b02c9f1c91e17acf781f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1571, "license_type": "no_license", "max_line_length": 104, "num_lines": 50, "path": "/getmeanings.py", "repo_name": "umtkas/yumidoDictionary", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n__author__ = 'umitkas'\n\n\nimport sqlite3\n\nclass GetMeanings(object):\n\n def __init__(self):\n super(GetMeanings, self).__init__()\n self.database_name = 'english_turkish.db'\n self.connectDatabase()\n\n def connectDatabase(self):\n try:\n self.database_connection = sqlite3.connect(self.database_name)\n self.database_cursor = self.database_connection.cursor()\n\n except sqlite3.DatabaseError as ex:\n print(\"Database Error :: {}\".format(ex))\n def disconnectDatabase(self):\n try:\n self.database_connection.close()\n except sqlite3.Error as ex:\n print(\"Disconnection Error :: {}\".format(ex))\n\n def searchWordEnglishToTurkish(self, word):\n\n meanings = ''\n __search = (word,)\n __sql_query = \"select english_word, turkish_meanings from dictionary where english_word = ?\"\n self.database_cursor.execute(__sql_query, __search)\n\n result = self.database_cursor.fetchone()\n\n if result is None: # check the value exist or not\n return 'Nothing is Found'\n\n return list(result)[1]\n\n\n def searchWordTurkishToEnglish(self, word):\n english_meaning = ''\n __search = (word, )\n __sql_query = \"select english_word, turkish_meanings from dictionary where turkish_meanings = ?\"\n self.database_cursor.execute(__sql_query, __search)\n result = self.database_cursor.fetchone()\n if result is None:\n return \"Nothing is Found\"\n return list(result)[1]\n" }, { "alpha_fraction": 0.606965184211731, "alphanum_fraction": 0.6158098578453064, 
"avg_line_length": 30.736841201782227, "blob_id": "c1759472e7e0ef7bfe4c14025fea4e6de404973f", "content_id": "01afd15a52e84946959d025adee96bf87980c161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1809, "license_type": "no_license", "max_line_length": 118, "num_lines": 57, "path": "/yumidodict.py", "repo_name": "umtkas/yumidoDictionary", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n__author__ = 'umitkas'\n\n\nimport getmeanings\nfrom sys import exit\ntry:\n import clipboard\nexcept:\n print('clipboard module does not exist in python3\\n package link : https://pypi.python.org/pypi/clipboard/0.0.4 ')\n exit(0)\ntry:\n import notify2\nexcept:\n print('notify2 module does not exist in python3\\n package link : https://pypi.python.org/pypi/notify2/')\n exit(0)\n\nclass YumidoDictionary(getmeanings.GetMeanings):\n\n\n def __init__(self):\n super(YumidoDictionary, self).__init__()\n self.selected_word = \"\"\n notify2.init(\"YumidoDictionary\")\n self.notification = notify2.Notification(\"Yumido Dictionart is Started\",\"\")\n self.notification.show()\n\n def getSelectedWord(self):\n previous = \"\"\n while True:\n try:\n self.selected_word = clipboard.paste().lower()\n if len(self.selected_word) != 0 and previous != self.selected_word and self.selected_word.isalpha():\n string_meanings = self.searchWordEnglishToTurkish(self.selected_word)\n self.updateNotification(self.selected_word, self.splitMeanings(string_meanings))\n previous = self.selected_word\n time.sleep(1)\n except:\n print(' Program is Closed')\n exit(0)\n\n def updateNotification(self, word, string_meaning):\n self.notification.update(word, string_meaning)\n self.notification.show()\n\n def splitMeanings(self, meanings):\n string_meanings = '\\n'\n meanings = meanings.split(', ')\n for meaning in meanings:\n string_meanings += '{}\\n'.format(meaning)\n return string_meanings\n\n\n\nif __name__ == \"__main__\":\n y = 
YumidoDictionary()\n y.getSelectedWord()\n" }, { "alpha_fraction": 0.811274528503418, "alphanum_fraction": 0.8284313678741455, "avg_line_length": 30.384614944458008, "blob_id": "322fac0ec26d15d7c2f5c3dfb4597c1758996089", "content_id": "c0cba3c791b759122415dc058bd3850a3fa01636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 428, "license_type": "no_license", "max_line_length": 117, "num_lines": 13, "path": "/README.md", "repo_name": "umtkas/yumidoDictionary", "src_encoding": "UTF-8", "text": "# yumidoDictionary\n\npython3 üzerine kurulması gereken paketler.\n\nclipboard\nlink : https://pypi.python.org/pypi/clipboard/0.0.4\n\nnotify2\nlink : https://pypi.python.org/pypi/notify2/\n\nProgramı terminal üzerinden python3 yumidodict.py dosyasını çalıştırarak başlatabilirsiniz.\nSonrasında yapmanız gerecek herhangi bir yerden ctrl + c kısayoluyla kopyaladığınız metni sağ tarafta bildirim olarak\ngörebilirsiniz.\n" } ]
3
sgrkmr/parse-lecture-links
https://github.com/sgrkmr/parse-lecture-links
57cec9821faf1b9c69d45d09ed1d5a91aa4cce3b
83bd6926af0db879c169921739e41e2b22230cb0
70838a7694819b3ce4a4bad39bc6921f74077af5
refs/heads/master
2023-01-11T06:35:11.476230
2020-11-11T15:21:06
2020-11-11T15:21:06
300,212,731
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6244897842407227, "alphanum_fraction": 0.6244897842407227, "avg_line_length": 28.625, "blob_id": "93ae6b7f1b720eef5414d4073bd68ef8689b5999", "content_id": "1401ad0d07c7c32ebc636e1365dc8a8fad344344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 86, "num_lines": 16, "path": "/src/tools/data_scraping.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "import urllib.request\r\nimport json\r\nimport urllib\r\n\r\n\r\ndef YoutubeTitle(id):\r\n VideoID = id\r\n params = {\"format\": \"json\", \"url\": \"https://www.youtube.com/watch?v=%s\" % VideoID}\r\n url = \"https://www.youtube.com/oembed\"\r\n query_string = urllib.parse.urlencode(params)\r\n url = url + \"?\" + query_string\r\n\r\n with urllib.request.urlopen(url) as response:\r\n response_text = response.read()\r\n data = json.loads(response_text.decode())\r\n return data[\"title\"]\r\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 27, "blob_id": "11a6e59f22ca5b1179511cd15aba44f92f16c64b", "content_id": "25399ef5c59c55f76f4164d22f6d4490eadc68da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/src/tools/__init__.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "from . 
import data_scraping\n" }, { "alpha_fraction": 0.5728813409805298, "alphanum_fraction": 0.5796610116958618, "avg_line_length": 16.352941513061523, "blob_id": "32ccdc791465aa0eb85a7421787dcef6b4c27eed", "content_id": "c1199ba27671e0e4084f8dd823d07da2d5e18bbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 295, "license_type": "no_license", "max_line_length": 68, "num_lines": 17, "path": "/README.md", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "<h1 align=\"center\">\n parse-lecture-links\n</h1>\n<p align=\"center\">\n <sub>\n\tScrapes lecture links from WhatsApp Chat file.\n </sub><br/>\n <sub>\n <b>!!</b> This repo is intended for a specific use case only\n </sub>\n</p>\n\n---\n## Usage\n```console\n$ python -m src.core <file>\n```\n" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 23, "blob_id": "b171f0b7895a1c89a41b3f09f3c1b225ff4fa84e", "content_id": "897add3581e0fc827e509e0889b5dc78b3f2f410", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/src/lib/__init__.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "from . 
import whatsChat\n" }, { "alpha_fraction": 0.34007585048675537, "alphanum_fraction": 0.35429835319519043, "avg_line_length": 39.6315803527832, "blob_id": "78c6f94f2d696f60e3e65a27c0ade35eaf27449a", "content_id": "8dddb0ac425034fef33a1fd7ebd090bafa69bc4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3166, "license_type": "no_license", "max_line_length": 164, "num_lines": 76, "path": "/legacy/script.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "# DEPRECATED\r\nfrom wchat import chat\r\nimport re\r\n\r\n# parsing recorded lectures links\r\nch_path = \"\"\r\nparsedData = chat(ch_path).get()\r\nprint(f\"source: {ch_path}\")\r\nrecLectureData = []\r\nfor i, j in enumerate(parsedData):\r\n recSearch = re.search(\"https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec\", j[-1])\r\n if recSearch:\r\n dataBuffer = []\r\n dataBuffer.append(j[-1])\r\n # link\r\n try:\r\n if recSearch.start() == 0:\r\n dataBuffer.append(parsedData[i + 1][-1])\r\n else:\r\n if re.search(\"[lL]ecture\", parsedData[i + 1][-1]) and not re.search(\r\n \"https\", parsedData[i + 1][-1]\r\n ):\r\n dataBuffer.append(parsedData[i + 1][-1])\r\n else:\r\n dataBuffer.append(None)\r\n except:\r\n dataBuffer.append(None)\r\n # description of link\r\n dt = j[0].split(\"/\")\r\n dt[0], dt[1] = dt[1], dt[0]\r\n # swapping dates and months position\r\n dataBuffer.append(\"/\".join(dt))\r\n # date\r\n recLectureData.append(dataBuffer)\r\n\r\nprint(\"export lectures links?(Y/N): \", end=\"\")\r\nchoice = input()\r\nif re.match(\"(^[yY]\\w{0,0}$)\", choice):\r\n while 1:\r\n print(\"file name: \", end=\"\")\r\n exp_name = input()\r\n if re.match(\"(^[a-zA-Z]\\w{0,26}$)\", exp_name):\r\n while 1:\r\n print(\"file type?(md/txt): \", end=\"\")\r\n fl_type = input()\r\n if re.match(r\"(^(\\b(txt)\\w{0,0}\\b)|(\\b(md)\\w{0,0}\\b)$)\", fl_type):\r\n with open(exp_name + \".\" + fl_type, \"w\") as f:\r\n mode = 0\r\n if fl_type == 
\"md\":\r\n mode = 1\r\n f.write(\"####Recent ↑\\n\")\r\n for i, j in enumerate(recLectureData[::-1]):\r\n pos = (len(recLectureData) - 1) != i\r\n if mode:\r\n f.write(f\"**`{j[2]}`**\\n\")\r\n if re.match(\"(^http)\", j[0]):\r\n f.write(f\"> [{j[1]}]({j[0]})\")\r\n else:\r\n lnk = re.search(\r\n \"(((https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec)(.*)(startTime=[\\d]+))|((https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec\\/share\\/)([^\\s]+)))\",\r\n j[0],\r\n ).group(1)\r\n if j[1] == None:\r\n msg = j[0].replace(lnk, \"\").strip()\r\n f.write(f\"> [{msg}]({lnk})\")\r\n else:\r\n f.write(f\"> [{j[1]}]({lnk})\")\r\n if pos:\r\n f.write(\"\\n\\n\")\r\n else:\r\n f.write(f\"{j[0]}\\n{j[1]}\")\r\n break\r\n print(\"invalid!\")\r\n break\r\n print(\"invalid!\")\r\n print(f\"saved as {exp_name+'.'+fl_type}\")\r\n" }, { "alpha_fraction": 0.45882657170295715, "alphanum_fraction": 0.47426658868789673, "avg_line_length": 39.78494644165039, "blob_id": "ebd06bc3754ec8219aba2f39ad38456ef9341055", "content_id": "a6aca3d21ff3a3cb3d7618f5c7ddfd9135b4c29b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3886, "license_type": "no_license", "max_line_length": 115, "num_lines": 93, "path": "/src/lib/whatsChat.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "import re, csv\r\n\r\n\r\nclass ChatParser:\r\n def __init__(self, path: str) -> None:\r\n \"\"\"'parses raw WhatsApp export chat into usable format\"\"\"\r\n self.parsedData = []\r\n with open(path, encoding=\"utf-8\") as file:\r\n file.readline()\r\n # skipping first line of the file (end-to-end encryption msg)\r\n messageBuffer = []\r\n # for multi-line messages\r\n date, time, author = None, None, None\r\n\r\n while True:\r\n line = file.readline()\r\n if not line:\r\n # stop reading further if end of file has been reached\r\n # appending data from last iteration before break\r\n self.parsedData.append(\r\n [date, time, author, \" 
\".join(messageBuffer)]\r\n )\r\n break\r\n line = line.strip()\r\n if self._startsWithDateTime(line):\r\n # If a line starts with a date time pattern, then this indicates the beginning of a new message\r\n if len(messageBuffer) > 0:\r\n # check if the message buffer contains characters from previous iterations\r\n self.parsedData.append(\r\n [date, time, author, \" \".join(messageBuffer)]\r\n )\r\n # save the data from the previous message in parsedData\r\n messageBuffer.clear()\r\n # clearing for next message\r\n date, time, author, message = self._getData(line)\r\n messageBuffer.append(message)\r\n else:\r\n messageBuffer.append(line)\r\n # if a line doesn't start with a date time (i.e. the message is in continuation)\r\n\r\n @property\r\n def get(self) -> list:\r\n return self.parsedData\r\n\r\n def export_csv(\r\n self, file: str, header=[\"Date\", \"Time\", \"Author\", \"Message\"]\r\n ) -> None:\r\n assert isinstance(header, (list, str)) and len(header) == 4, \"Improper header!\"\r\n with open(file, \"w+\") as export:\r\n csv.writer(export).writerows([header] + self.get())\r\n\r\n @staticmethod\r\n def _startsWithDateTime(s: str) -> bool:\r\n \"\"\"'regex to identify date time pattern\"\"\"\r\n pattern = \"^(([1-9])|((0)[0-9])|((1)[0-2]))(\\/)([1-9]|[0-2][0-9]|(3)[0-1])(\\/)(\\d{2}|\\d{4}), \"\r\n \"([1-9]|[0-9][0-9]):([0-9][0-9]) ((PM)|(AM)) -\"\r\n result = re.match(pattern, s)\r\n if result:\r\n return True\r\n return False\r\n\r\n @staticmethod\r\n def _startsWithAuthor(s: str) -> bool:\r\n \"\"\"regex to identify author of an message\"\"\"\r\n patterns = [\r\n \"([\\w]+):\", # first name\r\n \"([\\w]+[\\s]+[\\w]+):\", # first name + last Name\r\n \"([\\w]+[\\s]+[\\w]+[\\s]+[\\w]+):\", # first name + middle Name + last name\r\n \"([+]\\d{2} \\d{5} \\d{5}):\", # mobile number\r\n \"([+]\\d{2} \\d{4} \\d{3} \\d{3}):\",\r\n \"([+]\\d{2} \\d{3} \\d{3} \\d{4}):\",\r\n \"([+]\\d{2} \\d{4} \\d{7})\",\r\n ]\r\n pattern = \"^\" + \"|\".join(patterns)\r\n result = 
re.match(pattern, s)\r\n if result:\r\n return True\r\n return False\r\n\r\n def _getData(self, line: str) -> tuple:\r\n \"\"\"parses the raw chat and returns its each component\"\"\"\r\n # line = \"18/06/17, 12:47 PM - Mario: Ima...\"\r\n splitLine = line.split(\" - \")\r\n dateTime = splitLine[0]\r\n date, time = dateTime.split(\", \")\r\n message = \" \".join(splitLine[1:])\r\n if self._startsWithAuthor(message): # True\r\n splitMessage = message.split(\": \")\r\n author = splitMessage[0]\r\n message = \" \".join(splitMessage[1:])\r\n else:\r\n author = None\r\n return date, time, author, message\r\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 24, "blob_id": "e823254ca236bf88155debd41a028d1bd3b8a9c1", "content_id": "bb6a291108c8b897800f55595648c1ace48ffef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/src/__init__.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "from . import lib, tools\n" }, { "alpha_fraction": 0.48756054043769836, "alphanum_fraction": 0.4953247606754303, "avg_line_length": 34.742332458496094, "blob_id": "e145068d608e0c2cfdedc2680af16d60f10299ff", "content_id": "02070544fd4cec22a3f02f7418550e0e9818076d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11980, "license_type": "no_license", "max_line_length": 111, "num_lines": 326, "path": "/src/core.py", "repo_name": "sgrkmr/parse-lecture-links", "src_encoding": "UTF-8", "text": "#! 
Script made for a specific use case\r\nimport sys, os, re, csv\r\nfrom .tools.data_scraping import YoutubeTitle\r\nfrom .lib.whatsChat import ChatParser\r\nfrom argparse import ArgumentParser\r\nfrom subprocess import getoutput\r\nfrom platform import system\r\nfrom shutil import which\r\nfrom pathlib import Path\r\nfrom datetime import datetime\r\n\r\n\r\nparser = ArgumentParser(\r\n description=\"parse-lecture-links: scrapes lecture links from whatsapp chat file\"\r\n)\r\nparser.add_argument(\r\n \"path\",\r\n help=\"parses [FILE] to get lecture links eg.'WhatsApp Chat with xxx.txt'\",\r\n nargs=1,\r\n type=str,\r\n metavar=\"FILE\",\r\n)\r\nparser.add_argument(\r\n \"-a\",\r\n \"--add\",\r\n help=\"Add additional links through a csv [FILE] formatted as <link>, <description>, <date>, <access_code>\",\r\n nargs=\"?\",\r\n type=str,\r\n required=False,\r\n default=None,\r\n metavar=\"FILE\",\r\n)\r\nparser.add_argument(\r\n \"-o\",\r\n \"--output\",\r\n help=\"write output to [FILE]\",\r\n nargs=\"?\",\r\n type=str,\r\n required=False,\r\n default=Path(\"output.md\"),\r\n metavar=\"FILE\",\r\n)\r\nparser.add_argument(\r\n \"-l\",\r\n \"--link\",\r\n help=\"open [URL] at exit\",\r\n nargs=\"?\",\r\n type=str,\r\n required=False,\r\n default=False,\r\n metavar=\"URL\",\r\n)\r\nparser.add_argument(\r\n \"--copy-output\",\r\n help=\"copy output to clipboard\",\r\n action=\"store_true\",\r\n required=False,\r\n)\r\nparser.add_argument(\r\n \"--clear\",\r\n help=\"remove output file and chat file at exit\",\r\n action=\"store_true\",\r\n required=False,\r\n)\r\nargs = parser.parse_args()\r\n\r\n# Add file check\r\nif args.add:\r\n add_file_choice = \".csv\"\r\n add_file_path = Path(args.add).resolve()\r\n if add_file_path.suffix != add_file_choice:\r\n print(\"error: Supported file formats are {}\".format(add_file_choice))\r\n exit(-1)\r\n\r\noutput_choices = {\"md\": \".md\", \"csv\": \".csv\", \"txt\": \".txt\"}\r\n# Output path check\r\noutput_path = 
Path(args.output).resolve()\r\nif not output_path.parent.is_dir() or output_path.suffix == \"\":\r\n print(\"error: Enter valid path for output file\")\r\n exit(-1)\r\nif output_path.suffix not in list(output_choices.values()):\r\n print(\"error: Supported file formats are {}\".format(list(output_choices.values())))\r\n exit(-1)\r\n\r\nif args.add:\r\n AddLinks = None\r\n with open(add_file_path, newline=\"\") as f:\r\n AddLinks = list(csv.reader(f))\r\n for i in AddLinks:\r\n if len(i) != 4:\r\n print(\"error: Incompatible data\")\r\n exit(-1)\r\n\r\ndata_dir = Path(__file__).resolve().parent.parent.joinpath(\"data\")\r\nif not data_dir.is_dir():\r\n data_dir.mkdir()\r\n\r\nChatFilePath = Path(args.path[0])\r\nCachePath = data_dir.joinpath(\"cache.csv\")\r\n\r\ncmdExists = lambda cmd: which(cmd) is not None\r\nisTermux = False\r\nif system().lower() == \"linux\":\r\n isTermux = getoutput(\"echo $PREFIX | grep 'com.termux'\").strip() != \"\"\r\n\r\nif ChatFilePath.is_file():\r\n parsedData = ChatParser(ChatFilePath).get\r\n # Date, Time, Author, Message\r\nelse:\r\n if cmdExists(\"termux-toast\"):\r\n os.system(\"termux-toast -s -b white -c black error: File not found\")\r\n print(\"error: File not found\")\r\n exit(-1)\r\n\r\ncache_exists = CachePath.is_file()\r\nCache = None\r\nif cache_exists:\r\n with open(CachePath, newline=\"\") as f:\r\n Cache = list(csv.reader(f))\r\n\r\nrecLectureData = []\r\n# Regex to match zoom links\r\nrecSearch = lambda x: re.search(\"https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec\", x)\r\n# Regex to match youtube links\r\nytbSearch = lambda x: re.search(\"https\\:\\/\\/youtu.be|https\\:\\/\\/www.youtube\", x)\r\ndescCheck = lambda s, x: re.search(f\"({s})\", x, re.I)\r\n\r\nKeyList = [\"dropper\", \"lecture\"]\r\nassert isinstance(KeyList, (list, str)), \"Invalid Keywords!\"\r\n# Statement to check if atleast one keyword from KeyList is present or not\r\nKeyExists = (\r\n lambda e: \"(\"\r\n + \" or 
\".join([\"descCheck('%s',parsedData[%s][-1])\" % (i, e) for i in KeyList])\r\n + \")\"\r\n)\r\n\r\n# Parsing chat to get [<links>, <description>, <date>, Optional[<access_code>]]\r\nfor i, j in enumerate(parsedData):\r\n dataBuffer = []\r\n if recSearch(j[-1]):\r\n #! Zoom links section\r\n # Regex to get the zoom rec link from text\r\n\r\n # lnk = re.search(\r\n # \"(((https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec)(.*)(startTime=[\\d]+))\"\r\n # \"|((https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec\\/share\\/)([^\\s]+)))\",\r\n # j[-1],\r\n # ).group(1)\r\n lnk = re.search(\r\n \"https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec\\/share\\/\\S+startTime=[\\d]+\"\r\n \"|https\\:\\/\\/us\\d{2}web\\.zoom\\.us\\/rec\\/share\\/\\S+\",\r\n j[-1],\r\n ).group()\r\n # Appending link\r\n dataBuffer.append(lnk)\r\n # For description\r\n try:\r\n if not recSearch(parsedData[i + 1][-1]) and eval(KeyExists(\"i+1\")):\r\n # i.e. description is in the next line\r\n # Appending description\r\n dataBuffer.append(parsedData[i + 1][-1])\r\n else:\r\n # i.e. description is not in the next line\r\n if recSearch(j[-1]).start() == 0:\r\n # For the link starting at 0 index\r\n find = 0\r\n for k in range(1, 4 + 1):\r\n # Trying to find description of the link for upto 4 trials\r\n if eval(KeyExists(\"(i+1)+k\")):\r\n find = 1\r\n break\r\n # Appending description if found\r\n if find:\r\n dataBuffer.append(parsedData[(i + 1) + k][-1])\r\n else:\r\n # If not found even after that, then using a generic description\r\n # share_code = re.search(\r\n # \"\\/share\\/([^\\s]+)(([\\?])|.)\", parsedData[i + 1][-1]\r\n # ).group(1)\r\n share_code = re.search(\r\n \"(?<=\\/share\\/)\\S+(?=\\?)|(?<=\\/share\\/)\\S+\",\r\n parsedData[i + 1][-1],\r\n ).group()\r\n dataBuffer.append(\r\n f\"Unknown Title (link_code): {share_code[:12]}...\"\r\n )\r\n del share_code\r\n else:\r\n # For link not starting at 0 index, i.e. 
there's some text before it\r\n # So, we'll use that text as our description\r\n # trim=re.search(\r\n # 'Start Time \\:.*',j[-1].replace(lnk,'').strip(),re.I)\r\n desc_pattern = (\r\n r\"(?P<time>Start Time \\:.+?(AM|PM)).+\"\r\n \"(?P<meet>Meeting Recording\\:).+\"\r\n )\r\n desc_code = r\"(?P<code>Access Passcode\\:\\s+\\S+)\"\r\n trim = re.search(desc_pattern + desc_code, j[-1], re.I)\r\n if trim:\r\n # Access code is present\r\n dataBuffer.append(\r\n (\r\n j[-1].replace(trim.group(), \"\") + trim.group(\"code\")\r\n ).strip()\r\n )\r\n else:\r\n # No Access code found\r\n trim = re.search(desc_pattern, j[-1], re.I)\r\n if trim:\r\n dataBuffer.append(j[-1].replace(trim.group(), \"\").strip())\r\n else:\r\n # Not much stuff found, removing link from desc\r\n dataBuffer.append(j[-1].replace(lnk, \"\").strip())\r\n except IndexError:\r\n # most likely IndexError for 'i+1' while checking for next line\r\n # i.e. next line does not exist, so use the text as description\r\n desc_pattern = (\r\n r\"(?P<time>Start Time \\:.+?(AM|PM)).+(?P<meet>Meeting Recording\\:).+\"\r\n )\r\n desc_code = r\"(?P<code>Access Passcode\\:\\s+\\S+)\"\r\n trim = re.search(desc_pattern + desc_code, j[-1], re.I)\r\n if trim:\r\n dataBuffer.append(\r\n (j[-1].replace(trim.group(), \"\") + trim.group(\"code\")).strip()\r\n )\r\n else:\r\n trim = re.search(desc_pattern, j[-1], re.I)\r\n if trim:\r\n dataBuffer.append(j[-1].replace(trim.group(), \"\").strip())\r\n else:\r\n dataBuffer.append(j[-1].replace(lnk, \"\").strip())\r\n elif ytbSearch(j[-1]):\r\n #! 
Youtube links section\r\n # Regex for id type1 links\r\n # https://youtu.be/<id>\r\n id1_get = re.search(\"(?<=https\\:\\/\\/youtu.be\\/)\\S+\", j[-1])\r\n # Regex for id of type2 links\r\n # https://www.youtube.com/...?v=<id>&...\r\n id2_get = re.search(\"\\/v\\/(.+?(?=\\?))|\\?v=(.+?(?=\\&))\", j[-1])\r\n find = 0\r\n if cache_exists:\r\n for x, data in enumerate(Cache):\r\n if j[-1] == data[0]:\r\n find = 1\r\n break\r\n if find:\r\n dataBuffer += Cache[x]\r\n else:\r\n # Cache not available\r\n # Appending link\r\n dataBuffer.append(j[-1])\r\n # Getting id of Youtube link\r\n if id1_get:\r\n # Type1 link\r\n id = id1_get.group().strip()\r\n else:\r\n # Type2 link\r\n id2_get = [i for i in id2_get.groups() if i is not None][0]\r\n id = id2_get\r\n # Appending description\r\n try:\r\n dataBuffer.append(YoutubeTitle(id))\r\n except Exception as e:\r\n print(repr(e))\r\n if cmdExists(\"termux-toast\"):\r\n os.system(\"termux-toast -s -b white -c black Connection Error\")\r\n continue\r\n # Generating cache\r\n with open(CachePath, \"a+\") as f:\r\n csv.writer(f).writerows([dataBuffer])\r\n else:\r\n #! is not a link, i.e. 
just skip\r\n continue\r\n # Swapping dates and months position\r\n dt = j[0].split(\"/\")\r\n dt[2] = \"20{}\".format(dt[2])\r\n dt = datetime.strptime(\"-\".join(dt), \"%m-%d-%Y\")\r\n dt = dt.strftime(\"%d-%m-%Y\")\r\n # Appending date\r\n dataBuffer.append(dt)\r\n access_code = re.search(r\"(?P<code>Access Passcode\\:\\s+\\S+)\", dataBuffer[1], re.I)\r\n if access_code:\r\n dataBuffer[1] = dataBuffer[1].replace(access_code.group(\"code\"), \"\").strip()\r\n # Appending access_code\r\n dataBuffer.append(access_code.group(\"code\"))\r\n else:\r\n dataBuffer.append(None)\r\n recLectureData.append(dataBuffer) # [<link>,<desc>,<date>,Optional[<access_code>]]\r\n\r\nif args.add:\r\n recLectureData.extend(AddLinks)\r\n recLectureData = sorted(\r\n recLectureData, key=lambda x: datetime.strptime(x[2], \"%d-%m-%Y\")\r\n )\r\n\r\nfor data in recLectureData:\r\n data[2] = datetime.strptime(data[2], \"%d-%m-%Y\").strftime(\"%d %b %Y\")\r\n\r\nwith args.output.open(\"w+\") as f:\r\n if args.output.suffix == output_choices[\"md\"]:\r\n f.write(\"### Recent ↑\\n\")\r\n for i, j in enumerate(recLectureData[::-1]):\r\n # Check if not last index\r\n pos = (len(recLectureData) - 1) != i\r\n f.write(f\"**`{j[2]}`**\\n\")\r\n f.write(f\"> [{j[1]}]({j[0]})\")\r\n if j[3]: # Access code\r\n f.write(f\"\\n> `{j[3]}`\")\r\n if pos:\r\n f.write(\"\\n\\n\")\r\n else:\r\n raise NotImplementedError\r\n\r\nif isTermux:\r\n # Copy to clipboard\r\n if args.copy_output:\r\n os.system(\"termux-clipboard-set < %s\" % (str(args.output.resolve())))\r\n # Verbose message\r\n os.system(\"termux-toast -s -b white -c black Copied to clipboard!\")\r\n # Clean up\r\n if args.clear:\r\n args.output.unlink()\r\n ChatFilePath.unlink()\r\n if args.link:\r\n # Open link to edit with new datab\r\n os.system(\"termux-open-url %s\" % args.link)\r\n" } ]
8
techagile/det_211194
https://github.com/techagile/det_211194
281c3b0be61320f7bb4c87eea9b02ce2813d5e2a
91328012ba9eefd5b0b3f3bed25ead13029fa856
e133525ba566b6890b21c22ed85a1e55682b7f26
refs/heads/master
2022-04-15T17:45:13.520026
2020-03-14T13:44:32
2020-03-14T13:44:32
246,787,630
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6543859839439392, "alphanum_fraction": 0.6552631855010986, "avg_line_length": 23.782608032226562, "blob_id": "d7b8ac69e61232975c0875a1df45056269682f3c", "content_id": "6a94eb7be7710490c3a469573bc789488b45ef4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 72, "num_lines": 46, "path": "/roles/custom_monitoring/files/hostname_ts.py", "repo_name": "techagile/det_211194", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Script to output hostname, timestamp to file on schedule, continuously\n\nimport socket\nimport datetime\nimport time\nimport platform\n\ndef gethostname_val():\n hostname_val=socket.gethostname();\n return hostname_val\n\ndef gettimestamp_val():\n timestamp_val = str(datetime.datetime.now())\n return timestamp_val\n\ndef writetofile():\n if (ostype.startswith(\"Linux\")):\n filelog = open(\"/tmp/host_ts.log\",\"a\")\n filelog.write(\"\\n\" + gethostname_val() + \" \" + gettimestamp_val())\n filelog.close()\n elif ostype.startswith(\"Windows\"):\n filelog = open(\"c:\\\\temp\\\\host_ts.log\",\"a\")\n filelog.write(\"\\n\" + gethostname_val() + \" \" + gettimestamp_val())\n filelog.close()\n \n\ndef readfile():\n if (ostype.startswith(\"Linux\")):\n filelog = open(\"/tmp/host_ts.log\",\"r\")\n print(filelog.read())\n filelog.close()\n elif ostype.startswith(\"Windows\"):\n filelog = open(\"c:\\\\temp\\\\host_ts.log\",\"r\")\n print(filelog.read())\n filelog.close()\n\ndef main():\n # Running endless loop, unti interrupted!\n while(not time.sleep(5)):\n writetofile()\n\nif __name__ == \"__main__\":\n ostype=platform.platform()\n main()\n" }, { "alpha_fraction": 0.7477282285690308, "alphanum_fraction": 0.7529208064079285, "avg_line_length": 27.182926177978516, "blob_id": "07e499ec3712c1b0f6df2ae3e02b98ac74dda20e", "content_id": "f2029c0fd12570ac6dc60e64bf4c99340f474995", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Markdown", "length_bytes": 2311, "license_type": "no_license", "max_line_length": 181, "num_lines": 82, "path": "/README.md", "repo_name": "techagile/det_211194", "src_encoding": "UTF-8", "text": "# Project Title\n\nDeploy monitorting service for DevOps. Basically, the project result in setting up a monitoring script that runs at schedule interval, starts on boot, continuously. \n\n\n## Prerequisites\n\nAnsible v2.9\nJenkins v2.225\nSetup environment variables on Jenkins slave, but this is not required when using LDAP\nOPS_USERNAME=<username>\n \nOPS_PASSWORD=<password>\n\n### Installing \n\n+ Setup Jenkins Server\n\nThis project use Jenkins server as slave, as well\n\n+ Setup Jenkins slave and label it as 'master'\n![](images/Jenkins_master.JPG)\n\n+ Setup Jenkins Credentials\n![](images/Jenkins_credentials.JPG)\n\n+ Setup Jenkins Pipeline\n![](images/Jenkins_Pipeline_setup.JPG)\n\n+ Setup Linux node to act as client. The client IP is added to the inventory file i.e. ~/hosts\nMake sure there is passwordless access to the Linux node. \n\n## Run the pipeline\n\nAs per the Jenkinsfile/deploy.groovy, there are 2 stages, namely:\n+ Clone DETMonitor (which clone the artefact i.e. pull from GIT)\n\n+ Deploy DETMonitor (which deploys the artefact on to the clients, which is setup in the ~/hosts file)\n\n![](images/Jenkins_Pipeline_run.JPG)\n\n+ Pipeline run output\n![](images/Jenkins_Pipeline_run_out.JPG)\n\n+ DevOps monitoring output\n![](images/monitoring_devops_out.JPG)\n\nCaveat\n------------\n\nBasically, the role confirms to:\n+ run the service as daemon [done]\n```\nmonitor.service - Hostname monitor service\n Loaded: loaded (/lib/systemd/system/monitor.service; enabled; vendor preset: enabled)\n```\n+ fails after 3 restart attempts, as per the monitor.service i.e. 
StartLimitBurst=3 [done]\n```\n[Unit]\nDescription=Hostname monitor service\nAfter=network.target\nStartLimitBurst=3\nStartLimitInterval=10\n```\n\n- Auto start on boot [missing]\n\nThough the service is set to auto-restart on boot, but confirmed that the service isn't picking up on reboot. \n\n- Service to run as non-priviledged user [missing]\n\nInitially the service was setup to run as non-priviledged user via; and it was working well. But later changed to the current state. Need further investigation to get this, sorted. \n```\n- name: Reload daemon service\n command: \"systemctl --user daemon-reload\"\n\n- name: Start monitor.service\n command: \"systemctl --user start monitor\"\n\n- name: Enable monitor.service at boot\n command: \"systemctl --user enable monitor\"\n```\n" } ]
2
shreyasseshadri/Personal-Website
https://github.com/shreyasseshadri/Personal-Website
15fcdcab83af7bb783761b549aeb08da0e472894
6e4070edf221db91e93b5c2c8e1c4c47c3d4ef6d
674a973ea39e9bfedef080aa327c7a23e7bd9157
refs/heads/master
2021-04-12T09:03:51.529197
2018-10-01T12:44:46
2018-10-01T12:44:46
126,461,969
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 28, "blob_id": "2341c933c893b6dd6e27b1bb6a1bae1bd9952b6b", "content_id": "3b4a74960174967b5a016a3e88b4f4b3ed17049e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 261, "license_type": "no_license", "max_line_length": 81, "num_lines": 9, "path": "/README.md", "repo_name": "shreyasseshadri/Personal-Website", "src_encoding": "UTF-8", "text": "# Personal Website\n\nThis is a website under construction made using django<br>\n## Here is a screenshot of project page\n\n\n![alt text](images/projects.png \"Description goes here\")\n\nThe home page is still under construction the reusme page redirects to my resume.\n" }, { "alpha_fraction": 0.6726886034011841, "alphanum_fraction": 0.6801275014877319, "avg_line_length": 30.366666793823242, "blob_id": "2cf4d2d87e77304844e66489bc270105b658ff65", "content_id": "8a3d15747d6d943fe0dfa43c02f65aa760298470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 941, "license_type": "no_license", "max_line_length": 87, "num_lines": 30, "path": "/about/views.py", "repo_name": "shreyasseshadri/Personal-Website", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\n# Create your views here.\n\ndef pdf_view(request):\n return redirect('/media/Resume.pdf')\n\n # with open('/media/Resume.pdf', 'r') as pdf:\n # response = HttpResponse(pdf.read(), content_type='application/pdf')\n # response['Content-Disposition'] = 'inline;filename=some_file.pdf'\n # return response\n # pdf.closed\n\n# from django.http import FileResponse, Http404\n#\n# def pdf_view(request):\n# try:\n# return FileResponse(open('Resume.pdf', 'rb'), content_type='application/pdf')\n# except:\n# # 
print(\"hello\")\n# raise Http404()\n\ndef index(request):\n return render(request,'about/index.html',{})\n# /home/shreyas/personal_website/about/templates/about/Resume.pdf\n# /home/shreyas/personal_website/about/views.py\n" }, { "alpha_fraction": 0.6541666388511658, "alphanum_fraction": 0.6708333492279053, "avg_line_length": 22.899999618530273, "blob_id": "81cb7956ed72163ae8c7fafb63dc3123fa5c77e7", "content_id": "a74b4e385c73ad4025af55b6762c1c958cebd240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/test_api/models.py", "repo_name": "shreyasseshadri/Personal-Website", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\nclass Stock(models.Model):\n text=models.TextField(max_length=200)\n def __str__(self):\n return self.text\n\n" }, { "alpha_fraction": 0.6608186960220337, "alphanum_fraction": 0.6608186960220337, "avg_line_length": 20.375, "blob_id": "abe4b5d28ae69cb85454f8499d30cbda2f1d04e6", "content_id": "48581c800b4a30e0aea4cc3bd0a65299f7b8d3e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/about/urls.py", "repo_name": "shreyasseshadri/Personal-Website", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom about import views\n\n\nurlpatterns = [\n url(r'^$',views.index,name=\"index\"),\n url(r'$', views.pdf_view, name='project_index'),\n]\n" } ]
4
radhac88/divya
https://github.com/radhac88/divya
e2431392ebd11ccc0b69a50f56906c752ca5c2e8
f1face99ea5426f12e8881d47f8ccd9ded25cb09
ae73569f1b6b9a26a720f92b3fc19dcb1fc1a01a
refs/heads/master
2020-04-27T18:23:00.765816
2019-03-11T16:00:41
2019-03-11T16:00:41
174,567,003
0
0
null
2019-03-08T16:01:43
2019-03-08T16:32:26
2019-03-11T16:00:41
Python
[ { "alpha_fraction": 0.6301218271255493, "alphanum_fraction": 0.673311173915863, "avg_line_length": 19.522727966308594, "blob_id": "f5d1e50b07accb9fd92ee28e1dc76970a36f64c8", "content_id": "5c8a2827433ce1c58b732f554439b6d5c326f9de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 903, "license_type": "no_license", "max_line_length": 56, "num_lines": 44, "path": "/README.md", "repo_name": "radhac88/divya", "src_encoding": "UTF-8", "text": "# divya\nRepository for python practice\n\n\n\"\"\"\ndef changeme( mylist,val ):\n \"This changes a passed list into this function\"\n mylist.append(1)\n mylist.append(2)\n mylist.append(3)\n myval = val + 1\n print (\"Values inside the function: \", mylist, myval)\n return myval, mylist\n\n# Now you can call changeme function\nmylist = [10,20,30]\nmyval = 25\nprint (\"Values outside the function: \", mylist, myval)\nchangeme( mylist, myval )\nprint (\"2 Values outside the function: \", mylist, myval)\n\nop1, op = changeme(mylist, myval)\nprint(op)\nprint(op1)\n\n\"\"\"\nimport random\n\nlist1 = [10,20,30,40,50]\nfor i in range(1,10):\n print(random.choice(list1))\n\n\ndef myfunc(*args, **kwargs):\n\tfor x in args:\n\t\tprint(x)\n\tfor x, y in kwargs.items():\n\t\tprint(y)\n\nmyfunc(2,\"radha\", 56, first=\"rk\", second=\"krishna\")\nval1 = int(input(\"Please enter value 1: \"))\nval2 = int(input(\"Please enter value 2: \"))\n\nprint(val1+val2)\n" }, { "alpha_fraction": 0.6345885396003723, "alphanum_fraction": 0.6387726664543152, "avg_line_length": 15.272727012634277, "blob_id": "1977c3ade5ff0fc93efcf70e9a54eaa6da6c5a87", "content_id": "11bcff50e62ff36f1a66b6620929f16f407afc55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 53, "num_lines": 44, "path": "/books.py", "repo_name": "radhac88/divya", "src_encoding": "UTF-8", "text": "#Comment added by 
radhakrishna\n#Comment added by Divya\n\n#New line added for feature/speechReco branch testing\nbooks = [{\n\t\"name\": None,\n\t\"author\": None,\n\t\"read\": None\n},\n\t{\n\t\t\n\t}\n\n]\n\ndef insert():\n\tprint(\"Enter book name\")\n\tb_name = input()\n\tbooks[\"name\"] = b_name\n\tprint(\"Enter author name\")\n\tb_author = input()\n\tbooks[\"author\"] = b_author\n\tprint(\"Have you read the book Y/N\")\n\tb_read = input()\n\tif b_read == \"Y\":\n\t\tbooks[\"read\"] = True\n\telse:\n\t\tbooks[\"read\"] = False\n\n\treturn(books)\n\n\n\nprint(\"Select the action you want to perform\")\nprint(\"1. Insert\")\nprint(\"1. Delete\")\nprint(\"1. Mark\")\nuser_input = input()\n\nif user_input == \"insert\" or \"Insert\":\n\tprint(insert())\n\nelif user_input == \"Delete\" or \"delete\":\n\tpass\n\t" } ]
2
loserbbb/tucao
https://github.com/loserbbb/tucao
e86b5a389a9c9219d95feea4e37822602a36abdc
ccc078dd76ee292c6bbd76e809eb8c92257c74e4
85bcf2e2de6f64b0cbbcad22edb1829b96563c72
refs/heads/master
2021-01-15T11:58:30.466610
2017-08-08T02:13:39
2017-08-08T02:18:56
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5318245887756348, "alphanum_fraction": 0.5417255759239197, "avg_line_length": 23.379310607910156, "blob_id": "6e912c605f6e5e813d91b57a11685c03928b0725", "content_id": "7dcc552e9f83028371f9661441567cc2e8a273a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 61, "num_lines": 29, "path": "/tucao/usermanage/migrations/0002_auto_20170807_0838.py", "repo_name": "loserbbb/tucao", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usermanage', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='telnumber',\n field=models.CharField(null=True, max_length=11),\n ),\n migrations.AlterField(\n model_name='user',\n name='born_date',\n field=models.DateTimeField(null=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='head_img',\n field=models.TextField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.7165354490280151, "alphanum_fraction": 0.7283464670181274, "avg_line_length": 30.6875, "blob_id": "8b662d99ca9c90dd84cee802de97f05f4e367cec", "content_id": "8dd0039b4826cdce48e1807e807a9be80ef1f972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 55, "num_lines": 16, "path": "/tucao/usermanage/models.py", "repo_name": "loserbbb/tucao", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass User(models.Model):\n name=models.CharField(max_length=10)\n password=models.CharField(max_length=16)\n born_date=models.DateTimeField(null=True)\n gender=models.BooleanField()\n description=models.TextField(blank=True,null=True)\n head_img=models.TextField(null=True)\n 
exp=models.IntegerField()\n rank=models.IntegerField()\n telnumber=models.CharField(max_length=11,null=True)\n\n def __str__(self):\n \treturn self.name\n\n" }, { "alpha_fraction": 0.5247410535812378, "alphanum_fraction": 0.5304948091506958, "avg_line_length": 31.185184478759766, "blob_id": "919ea9bb1407825f958973eb6bb07cb2fadb75c1", "content_id": "ce64353e8580412d6b62e4522e571a33b43a161b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/tucao/usermanage/migrations/0001_initial.py", "repo_name": "loserbbb/tucao", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('name', models.CharField(max_length=10)),\n ('password', models.CharField(max_length=16)),\n ('born_date', models.DateTimeField()),\n ('gender', models.BooleanField()),\n ('description', models.TextField(null=True, blank=True)),\n ('head_img', models.TextField()),\n ('exp', models.IntegerField()),\n ('rank', models.IntegerField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7322834730148315, "alphanum_fraction": 0.7322834730148315, "avg_line_length": 35.28571319580078, "blob_id": "38ecd3effbf9a4cc8c6ab2437717fddebd961eb4", "content_id": "dd12671ea2467b01218542620a81fb868d29aa68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 100, "num_lines": 14, "path": "/tucao/usermanage/middleware.py", "repo_name": "loserbbb/tucao", "src_encoding": "UTF-8", "text": "from django.shortcuts import 
render,HttpResponse,redirect,HttpResponseRedirect\ntry:\n from django.utils.deprecation import MiddlewareMixin\nexcept ImportError:\n MiddlewareMixin=object\n\nclass SimpleMiddleware(MiddlewareMixin):\n def process_request(self,request):\n if 'user' not in request.session and request.path != '/login/' and request.path !='/admin/':\n HttpResponseRedirect('/login/')\n return None\n\n def process_response(self,request,response):\n return response\n" }, { "alpha_fraction": 0.588744580745697, "alphanum_fraction": 0.5901876091957092, "avg_line_length": 31.23255729675293, "blob_id": "b828416f459611b734ae836eaae8b4559451461c", "content_id": "07e75336fe37e2c96b21e94b2ba4547514cf7eca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "no_license", "max_line_length": 103, "num_lines": 43, "path": "/tucao/usermanage/views.py", "repo_name": "loserbbb/tucao", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import User\n# Create your views here.\ndef index(request):\n return HttpResponse('hello')\n\ndef regist(request):\n if request.method == 'POST':\n try:\n user=User.objects.get(name=request.POST.get('name'))\n return HttpResponse('Exist')\n except Exception:\n name=request.POST.get('name')\n password=request.POST.get('password')\n gender=request.POST.get('gender',True)\n telnumber=request.POST.get('telnumber')\n\n user=User(name=name,password=password,gender=gender,telnumber=telnumber,exp='0',rank='1')\n user.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Error')\n\ndef login(request):\n if request.method == 'POST':\n name=request.POST.get('name')\n password=request.POST.get('password')\n user\n try:\n user=User.objects.get(name=name)\n except:\n pass\n if user is not None:\n if password==user.password:\n request.session['user']=user.name\n return HttpResponse('Success')\n else:\n return 
HttpResponse('Password incorrot')\n else:\n return HttpResponse('user not eixist')\n else:\n return HttpResponse('Error')\n" } ]
5
LibrEars/someCode
https://github.com/LibrEars/someCode
5debe4bb6e0af53204169152c969682effdab743
0c5f8bcffc7301ba1fef4160eadad145f166fdfd
458caabb05b5ad9ef4507f9a4d401ccad901632c
refs/heads/master
2021-01-21T14:23:30.926498
2016-06-11T15:27:04
2016-06-11T15:27:04
59,768,902
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4862259030342102, "alphanum_fraction": 0.4986225962638855, "avg_line_length": 17.58974266052246, "blob_id": "1a6372b20f35e0fefdc6e08bdc2bbad7c13fd202", "content_id": "13e1998b94a5a05c019cad202e75d7fd6f20215d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 727, "license_type": "no_license", "max_line_length": 70, "num_lines": 39, "path": "/dreieck.c", "repo_name": "LibrEars/someCode", "src_encoding": "UTF-8", "text": "/*************************************************\n*Entwickeln Sie ein Programm, das ein auf der Spitze stehendes Dreieck\n*mit Sternchen (*) auf dem Bildschirm ausgibt\n**************************************************/\n\n#include <stdio.h>\n\n//\n\nint main(void) {\n\n\t// variablen\n\tunsigned l, m, n;\n\tunsigned zeilen;\n\tunsigned nSterne, leerzeichen;\n\n\tprintf(\"Bitte Anzahl der Zeilen eingeben\\n\");\n\tif ( scanf(\"%u\", &zeilen)==0) {\n\t\tprintf(\"ungültie Eingabe\");\n\t\treturn 0;\n\t}\n\n\tfor (l = 1; l <= zeilen; l++) {\n\t\tleerzeichen = l;\n\t\tnSterne = (zeilen + 1 - l) *2 -1;\n\n\t\tprintf(\"\\n\");\n\n\t\tfor (m = 1; m <= leerzeichen; m++) {\n\t\t\tprintf(\" \");\n\t\t}\n\n\t\tfor (n = 1; n <= nSterne; n++) {\n\t\t\tprintf(\"*\");\n\t\t}\n\t}\n\tprintf(\"\\n\");\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.5377777814865112, "alphanum_fraction": 0.5688889026641846, "avg_line_length": 19.837209701538086, "blob_id": "274c737b120cc588848a26db763a7162b7274bd8", "content_id": "f7c973567dc6b51d5b949cc4e0457736a0097f04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 902, "license_type": "no_license", "max_line_length": 70, "num_lines": 43, "path": "/sine.c", "repo_name": "LibrEars/someCode", "src_encoding": "UTF-8", "text": "/*********************************************************************\n*Entwickeln Sie ein Programm, das Ihnen die Werte der Sinusfunktion\n*in 10er Schritten von 0 bis 360° mit 
drei Stellen nach dem Komma \n*ausgibt. Die Sinusfunktion sin() ist in der Header-Datei math.h \n*definiert. Achten Sie auf eventuelle Typkonvertierungen\n*********************************************************************/\n\n#include <stdio.h>\n#include <math.h>\n\ndouble zehnSine(void);\n\nint main(void) {\n\t\n\t// Funktion ausführen\n\tzehnSine();\n\t\n}\n\n\n// Funktionsdefinition\ndouble zehnSine(void) {\n\t// Konstante Pi definieren\n\tconst double Pi = 3.141592653;\n\t\n\t// Variablen definieren\n\tdouble winkel;\n\tdouble rad;\n\tdouble sinus;\n\tint i;\n\t\n\tprintf(\"winkel sinus\\n\");\n\n\tfor (i = 0; i <= 36; i++) {\n\t\twinkel = i * 10;\n\n\t\trad = winkel * Pi / 180;\n\t\tsinus = sin(rad);\n\n\t\tprintf(\"%6g %6.3f\\n\", winkel, sinus);\n\t}\n\treturn 0;\n}\n\n\n\n\n" }, { "alpha_fraction": 0.4887983798980713, "alphanum_fraction": 0.5152749419212341, "avg_line_length": 17.148147583007812, "blob_id": "90aba5c160f1581771f307ad8368e0a77a869563", "content_id": "d9efe28d366adf1622bebb85d356c44f2341069b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 43, "num_lines": 27, "path": "/sineFFI.py", "repo_name": "LibrEars/someCode", "src_encoding": "UTF-8", "text": "from cffi import FFI\n\nffi = FFI()\n\nffi.set_source(\"sinusCFFI\",\n\"\"\"\n#include <stdio.h>\n#include <math.h>\n\n\n double mySine(double winkel) {\n // Konstante Pi definieren\n const double Pi = 3.141592653;\n\n // Variablen definieren\n double rad;\n double sinus;\n\n rad = winkel * Pi / 180;\n sinus = sin(rad);\n return(sinus);\n }\n\"\"\")\n\nffi.cdef(\"double mySine(double winkel);\")\n\nffi.compile()\n\n" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.6935483813285828, "avg_line_length": 11.199999809265137, "blob_id": "edc4c5cb362ddf942584f9ed3fb0088f14a2692c", "content_id": "a8047872357c8e04b638dca86db5e9bbd5cd8b2f", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 30, "num_lines": 5, "path": "/sinusCFFI.py", "repo_name": "LibrEars/someCode", "src_encoding": "UTF-8", "text": "from sinusCFFI import ffi, lib\n\ns = lib.mySine(20)\n\nprint(s)\n\n" }, { "alpha_fraction": 0.6066977977752686, "alphanum_fraction": 0.6160436272621155, "avg_line_length": 27.511110305786133, "blob_id": "40c6ed9c34a358a1decf6baec360ef3083c0c859", "content_id": "df78f8fd42127d4bac8b7d68b4e6ff97245fda20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1292, "license_type": "no_license", "max_line_length": 62, "num_lines": 45, "path": "/scalar.c", "repo_name": "LibrEars/someCode", "src_encoding": "UTF-8", "text": "/*************************************************************\n*Entwickeln Sie ein Programm, das das Skalarprodukt zweier \n*Vektoren bestimmt. Die Anzahl der Elemente und die Werte der \n*Vektoren sind in der Eingabeschleife manuell einzugeben.\n*\n* Überprüfen Sie, ob die Anzahl der Elemente die Maximalgröße \n* der Vektoren überschreitet und ermöglichen Sie ggf. eine \n* Korrektur. Legen Sie die maximale Anzahl der Vektorelemente \n* mit einer define-Anweisung durch den Präprozessor fest.\n*\n* Skalarprodukt: A*B = a1*b1 + a2*b2 + a3*b3 + ... 
+ an*bn\n*************************************************************/\n\n#include <stdio.h>\n#include <math.h>\n\n#define elementMax 6\n\nint main(void) {\n\tint i, ai, bi, element;\n\tint sProdukt = 0;\n\n\tprintf(\"Programm zum berechnen des Skalarproduktes X*Y\\n\");\n\n\t// read the values\n\tprintf(\"Anzahl der Elemente eingeben:\\n\");\n\tscanf(\"%d\",&element);\n\twhile (element > elementMax) {\n\t\tprintf(\"Anzahl der Elemente zu groß\\nNochmal eingeben\\n\");\n\t\tscanf(\"%d\", &element);\n\t}\t\t\n\tfor (i = 0; i < element; i++) {\n\t\tprintf(\"%d-tes x-element eingeben:\\n\", i+1);\n\t\tscanf(\"%d\", &ai);\n\t\tprintf(\"%d-tes y-element eingeben:\\n\", i+1);\n\t\tscanf(\"%d\", &bi);\n\n\t\t// calculate\n\t\tsProdukt += ai*bi;\n\t}\n\n\tprintf(\"Das Skalarprodukt X*Y = %d\\n\", sProdukt);\n\treturn 0;\n\t\n}\n\n" } ]
5
bugpoop/tatketcapbot
https://github.com/bugpoop/tatketcapbot
396b8c47a031d8f4d43a34e285c6feb82053fbdb
117e41152caa83c709bf186bf8b610f3a8a3dcbf
1bb7d9293dc54a13c999c6827f040447e0151167
refs/heads/main
2023-03-28T04:55:19.910238
2021-03-22T10:01:54
2021-03-22T10:01:54
349,006,489
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8275862336158752, "avg_line_length": 42.5, "blob_id": "cacc057744cd4440ecf7fbd95516c71511438d56", "content_id": "ce046c10c91a2af2d97d578e59b7c6cdf93bbb69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 71, "num_lines": 2, "path": "/README.md", "repo_name": "bugpoop/tatketcapbot", "src_encoding": "UTF-8", "text": "# tatketcapbot\nA telegram bot that automatically updates you on bitcoin price changes.\n" }, { "alpha_fraction": 0.590512752532959, "alphanum_fraction": 0.5967910885810852, "avg_line_length": 32.93902587890625, "blob_id": "ac3c2d616cd14babe2a3e8b7b1b0116be4ff03e9", "content_id": "825599995047f455a3eaf5868058b9bd0bb94908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2867, "license_type": "no_license", "max_line_length": 107, "num_lines": 82, "path": "/main.py", "repo_name": "bugpoop/tatketcapbot", "src_encoding": "UTF-8", "text": "from selenium import webdriver\r\nimport time\r\nfrom telegram.ext import Updater, CommandHandler\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n\r\n\r\ndef start(update, context):\r\n \"\"\"Send a message when the command /start is issued.\"\"\"\r\n update.message.reply_text('initiating the cum')\r\n driver = webdriver.Firefox()\r\n\r\n while True:\r\n\r\n driver.get(\"https://www.google.com/search?client=firefox-b-d&q=btc+to+usd\")\r\n price = driver.find_element_by_css_selector(\".SwHCTb\").text\r\n price = price.replace(\".\", \"\")\r\n price = price.replace(\",\", \"\")\r\n price = float(price)\r\n\r\n while True:\r\n time.sleep(60)\r\n driver.get(\"https://www.google.com/search?client=firefox-b-d&q=btc+to+usd\")\r\n current = driver.find_element_by_css_selector(\".SwHCTb\").text\r\n current = 
current.replace(\".\", \"\")\r\n current = current.replace(\",\", \"\")\r\n current = float(current)\r\n\r\n if current > price + price*(5/1000):\r\n current = int(current)\r\n update.message.reply_text(f\"BTC is up by more than %0.5. Current price is {current} USD\")\r\n break\r\n if current < price - price*(2/1000):\r\n current = int(current)\r\n update.message.reply_text(f\"BTC is down by more than %0.2. Current price is {current} USD\")\r\n break\r\n else:\r\n print(\"no change\")\r\n break\r\n\r\n\r\ndef btc(update, context):\r\n driver = webdriver.Firefox()\r\n driver.get(\"https://www.google.com/search?client=firefox-b-d&q=btc+to+usd\")\r\n price = driver.find_element_by_css_selector(\".SwHCTb\").text\r\n price = price.replace(\".\", \"\")\r\n price = price.replace(\",\", \"\")\r\n price = float(price)\r\n update.message.reply_text(f\"Current BTC price is {price} USD\")\r\n\r\n\r\ndef main():\r\n \"\"\"Start the bot.\"\"\"\r\n # Create the Updater and pass it your bot's token.\r\n # Make sure to set use_context=True to use the new context based callbacks\r\n # Post version 12 this will no longer be necessary\r\n updater = Updater(\"TOKEN\", use_context=True)\r\n\r\n # Get the dispatcher to register handlers\r\n dp = updater.dispatcher\r\n\r\n # on different commands - answer in Telegram\r\n dp.add_handler(CommandHandler(\"start\", start))\r\n dp.add_handler(CommandHandler(\"help\", help))\r\n\r\n\r\n\r\n # log all errors\r\n dp.add_error_handler(logging.error)\r\n\r\n # Start the Bot\r\n updater.start_polling()\r\n\r\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\r\n # SIGTERM or SIGABRT. This should be used most of the time, since\r\n # start_polling() is non-blocking and will stop the bot gracefully.\r\n updater.idle()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n" } ]
2
Nagoudi/Parsing-the-Zahran-s-word2vec-model
https://github.com/Nagoudi/Parsing-the-Zahran-s-word2vec-model
8bc25f50f3940d5b5c367ac2293b45709556136a
d8ff353de07d05cdb4d29c2a13af6d77ba32cc07
4bbaa40067a049eabdaf161f6a8955cf2e61ac17
refs/heads/master
2020-06-14T16:29:27.854593
2019-07-03T13:25:02
2019-07-03T13:25:02
195,056,395
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.52484530210495, "alphanum_fraction": 0.5332833528518677, "avg_line_length": 30.916168212890625, "blob_id": "6ef8e42b5405dd59e8ccde396146ff5de591d0ff", "content_id": "d4b7d743e268ddeaa4867d804d863aed17cc86ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5347, "license_type": "no_license", "max_line_length": 232, "num_lines": 167, "path": "/parseMoi_Fast.py", "repo_name": "Nagoudi/Parsing-the-Zahran-s-word2vec-model", "src_encoding": "UTF-8", "text": "# encoding=utf8\n\n\nfrom scipy import spatial\nimport time\nimport math\nfrom optparse import OptionParser\nimport numpy as np\nfrom numpy import linalg as LA\nimport operator as op\nimport os, sys\n\ndef parse (VECTOR_PATH, WORD2VEC):\n print 'Start loading vectors ...'\n start_time = time.time();\n\n vecDic = {} \n vectors = None\n fin = open(VECTOR_PATH, \"rb\") \n header = fin.readline()\n vocab_size, vector_size = map(int, header.split())\n if(WORD2VEC): #for CBOW or Skip-gram\n binary_len = np.dtype(np.float32).itemsize * vector_size\n else: # for GloVe\n binary_len = np.dtype(np.float64).itemsize * vector_size\n\n for line_no in xrange(vocab_size): \n word = ''\n while True:\n ch = fin.read(1)\n if ch == ' ' and WORD2VEC == 1:\n break\n elif ch == '#' and WORD2VEC==0:\n\t\t\t\tbreak\n word += ch \n if(WORD2VEC): \n vector = np.fromstring(fin.read(binary_len), np.float32)\n else: \n vector = np.fromstring(fin.read(binary_len), np.float64) \n word = word.strip()\n vecDic[word] = vector \n print 'finished loading vectors ...'\n print(\"Number of words in the model : %s\" % (len(vecDic)))\n print(\"Time for loading the model : %s seconds\" % (time.time() - start_time))\n return vecDic\n\n\n\ndef readWordsFile(WordFilename):\n allWords=[]\n with open(WordFilename, 'r') as f:\n allWords = f.readlines()\n words = []\n for i in range(0, len(allWords)):\n if (allWords[i] != '\\n'):\n words.append(allWords[i].strip())\n return 
words\n\ndef writeResults(allRes, outputFile):\n f = open(outputFile, 'w')\n for word, similairWords in allRes.items():\n f.write(word + '\\n')\n f.write(\"--------------------------------------\" + '\\n')\n for i, e in enumerate(similairWords):\n lineFormat = \"{}-->( {} , {} )\"\n line = lineFormat.format(i + 1, e[0], 1 - e[1])\n f.write(line + '\\n')\n f.write(\"--------------------------------------\" + '\\n')\n f.close()\n\ndef Nplus_proche(wordFilename,Dictonary,N,outputFile):\n\n print 'Starting looking for similair words .......'\n start_time = time.time()\n words = readWordsFile(wordFilename)\n\n allWords=Dictonary.keys()\n allVectors=Dictonary.values()\n norm=LA.norm(allVectors, axis=1)\n allRes={}\n index = 1\n for word in words:\n print 'Word Number : %d'% index\n start_time_word = time.time()\n index =index+1\n if word in Dictonary:\n\n indexOfWord = allWords.index(word)\n vectWord =Dictonary[word]\n dots = np.dot(allVectors,vectWord)\n similarties=1-np.divide(dots,norm[indexOfWord]*norm)\n\n\n ind = np.argsort(similarties)\n ind = ind[1:(N+1)]\n result=zip([allWords[i] for i in ind], [similarties[i] for i in ind])\n\n allRes[word]= result\n\n print \"-------- %s sec--------\" % (time.time() - start_time_word)\n else:\n print (\"word don't exist \")\n\n writeResults(allRes, outputFile)\n print(\"Time of finding similairs words : %s seconds\" % (time.time() - start_time))\n print 'Done ...'\n return allRes\n\n\n\n\n\nif __name__=='__main__':\n\n args = [\"-m\", \"/home/facultemi/Desktop/Moatez files/ALL_NORM_PHRASES200_CBOW300_WIN5_NEG10_HS0_MIN10_SAMPLE\",\"-w\",\"/home/facultemi/Desktop/Moatez files/input.txt\",\"-n\",\"25\",\"-o\",\"/home/facultemi/Desktop/Moatez files/output.txt\"]\n\n parser = OptionParser()\n\n parser.add_option(\"-m\", \"--model\",\n action=\"store\",\n type=\"string\",\n dest=\"modelFilename\",\n help=\"The name of file of Word2vec Model\")\n parser.add_option(\"-w\", \"--word\",\n action=\"store\",\n type=\"string\",\n 
dest=\"WordFilename\",\n help=\"The name of text file that contains the used word\")\n parser.add_option(\"-n\", \"--number\",\n action=\"store\",\n type=\"string\",\n dest=\"N\",\n help=\"The nnumber of similair words\")\n parser.add_option(\"-o\", \"--output\",\n action=\"store\",\n type=\"string\",\n dest=\"outputFile\",\n help=\"The name of output text file\")\n\n (options, args) = parser.parse_args(args=None, values=None)\n\n error = False\n\n if ((options.modelFilename==None) or (options.WordFilename==None) or (options.N == None) or (options.outputFile == None)):\n print \"Invalid arguments use -h to view informations about arguments\"\n error=True\n\n if(error == False):\n vecDic= parse(options.modelFilename,1)\n res = Nplus_proche(options.WordFilename,vecDic,int(options.N),options.outputFile)\n\n\n\n print \"---------------------- Done ------------------------\"\n\n# W ='الاسلام‏'\n# V=Dictonary[W]\n# print np.dot(V, V)\n# W = 'ديننا'\n# V = Dictonary[W]\n# print np.dot(V, V)\n\n# dotsDic = dict(zip(dictionary.keys(),dots))\n# vecDic = {}\n\n# for key,vector in dictionary.iteritems():\n# vecDic[key]=1-(dotsDic[key] / (norm[key] * norm[word]))\n\n\n\n" }, { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 37, "blob_id": "abe3e39964afc8660a092df6c2c92581359fdb08", "content_id": "898eb91b5b05ee65b6ecef22856e9f135f6123f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 38, "license_type": "no_license", "max_line_length": 37, "num_lines": 1, "path": "/README.md", "repo_name": "Nagoudi/Parsing-the-Zahran-s-word2vec-model", "src_encoding": "UTF-8", "text": "# Parsing-the-Zahran-s-word2vec-model\n" } ]
2
jonathanvanschenck/Monochromator-GUI
https://github.com/jonathanvanschenck/Monochromator-GUI
4afb0fa9bf6e621268b49048efc2a8ed05b8d532
2abd543b996b7377ab690cb68309e86ef6af3c57
12680bc530b76e0baaa2441a623f07234220cbfc
refs/heads/master
2020-07-11T11:19:18.172021
2020-07-01T22:06:34
2020-07-01T22:06:34
204,525,525
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7799353003501892, "alphanum_fraction": 0.7928802371025085, "avg_line_length": 43.14285659790039, "blob_id": "8c4aba1061ef42918d0c6670c52bfcaf431d1db6", "content_id": "907448bcc78d74641283cf5e8643bee045c0adbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 309, "license_type": "no_license", "max_line_length": 77, "num_lines": 7, "path": "/README.md", "repo_name": "jonathanvanschenck/Monochromator-GUI", "src_encoding": "UTF-8", "text": "# Monochromator-GUI\nGUI to load and calibrate a monochromator: based on a USB2000 ocean optics\nspectrometer and a Thorlabs APT translation stage.\n\n# Notes\nNote that you will need a copy of the Thorlabs APT translation stage library:\n`APT.dll`, which you can get from Thorlabs. Dump it into the top directory.\n" }, { "alpha_fraction": 0.5182367563247681, "alphanum_fraction": 0.5369945764541626, "avg_line_length": 38.15899658203125, "blob_id": "7fb83d1375c9a9a85fa373226a71cfc2d0191fe3", "content_id": "08351f47df1539ac125522ab2e34698620b13e0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9596, "license_type": "no_license", "max_line_length": 111, "num_lines": 239, "path": "/monochromator/calibrate.py", "repo_name": "jonathanvanschenck/Monochromator-GUI", "src_encoding": "UTF-8", "text": "#%%\r\nimport tkinter as tk\r\nimport os\r\nfrom time import sleep\r\nfrom tkinter import messagebox\r\nfrom tkinter import filedialog\r\nfrom spectrometer import Spectrometer\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nfrom scipy.optimize import minimize\r\ndef gauss(x,p):\r\n return np.abs(p[0])+np.abs(p[1])*np.exp(-((x-p[2])/p[3])**2)\r\n\r\nclass Calibrate(tk.Frame):\r\n def __init__(self,monochromator):\r\n tk.Frame.__init__(self,None)\r\n self.master.title(\"Calibration Window\")\r\n 
self.master.protocol(\"WM_DELETE_WINDOW\", lambda : self.master.destroy())\r\n if not messagebox.askyesno(\"Title\",\"Create a new calibration file?\"):\r\n fname = filedialog.askopenfilename(title=\"Load Calibration File\",\r\n initialdir = os.getcwd(),\r\n filetypes = ((\"calibration files\",\"*.cal\"),(\"all files\",\"*.*\")))\r\n monochromator.load_calibration_points(fname)\r\n self.master.destroy()\r\n else:\r\n self.spec = Spectrometer()\r\n self.specRunning = True\r\n self.mono = monochromator\r\n self.mono.reset_calibration()\r\n self.create_widgets()\r\n self.start_aquisition()\r\n self.mainloop()\r\n self.spec.close()\r\n \r\n def create_widgets(self):\r\n # Create MPL Figure\r\n self.mpl = MPL(self.master,\r\n self.spec.wavelengths(),self.spec.intensities(),\r\n #np.arange(0,100,0.1),gauss(np.arange(0,100,0.1),[200,2700,40,5]),\r\n #self.spec.wavelengths,self.spec.intensities,\r\n column=0,row=2,columnspan=2)\r\n \r\n # Create Spectrometer control window\r\n self.specFrame = tk.LabelFrame(self.master,text=\"Spectrometer Controls\")\r\n self.specFrame.grid(column=0,row=0)\r\n self.ITLabel = tk.Label(self.specFrame,text=\"IT (ms)\")\r\n self.ITLabel.grid(column=0,row=0,sticky=tk.E)\r\n self.ITvariable = tk.StringVar()\r\n self.set_IT(20)\r\n self.ITEntry = tk.Entry(self.specFrame,\r\n textvariable=self.ITvariable,\r\n width=6)\r\n self.ITEntry.grid(column=1,row=0)\r\n self.ITUpdateButton = tk.Button(self.specFrame,text=\"Update\",\r\n command=lambda: self.set_IT(self.ITvariable.get()))\r\n self.ITUpdateButton.grid(column=2,row=0,sticky=tk.W)\r\n self.PPLabel = tk.Label(self.specFrame,text=\"Aquire:\")\r\n self.PPLabel.grid(column=0,row=1,sticky=tk.E)\r\n self.playButton = tk.Button(self.specFrame,text=\"Play\",\r\n command=lambda: self.start_aquisition())\r\n self.playButton.grid(column=1,row=1)\r\n self.pauseButton = tk.Button(self.specFrame,text=\"Pause\",\r\n command=lambda: self.stop_aquisition())\r\n self.pauseButton.grid(column=2,row=1)\r\n \r\n # 
Create calibration setup\r\n self.calFrame = tk.LabelFrame(self.master,text=\"Spectrometer Controls\")\r\n self.calFrame.grid(column=1,row=0)\r\n self.PosLabel = tk.Label(self.calFrame,text=\"Starting Position:\")\r\n self.PosLabel.grid(column=0,row=0,sticky=tk.E)\r\n self.Posvariable = tk.StringVar()\r\n self.set_Pos(self.mono.lower_bound)\r\n self.PosEntry = tk.Entry(self.calFrame,\r\n textvariable=self.Posvariable,\r\n width=6)\r\n self.PosEntry.grid(column=1,row=0)\r\n self.PosUpdateButton = tk.Button(self.calFrame,text=\"Move\",\r\n command=lambda: self.set_Pos(self.Posvariable.get()))\r\n self.PosUpdateButton.grid(column=2,row=0,sticky=tk.W)\r\n self.stepLabel = tk.Label(self.calFrame,text=\"Number of Steps:\")\r\n self.stepLabel.grid(column=0,row=1,sticky=tk.E)\r\n self.Stepvariable = tk.StringVar()\r\n self.Stepvariable.set(\"3\")\r\n self.StepEntry = tk.Entry(self.calFrame,\r\n textvariable=self.Stepvariable,\r\n width=6)\r\n self.StepEntry.grid(column=1,row=1)\r\n self.startCalButton = tk.Button(self.calFrame,\r\n text=\"Start Calibration\",\r\n command = lambda: self.start_calibration())\r\n self.startCalButton.grid(column=0,row=2)\r\n self.nextButton = tk.Button(self.calFrame,\r\n text=\"Next Position\",\r\n command = lambda: self.next_position())\r\n self.nextButton.grid(column=1,row=2)\r\n self.nextButton.config(state='disabled')\r\n \r\n \r\n def set_IT(self,IT):\r\n try:\r\n it = int(IT)*1000\r\n except:\r\n it = 100*1000\r\n if it<10*1000:\r\n it = 10*1000\r\n elif it>10*1000*1000:\r\n it = 10*1000*1000\r\n self.spec.integration_time_micros(it)\r\n self.ITvariable.set(str(it//1000))\r\n self.mpl.update_spectrum(self.spec.intensities())\r\n \r\n def set_Pos(self,POS):\r\n try:\r\n pos = int(POS)\r\n except:\r\n pos = 100\r\n if pos<0:\r\n pos = 0\r\n elif pos>150:\r\n pos = 150\r\n self.mono.set_lower_bound(pos)\r\n self.Posvariable.set(str(pos))\r\n self.mono.move(self.mono.lower_bound)\r\n \r\n def start_aquisition(self):\r\n self.specRunning = 
True\r\n self.aquire()\r\n \r\n def aquire(self):\r\n# y = self.mpl.spectrum.get_ydata()\r\n self.mpl.update_spectrum(self.spec.intensities())#(0.99*y)\r\n if self.specRunning:\r\n self.master.after(0,self.aquire)\r\n \r\n def stop_aquisition(self):\r\n self.specRunning = False\r\n \r\n def start_calibration(self):\r\n self.stop_aquisition()\r\n self.playButton.config(state=\"disabled\")\r\n self.pauseButton.config(state=\"disabled\")\r\n self.PosUpdateButton.config(state=\"disabled\")\r\n self.startCalButton.config(state=\"disabled\")\r\n self.nextButton.config(state='normal')\r\n try:\r\n n = int(self.Stepvariable.get())\r\n except:\r\n n = 5\r\n if n<2:\r\n n = 2\r\n elif n>10:\r\n n = 10\r\n self.mmSpace = list(self.mono.lower_bound-np.linspace(5,31-4,n))\r\n self.mono.move(self.mmSpace.pop(0))\r\n sleep(0.1)\r\n self.mpl.update_spectrum(self.spec.intensities())\r\n self.mpl.gen_fit()\r\n \r\n def next_position(self):\r\n self.mono.add_point(self.mono.mot.getPos(),*self.mpl.p[-2:])\r\n try:\r\n mm = self.mmSpace.pop(0)\r\n except IndexError:\r\n self.save_calibration_file()\r\n self.master.destroy()\r\n else:\r\n self.mono.move(mm)\r\n sleep(0.1)\r\n self.mpl.update_spectrum(self.spec.intensities())\r\n self.mpl.gen_fit()\r\n \r\n \r\n def save_calibration_file(self):\r\n path = filedialog.askdirectory(initialdir = os.getcwd(),\r\n title= \"Calibration File Directory\")\r\n self.mono.save_calibration_points(path)\r\n \r\n \r\nclass MPL:\r\n def __init__(self,master,x,y,p=[0,0,500,5],**kwargs):\r\n self.x = x\r\n self.p = np.array(p)\r\n \r\n # Create tk Frame to hold MPL plot\r\n self.frame = tk.Frame(master)\r\n self.frame.grid(**kwargs)\r\n \r\n # Create MPL figure\r\n self.fig = plt.figure(figsize=(10,5))\r\n self.ax = self.fig.add_subplot(111)\r\n self.spectrum, = self.ax.plot(x,y,color=\"blue\")\r\n self.ax.set_xlabel(\"Wavelength (nm)\")\r\n self.ax.set_ylabel(\"Counts\")\r\n self.ax.set_ylim(0,4000)\r\n \r\n # Attached MPL figure and toolbar to tk 
Frame\r\n self.canvas = FigureCanvasTkAgg(self.fig,self.frame)\r\n self.canvas.get_tk_widget().pack()\r\n self.toolbar = NavigationToolbar2Tk(self.canvas,self.frame)\r\n self.toolbar.update()\r\n \r\n # initialize fit\r\n self.fit, = self.ax.plot(x,gauss(x,self.p),color=\"black\")\r\n \r\n # Setup MPL click collbacks\r\n self.canvas.mpl_connect('button_press_event',self.click)\r\n \r\n def click(self,event):\r\n if event.inaxes == self.ax:\r\n if event.button == 1:\r\n print(\"Left click @ x=\",event.xdata,\" y=\",event.ydata)\r\n self.p[1],self.p[2] = event.ydata,event.xdata\r\n self.update_fit()\r\n if event.button == 2:\r\n print(\"Scroll click @ x=\",event.xdata,\" y=\",event.ydata)\r\n if event.button == 3:\r\n print(\"Right click @ x=\",event.xdata,\" y=\",event.ydata)\r\n self.gen_fit()\r\n \r\n def update_fit(self):\r\n self.fit.set_ydata(gauss(self.x,self.p))\r\n self.fig.canvas.draw()\r\n \r\n def update_spectrum(self,y):\r\n self.spectrum.set_ydata(y)\r\n self.fig.canvas.draw()\r\n \r\n def gen_fit(self):\r\n y = self.spectrum.get_ydata()\r\n x0 = self.x[np.argmax(y)]\r\n y0 = np.max(y)\r\n mask = np.array(np.abs(self.x-x0)<50)\r\n def diff(p):\r\n return np.sum((y[mask]-gauss(self.x,p)[mask])**2)\r\n fit = minimize(diff,[y[0],y0,x0,1])\r\n# print(fit)\r\n self.p = np.copy(fit.x)\r\n self.update_fit()" }, { "alpha_fraction": 0.8524590134620667, "alphanum_fraction": 0.8524590134620667, "avg_line_length": 19.33333396911621, "blob_id": "050c7e16c40f8c2ac09a1d58f555b64903240e41", "content_id": "ba5d0eae96d57e9c9703e0deb4c1e79361dde205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/monochromator/__init__.py", "repo_name": "jonathanvanschenck/Monochromator-GUI", "src_encoding": "UTF-8", "text": "import .calibrate\nimport .monochromator\nimport .spectrometer\n" }, { "alpha_fraction": 0.7490347623825073, 
"alphanum_fraction": 0.7490347623825073, "avg_line_length": 17.923076629638672, "blob_id": "4c1a7017a869f53cb280231bb81a62e755c04986", "content_id": "14ec11aa2180e4056a4fe0bcb91ff6b96f22d97b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 53, "num_lines": 13, "path": "/example.py", "repo_name": "jonathanvanschenck/Monochromator-GUI", "src_encoding": "UTF-8", "text": "#%%\r\nfrom monochromator.monochromator import Monochromator\r\nfrom monochromator.calibrate import Calibrate\r\n\r\n# Instantiate monochromator instance\r\ntry:\r\n mono.close()\r\nexcept:\r\n pass\r\nmono = Monochromator()\r\n\r\n# Launch calibration GUI\r\nCalibrate(mono)\r\n" }, { "alpha_fraction": 0.582880437374115, "alphanum_fraction": 0.5874999761581421, "avg_line_length": 36.33333206176758, "blob_id": "22f4e69e0cd9a42f05dbc87f95835c0cef4b19dd", "content_id": "d0245924b68dc91918783ca5ea2b69ba87fd4340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3680, "license_type": "no_license", "max_line_length": 101, "num_lines": 96, "path": "/monochromator/spectrometer.py", "repo_name": "jonathanvanschenck/Monochromator-GUI", "src_encoding": "UTF-8", "text": "\"\"\"Allows pyseabreeze spectrometers to be loaded via a popup window\r\n---Classes---\r\nselectionBox:\r\n A tk widget to hold a list of tk.Radiobuttons which all refer to the same variable\r\n \r\nSpectrometer:\r\n A wrapper for the seabreeze.spectrometer.Spectrometer class, which automatically\r\n searches for available OceanOptics spectrometers. If multiple devices (or no\r\n devices) are available, the software launches a tk window to list the options.\r\n \r\nCreated by: Jonathan D. B. 
Van Schenck\r\n\"\"\"\r\n\r\n#%%\r\nimport tkinter as tk\r\nimport seabreeze.spectrometers as sb\r\n#%%\r\nclass selectionBox(tk.LabelFrame):\r\n '''Container for associated tk.Radiobuttons\r\n ---Initialization Parameters---\r\n master: tk.Frame instance into which the widget will be created\r\n variable: The underlying tk variable which all the Radiobuttons \r\n will be attached to\r\n valueList: List of possible values for which tk.Radiobuttons will\r\n be created\r\n label: Optional Label for the tk.LabelFrame which wraps the radiobuttons\r\n textList: Optional list of labels to represent each valueList (must be either\r\n the same length as valueList, or None).\r\n ---Variables---\r\n variable:\r\n The underlying tk variable which all the Radiobuttons will be attached to\r\n \r\n RBList:\r\n List to hold each tk.Radiobutton instance\r\n \r\n ---Methods---\r\n gen_list:\r\n Generates and packs the tk.Radiobuttons into a tk.LabelFrame\r\n \r\n '''\r\n def __init__(self,master,variable,valueList,label=\"\",textList=None):\r\n tk.LabelFrame.__init__(self,master,text=label)\r\n self.variable = variable\r\n self.RBList = []\r\n self.gen_list(valueList,textList)\r\n \r\n \r\n def gen_list(self,valueList,textList=None):\r\n for rb in self.RBList:\r\n rb.destroy()\r\n if textList is None:\r\n tL = [str(v) for v in valueList]\r\n else:\r\n tL = textList\r\n self.RBList = [tk.Radiobutton(self,text=t,variable=self.variable,value=v,indicatoron=0)\\\r\n for t,v in zip(tL,valueList)]\r\n for i,rb in enumerate(self.RBList):\r\n rb.grid(column=0,row=i)\r\n \r\n\r\n\r\nclass Spectrometer(sb.Spectrometer):\r\n \"\"\"Wrapper for seabreeze.spectrometer.Spectrometer class with smart inialization and popup window\r\n ---Initialization Variables---\r\n \r\n ---Variables---\r\n \r\n ---Methods---\r\n \"\"\"\r\n def __init__(self):\r\n def scan():\r\n return sb.list_devices()\r\n deviceList = scan()\r\n if len(deviceList) == 1:\r\n sb.Spectrometer.__init__(self,deviceList[0])\r\n 
else:\r\n root = tk.Tk()\r\n root.title(\"Spectrometer Selection\")\r\n root.geometry(\"200x200\")\r\n d = tk.StringVar()\r\n buttonList = selectionBox(root,d,deviceList,label=\"Select Spectrometer\")\r\n buttonList.grid(column=0,row=1,columnspan=2)\r\n def rescan(buttonList):\r\n deviceList = scan()\r\n buttonList.gen_list(deviceList)\r\n tk.Button(root,text=\"Rescan\",command= lambda : rescan(buttonList)).grid(column=0,row=0)\r\n def load():\r\n try:\r\n sb.Spectrometer.__init__(self,d.get())\r\n except:\r\n print(\"Problem loading device \\'%s\\', try again\" % d.get())\r\n else:\r\n root.destroy()\r\n tk.Button(root,text=\"Load\",command=load).grid(column=1,row=0)\r\n root.protocol(\"WM_DELETE_WINDOW\", lambda : root.destroy())\r\n root.mainloop()\r\n" }, { "alpha_fraction": 0.5600548386573792, "alphanum_fraction": 0.5766910314559937, "avg_line_length": 37.709617614746094, "blob_id": "e1f6f63ab321c82a40d05874b5e3aac08b4346dd", "content_id": "ae6fe826acc3215155405b4cc980731e2593f6b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21880, "license_type": "no_license", "max_line_length": 230, "num_lines": 551, "path": "/monochromator/monochromator.py", "repo_name": "jonathanvanschenck/Monochromator-GUI", "src_encoding": "UTF-8", "text": "#%%\r\nimport tkinter as tk\r\nimport os\r\nfrom time import sleep\r\nfrom tkinter import messagebox\r\nfrom tkinter import filedialog\r\nimport seabreeze.spectrometers as sb\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\r\nfrom scipy.optimize import minimize\r\n\r\ndef gauss(x,p):\r\n return np.abs(p[0])+np.abs(p[1])*np.exp(-((x-p[2])/p[3])**2)\r\n\r\nfrom time import localtime\r\ndef today():\r\n t = localtime()\r\n return \"{0}{1:0>2}{2:0>2}-{3:0>2}{4:0>2}{5:0>2}\".format(str(t.tm_year)[-2:],t.tm_mon,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec)\r\nfrom ctypes import c_long, 
c_float, windll, pointer\r\n\r\nclass APTMotor():\r\n def __init__(self,SerialNum=None, HWTYPE=31, loc='', verbose=False, dllname='APT.dll'):\r\n '''\r\n HWTYPE_BSC001\t\t11\t// 1 Ch benchtop stepper driver\r\n HWTYPE_BSC101\t\t12\t// 1 Ch benchtop stepper driver\r\n HWTYPE_BSC002\t\t13\t// 2 Ch benchtop stepper driver\r\n HWTYPE_BDC101\t\t14\t// 1 Ch benchtop DC servo driver\r\n HWTYPE_SCC001\t\t21\t// 1 Ch stepper driver card (used within BSC102,103 units)\r\n HWTYPE_DCC001\t\t22\t// 1 Ch DC servo driver card (used within BDC102,103 units)\r\n HWTYPE_ODC001\t\t24\t// 1 Ch DC servo driver cube\r\n HWTYPE_OST001\t\t25\t// 1 Ch stepper driver cube\r\n HWTYPE_MST601\t\t26\t// 2 Ch modular stepper driver module\r\n HWTYPE_TST001\t\t29\t// 1 Ch Stepper driver T-Cube\r\n HWTYPE_TDC001\t\t31\t// 1 Ch DC servo driver T-Cube\r\n HWTYPE_LTSXXX\t\t42\t// LTS300/LTS150 Long Travel Integrated Driver/Stages\r\n HWTYPE_L490MZ\t\t43\t// L490MZ Integrated Driver/Labjack\r\n HWTYPE_BBD10X\t\t44\t// 1/2/3 Ch benchtop brushless DC servo driver\r\n '''\r\n\r\n self.verbose = verbose\r\n self.Connected = False\r\n if not os.path.exists(loc+dllname):\r\n print(\"ERROR: DLL not found\")\r\n self.aptdll = windll.LoadLibrary(loc+dllname)\r\n self.aptdll.EnableEventDlg(True)\r\n self.aptdll.APTInit()\r\n self.HWType = c_long(HWTYPE)\r\n self.blCorr = 0.10 #100um backlash correction\r\n if SerialNum is not None:\r\n if self.verbose: print(\"Serial is\", SerialNum)\r\n self.SerialNum = c_long(SerialNum)\r\n self.initializeHardwareDevice()\r\n # TODO : Error reporting to know if initialisation went sucessfully or not.\r\n else:\r\n if self.verbose: print(\"No serial, please setSerialNumber\")\r\n\r\n def getNumberOfHardwareUnits(self):\r\n '''\r\n Returns the number of HW units connected that are available to be interfaced\r\n '''\r\n numUnits = c_long()\r\n self.aptdll.GetNumHWUnitsEx(self.HWType, pointer(numUnits))\r\n return numUnits.value\r\n\r\n def initializeHardwareDevice(self):\r\n 
'''\r\n Initialises the motor.\r\n You can only get the position of the motor and move the motor after it has been initialised.\r\n Once initiallised, it will not respond to other objects trying to control it, until released.\r\n '''\r\n if self.verbose: print('initializeHardwareDevice serial', self.SerialNum)\r\n result = self.aptdll.InitHWDevice(self.SerialNum)\r\n if result == 0:\r\n self.Connected = True\r\n if self.verbose: print('initializeHardwareDevice connection SUCESS')\r\n # need some kind of error reporting here\r\n else:\r\n raise Exception('Connection Failed. Check Serial Number!')\r\n return True\r\n\r\n '''\r\n Controlling the motors\r\n m = move\r\n c = controlled velocity\r\n b = backlash correction\r\n\r\n Rel = relative distance from current position.\r\n Abs = absolute position\r\n '''\r\n def getPos(self):\r\n '''\r\n Obtain the current absolute position of the stage\r\n '''\r\n if self.verbose: print('getPos probing...')\r\n if not self.Connected:\r\n raise Exception('Please connect first! Use initializeHardwareDevice')\r\n\r\n position = c_float()\r\n self.aptdll.MOT_GetPosition(self.SerialNum, pointer(position))\r\n if self.verbose: print('getPos ', position.value)\r\n return position.value\r\n\r\n def mRel(self, relDistance):\r\n '''\r\n Moves the motor a relative distance specified\r\n relDistance float Relative position desired\r\n '''\r\n if self.verbose: print('mRel ', relDistance, c_float(relDistance))\r\n if not self.Connected:\r\n print('Please connect first! Use initializeHardwareDevice')\r\n #raise Exception('Please connect first! 
Use initializeHardwareDevice')\r\n relativeDistance = c_float(relDistance)\r\n self.aptdll.MOT_MoveRelativeEx(self.SerialNum, relativeDistance, True)\r\n if self.verbose: print('mRel SUCESS')\r\n return True\r\n\r\n def mAbs(self, absPosition):\r\n '''\r\n Moves the motor to the Absolute position specified\r\n absPosition float Position desired\r\n '''\r\n if self.verbose: print('mAbs ', absPosition, c_float(absPosition))\r\n if not self.Connected:\r\n raise Exception('Please connect first! Use initializeHardwareDevice')\r\n absolutePosition = c_float(absPosition)\r\n self.aptdll.MOT_MoveAbsoluteEx(self.SerialNum, absolutePosition, True)\r\n if self.verbose: print('mAbs SUCESS')\r\n return True\r\n\r\n def mcRel(self, relDistance, moveVel=0.5):\r\n '''\r\n Moves the motor a relative distance specified at a controlled velocity\r\n relDistance float Relative position desired\r\n moveVel float Motor velocity, mm/sec\r\n '''\r\n if self.verbose: print('mcRel ', relDistance, c_float(relDistance), 'mVel', moveVel)\r\n if not self.Connected:\r\n raise Exception('Please connect first! Use initializeHardwareDevice')\r\n # Save velocities to reset after move\r\n maxVel = self.getVelocityParameterLimits()[1]\r\n # Set new desired max velocity\r\n self.setVel(moveVel)\r\n self.mRel(relDistance)\r\n self.setVel(maxVel)\r\n if self.verbose: print('mcRel SUCESS')\r\n return True\r\n\r\n def mcAbs(self, absPosition, moveVel=0.5):\r\n '''\r\n Moves the motor to the Absolute position specified at a controlled velocity\r\n absPosition float Position desired\r\n moveVel float Motor velocity, mm/sec\r\n '''\r\n if self.verbose: print('mcAbs ', absPosition, c_float(absPosition), 'mVel', moveVel)\r\n if not self.Connected:\r\n raise Exception('Please connect first! 
Use initializeHardwareDevice')\r\n # Save velocities to reset after move\r\n minVel, acc, maxVel = self.getVelocityParameters()\r\n # Set new desired max velocity\r\n self.setVel(moveVel)\r\n self.mAbs(absPosition)\r\n self.setVel(maxVel)\r\n if self.verbose: print('mcAbs SUCESS')\r\n return True\r\n\r\n def mbRel(self, relDistance):\r\n '''\r\n Moves the motor a relative distance specified\r\n relDistance float Relative position desired\r\n '''\r\n if self.verbose: print('mbRel ', relDistance, c_float(relDistance))\r\n if not self.Connected:\r\n print('Please connect first! Use initializeHardwareDevice')\r\n #raise Exception('Please connect first! Use initializeHardwareDevice')\r\n self.mRel(relDistance-self.blCorr)\r\n self.mRel(self.blCorr)\r\n if self.verbose: print('mbRel SUCESS')\r\n return True\r\n\r\n def mbAbs(self, absPosition):\r\n '''\r\n Moves the motor to the Absolute position specified\r\n absPosition float Position desired\r\n '''\r\n if self.verbose: print('mbAbs ', absPosition, c_float(absPosition))\r\n if not self.Connected:\r\n raise Exception('Please connect first! Use initializeHardwareDevice')\r\n if (absPosition < self.getPos()):\r\n if self.verbose: print('backlash mAbs', absPosition - self.blCorr)\r\n self.mAbs(absPosition-self.blCorr)\r\n self.mAbs(absPosition)\r\n if self.verbose: print('mbAbs SUCESS')\r\n return True\r\n\r\n\r\n def go_home(self):\r\n '''\r\n Move the stage to home position and reset position entry\r\n '''\r\n if self.verbose: print('Going home')\r\n if not self.Connected:\r\n raise Exception('Please connect first! 
Use initializeHardwareDevice')\r\n if self.verbose: print('go_home SUCESS')\r\n self.aptdll.MOT_MoveHome(self.SerialNum)\r\n return True\r\n\r\n\r\n def cleanUpAPT(self):\r\n '''\r\n Releases the APT object\r\n Use when exiting the program\r\n '''\r\n self.aptdll.APTCleanUp()\r\n if self.verbose: print('APT cleaned up')\r\n self.Connected = False\r\n\r\n\r\nclass Monochromator:\r\n def __init__(self,reset=True,SerialNum=20808447, HWTYPE=13, loc='C:/Users/vanschej/Documents/Python Scripts/PyAPT/',verbose=False, dllname='APT.dll'):\r\n self.mot = APTMotor(SerialNum=SerialNum, HWTYPE=HWTYPE, loc=loc,verbose=verbose, dllname=dllname)\r\n self.reset_calibration()\r\n self.set_lower_bound(10)\r\n if reset:\r\n self.go_home()\r\n\r\n def go_home(self):\r\n self.mot.go_home()\r\n self.move(self.lower_bound+5)\r\n\r\n def move(self,mm):\r\n# print(mm)\r\n self.mot.mbAbs(mm)\r\n\r\n def set_lower_bound(self,mm):\r\n self.lower_bound = mm\r\n\r\n def reset_calibration(self):\r\n self.__calibration = [[],[],[]]\r\n\r\n def add_point(self,pos,wave,fwhm):\r\n self.__calibration[0].append(pos)\r\n self.__calibration[1].append(wave)\r\n self.__calibration[2].append(fwhm)\r\n\r\n def create_calibration(self):\r\n self.__b=np.sum((np.array(self.__calibration[1])-np.mean(self.__calibration[1]))*(np.array(self.__calibration[0])-np.mean(self.__calibration[0])))/np.sum((np.array(self.__calibration[1])-np.mean(self.__calibration[1]))**2)\r\n self.__a=np.mean(self.__calibration[0])-self.__b*np.mean(self.__calibration[1])\r\n self.__monoBound = [np.ceil((self.lower_bound-self.__a)/self.__b),np.floor((self.lower_bound-31-self.__a)/self.__b)]\r\n\r\n def save_calibration_points(self,path_to_folder):\r\n self.create_calibration()\r\n oldD = os.getcwd()\r\n os.chdir(path_to_folder)\r\n f = open(today()+\".cal\",\"w\")\r\n for c in self.__calibration:\r\n f.write(\",\".join([str(cc) for cc in c])+\"\\n\")\r\n f.write(\"{0},{1},{2},{3}\\n\".format(self.__b,self.__a,*self.__monoBound))\r\n 
f.close()\r\n os.chdir(oldD)\r\n\r\n def load_calibration_points(self,file):\r\n f = open(file)\r\n calibrationPoints = [[float(ll) for ll in l.strip(\"\\n\").split(\",\")] for l in f]\r\n check_old = np.array(calibrationPoints.pop())\r\n self.reset_calibration()\r\n for p,w,f in zip(*calibrationPoints):\r\n self.add_point(p,w,f)\r\n self.create_calibration()\r\n check_new = np.append([self.__b,self.__a],self.__monoBound)\r\n return np.all(np.abs(check_old-check_new)/check_old < 0.1)\r\n\r\n def get_pos(self,lam):\r\n res = self.__a+self.__b*lam\r\n #assert res>=iniPos and res<=iniPos+31\r\n return(res)\r\n\r\n def go_to_wave(self,lam):\r\n self.move(self.get_pos(lam))\r\n\r\n def shutdown(self):\r\n self.mot.cleanUpAPT()\r\n\r\nclass Calibrate(tk.Frame):\r\n def __init__(self,monochromator):\r\n tk.Frame.__init__(self,None)\r\n self.master.title(\"Calibration Window\")\r\n self.master.protocol(\"WM_DELETE_WINDOW\", lambda : self.master.destroy())\r\n self.spec = Spectrometer()\r\n self.specRunning = True\r\n self.mono = monochromator\r\n self.mono.reset_calibration()\r\n self.create_widgets()\r\n self.start_aquisition()\r\n self.mainloop()\r\n self.spec.close()\r\n\r\n def create_widgets(self):\r\n # Create MPL Figure\r\n self.mpl = MPL(self.master,\r\n self.spec.wavelengths(),self.spec.intensities(),\r\n #np.arange(0,100,0.1),gauss(np.arange(0,100,0.1),[200,2700,40,5]),\r\n #self.spec.wavelengths,self.spec.intensities,\r\n column=0,row=2,columnspan=2)\r\n\r\n # Create Spectrometer control window\r\n self.specFrame = tk.LabelFrame(self.master,text=\"Spectrometer Controls\")\r\n self.specFrame.grid(column=0,row=0)\r\n self.ITLabel = tk.Label(self.specFrame,text=\"IT (ms)\")\r\n self.ITLabel.grid(column=0,row=0,sticky=tk.E)\r\n self.ITvariable = tk.StringVar()\r\n self.set_IT(20)\r\n self.ITEntry = tk.Entry(self.specFrame,\r\n textvariable=self.ITvariable,\r\n width=6)\r\n self.ITEntry.grid(column=1,row=0)\r\n self.ITUpdateButton = 
tk.Button(self.specFrame,text=\"Update\",\r\n command=lambda: self.set_IT(self.ITvariable.get()))\r\n self.ITUpdateButton.grid(column=2,row=0,sticky=tk.W)\r\n self.PPLabel = tk.Label(self.specFrame,text=\"Aquire:\")\r\n self.PPLabel.grid(column=0,row=1,sticky=tk.E)\r\n self.playButton = tk.Button(self.specFrame,text=\"Play\",\r\n command=lambda: self.start_aquisition())\r\n self.playButton.grid(column=1,row=1)\r\n self.pauseButton = tk.Button(self.specFrame,text=\"Pause\",\r\n command=lambda: self.stop_aquisition())\r\n self.pauseButton.grid(column=2,row=1)\r\n\r\n # Create calibration setup\r\n self.calFrame = tk.LabelFrame(self.master,text=\"Spectrometer Controls\")\r\n self.calFrame.grid(column=1,row=0)\r\n self.PosLabel = tk.Label(self.calFrame,text=\"Starting Position:\")\r\n self.PosLabel.grid(column=0,row=0,sticky=tk.E)\r\n self.Posvariable = tk.StringVar()\r\n self.set_Pos(self.mono.lower_bound)\r\n self.PosEntry = tk.Entry(self.calFrame,\r\n textvariable=self.Posvariable,\r\n width=6)\r\n self.PosEntry.grid(column=1,row=0)\r\n self.PosUpdateButton = tk.Button(self.calFrame,text=\"Move\",\r\n command=lambda: self.set_Pos(self.Posvariable.get()))\r\n self.PosUpdateButton.grid(column=2,row=0,sticky=tk.W)\r\n self.stepLabel = tk.Label(self.calFrame,text=\"Number of Steps:\")\r\n self.stepLabel.grid(column=0,row=1,sticky=tk.E)\r\n self.Stepvariable = tk.StringVar()\r\n self.Stepvariable.set(\"3\")\r\n self.StepEntry = tk.Entry(self.calFrame,\r\n textvariable=self.Stepvariable,\r\n width=6)\r\n self.StepEntry.grid(column=1,row=1)\r\n self.startCalButton = tk.Button(self.calFrame,\r\n text=\"Start Calibration\",\r\n command = lambda: self.start_calibration())\r\n self.startCalButton.grid(column=0,row=2)\r\n self.nextButton = tk.Button(self.calFrame,\r\n text=\"Next Position\",\r\n command = lambda: self.next_position())\r\n self.nextButton.grid(column=1,row=2)\r\n self.nextButton.config(state='disabled')\r\n\r\n\r\n def set_IT(self,IT):\r\n try:\r\n it = 
int(IT)*1000\r\n except:\r\n it = 100*1000\r\n if it<10*1000:\r\n it = 10*1000\r\n elif it>10*1000*1000:\r\n it = 10*1000*1000\r\n self.spec.integration_time_micros(it)\r\n self.ITvariable.set(str(it//1000))\r\n self.mpl.update_spectrum(self.spec.intensities())\r\n\r\n def set_Pos(self,POS):\r\n try:\r\n pos = int(POS)\r\n except:\r\n pos = 100\r\n if pos<0:\r\n pos = 0\r\n elif pos>150:\r\n pos = 150\r\n self.mono.set_lower_bound(pos)\r\n self.Posvariable.set(str(pos))\r\n self.mono.move(self.mono.lower_bound)\r\n\r\n def start_aquisition(self):\r\n self.specRunning = True\r\n self.aquire()\r\n\r\n def aquire(self):\r\n# y = self.mpl.spectrum.get_ydata()\r\n self.mpl.update_spectrum(self.spec.intensities())#(0.99*y)\r\n if self.specRunning:\r\n self.master.after(0,self.aquire)\r\n\r\n def stop_aquisition(self):\r\n self.specRunning = False\r\n\r\n def start_calibration(self):\r\n self.stop_aquisition()\r\n self.playButton.config(state=\"disabled\")\r\n self.pauseButton.config(state=\"disabled\")\r\n self.PosUpdateButton.config(state=\"disabled\")\r\n self.startCalButton.config(state=\"disabled\")\r\n self.nextButton.config(state='normal')\r\n try:\r\n n = int(self.Stepvariable.get())\r\n except:\r\n n = 5\r\n if n<2:\r\n n = 2\r\n elif n>10:\r\n n = 10\r\n self.mmSpace = list(self.mono.lower_bound-np.linspace(5,31-4,n))\r\n self.mono.move(self.mmSpace.pop(0))\r\n sleep(0.1)\r\n self.mpl.update_spectrum(self.spec.intensities())\r\n self.mpl.gen_fit()\r\n\r\n def next_position(self):\r\n self.mono.add_point(self.mono.mot.getPos(),*self.mpl.p[-2:])\r\n try:\r\n mm = self.mmSpace.pop(0)\r\n except IndexError:\r\n self.save_calibration_file()\r\n self.master.destroy()\r\n else:\r\n self.mono.move(mm)\r\n sleep(0.1)\r\n self.mpl.update_spectrum(self.spec.intensities())\r\n self.mpl.gen_fit()\r\n\r\n\r\n def save_calibration_file(self):\r\n path = filedialog.askdirectory(initialdir = os.getcwd(),\r\n title= \"Calibration File Directory\")\r\n 
self.mono.save_calibration_points(path)\r\n\r\n\r\nclass MPL:\r\n def __init__(self,master,x,y,p=[0,0,500,5],**kwargs):\r\n self.x = x\r\n self.p = np.array(p)\r\n\r\n # Create tk Frame to hold MPL plot\r\n self.frame = tk.Frame(master)\r\n self.frame.grid(**kwargs)\r\n\r\n # Create MPL figure\r\n self.fig = plt.figure(figsize=(10,5))\r\n self.ax = self.fig.add_subplot(111)\r\n self.spectrum, = self.ax.plot(x,y,color=\"blue\")\r\n self.ax.set_xlabel(\"Wavelength (nm)\")\r\n self.ax.set_ylabel(\"Counts\")\r\n self.ax.set_ylim(0,4000)\r\n\r\n # Attached MPL figure and toolbar to tk Frame\r\n self.canvas = FigureCanvasTkAgg(self.fig,self.frame)\r\n self.canvas.get_tk_widget().pack()\r\n self.toolbar = NavigationToolbar2Tk(self.canvas,self.frame)\r\n self.toolbar.update()\r\n\r\n # initialize fit\r\n self.fit, = self.ax.plot(x,gauss(x,self.p),color=\"black\")\r\n\r\n # Setup MPL click collbacks\r\n self.canvas.mpl_connect('button_press_event',self.click)\r\n\r\n def click(self,event):\r\n if event.inaxes == self.ax:\r\n if event.button == 1:\r\n print(\"Left click @ x=\",event.xdata,\" y=\",event.ydata)\r\n self.p[1],self.p[2] = event.ydata,event.xdata\r\n self.update_fit()\r\n if event.button == 2:\r\n print(\"Scroll click @ x=\",event.xdata,\" y=\",event.ydata)\r\n if event.button == 3:\r\n print(\"Right click @ x=\",event.xdata,\" y=\",event.ydata)\r\n self.gen_fit()\r\n\r\n def update_fit(self):\r\n self.fit.set_ydata(gauss(self.x,self.p))\r\n self.fig.canvas.draw()\r\n\r\n def update_spectrum(self,y):\r\n self.spectrum.set_ydata(y)\r\n self.fig.canvas.draw()\r\n\r\n def gen_fit(self):\r\n y = self.spectrum.get_ydata()\r\n x0 = self.x[np.argmax(y)]\r\n y0 = np.max(y)\r\n mask = np.array(np.abs(self.x-x0)<50)\r\n def diff(p):\r\n return np.sum((y[mask]-gauss(self.x,p)[mask])**2)\r\n fit = minimize(diff,[y[0],y0,x0,1])\r\n# print(fit)\r\n self.p = np.copy(fit.x)\r\n self.update_fit()\r\n\r\n#%\r\nclass selectionBox(tk.LabelFrame):\r\n def 
__init__(self,master,variable,valueList,label=\"\",textList=None):\r\n tk.LabelFrame.__init__(self,master,text=label)\r\n self.variable = variable\r\n self.RBList = []\r\n self.gen_list(valueList,textList)\r\n\r\n\r\n def gen_list(self,valueList,textList=None):\r\n for rb in self.RBList:\r\n rb.destroy()\r\n if textList is None:\r\n tL = [str(v) for v in valueList]\r\n else:\r\n tL = textList\r\n self.RBList = [tk.Radiobutton(self,text=t,variable=self.variable,value=v,indicatoron=0)\\\r\n for t,v in zip(tL,valueList)]\r\n for i,rb in enumerate(self.RBList):\r\n rb.grid(column=0,row=i)\r\n\r\nclass Spectrometer(sb.Spectrometer):\r\n def __init__(self):\r\n def scan():\r\n return sb.list_devices()\r\n deviceList = scan()\r\n if len(deviceList) == 1:\r\n sb.Spectrometer.__init__(self,deviceList[0])\r\n else:\r\n root = tk.Tk()\r\n root.title(\"Spectrometer Selection\")\r\n root.geometry(\"200x200\")\r\n d = tk.StringVar()\r\n buttonList = selectionBox(root,d,deviceList,label=\"Select Spectrometer\")\r\n buttonList.grid(column=0,row=1,columnspan=2)\r\n def rescan(buttonList):\r\n deviceList = scan()\r\n buttonList.gen_list(deviceList)\r\n tk.Button(root,text=\"Rescan\",command= lambda : rescan(buttonList)).grid(column=0,row=0)\r\n def load():\r\n try:\r\n sb.Spectrometer.__init__(self,d.get())\r\n except:\r\n print(\"Problem loading device \\'%s\\', try again\" % d.get())\r\n else:\r\n root.destroy()\r\n tk.Button(root,text=\"Load\",command=load).grid(column=1,row=0)\r\n root.protocol(\"WM_DELETE_WINDOW\", lambda : root.destroy())\r\n root.mainloop()\r\n" } ]
6
SiddhanthHegde/You-Need-to-Pay-More-Attention
https://github.com/SiddhanthHegde/You-Need-to-Pay-More-Attention
c9cde0e93d13d64784128fe168d0624dbc181b38
601cca190d52f9d83eb92924929831a146ed8904
5d1c92177388228c447f39e1e630c0ed7d9b8784
refs/heads/main
2023-03-09T04:53:04.914273
2021-03-05T15:43:51
2021-03-05T15:43:51
340,835,280
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6889660954475403, "alphanum_fraction": 0.6976541876792908, "avg_line_length": 29.3157901763916, "blob_id": "189dcd8a930059053a88c4351b9d2a211d0b56c5", "content_id": "f89ecccd81a3ee7364d454999b57275c5bd8845f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 88, "num_lines": 38, "path": "/splitAndMake.py", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "#This file creates a balanced split between the classes and makes it model feedable form\nimport os\nimport pandas as pd\nfrom zipfile import ZipFile\nfrom utils import move_data, split_data\n\nextract_path = 'training_img.zip'\nwith ZipFile(extract_path, 'r') as zipObj:\n zipObj.extractall()\n\nos.mkdir('Troll')\nos.mkdir('Non_troll')\nsrc = 'uploaded_tamil_memes'\nmove_data(src,'Troll','Non_troll')\n\nos.mkdir('Train')\nos.mkdir('Val')\nsplit_data('Troll','Train','Val',128)\nsplit_data('Non_troll','Train','Val',101)\n\ndf = pd.read_csv('train_captions.csv')\ndf.drop('Unnamed: 0',axis=1,inplace=True)\n\ntrain_df_data = []\nval_df_data = []\nfor img_name in os.listdir('Train'):\n ind = list(df[df['imagename'] == img_name].index)[0]\n train_df_data.append([img_name,df['captions'].iloc[ind]])\n\nfor img_name in os.listdir('Val'):\n ind = list(df[df['imagename'] == img_name].index)[0]\n val_df_data.append([img_name,df['captions'].iloc[ind]])\n\ntrain_df = pd.DataFrame(train_df_data,columns=['img_name','captions'])\nval_df = pd.DataFrame(val_df_data,columns=['img_name','captions'])\n\ntrain_df.to_csv('train_df.csv',index=False)\nval_df.to_csv('val_df.csv',index=False)" }, { "alpha_fraction": 0.6814229488372803, "alphanum_fraction": 0.7047430872917175, "avg_line_length": 24.565656661987305, "blob_id": "43fa23b0491a69c1eec251cdc28c0f013faceb75", "content_id": "04bb595cac3cde7e45ed12f16f9125457a2fbc9d", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 2530, "license_type": "no_license", "max_line_length": 99, "num_lines": 99, "path": "/train.py", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nplt.style.use(\"ggplot\")\n\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\n\nimport timm\nfrom zipfile import ZipFile\nimport os\nimport time\nfrom shutil import copy2\nfrom torch.utils.data import DataLoader\nfrom transformers import AdamW,get_linear_schedule_with_warmup,AutoModel,AutoTokenizer\nfrom PIL import Image\nfrom collections import defaultdict\n\nfrom model import multimodal\nfrom dataset import create_data_loader\nfrom utils import train_epoch, eval_model, epoch_time\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nPRE_TRAINED_MODEL_NAME = 'bert-base-multilingual-cased'\ntokenizer = AutoTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)\n\nmy_trans = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), \n])\n\nBATCH_SIZE = 16\nMAX_LEN = 128\nEPOCHS = 4\nhistory = defaultdict(list)\nbest_accuracy = 0\nLOAD_MODEL = False\n\ntrain_data_loader = create_data_loader(train_df,tokenizer,MAX_LEN,BATCH_SIZE,my_trans,'Train',True)\nval_data_loader = create_data_loader(val_df,tokenizer,MAX_LEN,BATCH_SIZE,my_trans,'Val',False)\n\nmodel = multimodal()\nmodel = model.to(device)\n\noptimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)\ntotal_steps = len(train_data_loader) * EPOCHS\nscheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n)\n\nloss = nn.BCEWithLogitsLoss().to(device)\n\nfor epoch in range(EPOCHS):\n \n \n start_time = time.time()\n train_acc,train_loss = train_epoch(\n 
model,\n train_data_loader,\n loss,\n optimizer,\n device,\n scheduler,\n 2071\n )\n \n \n val_acc,val_loss = eval_model(\n model,\n val_data_loader,\n loss,\n device,\n 229\n )\n \n end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'Train Loss {train_loss} accuracy {train_acc}')\n print(f'Val Loss {val_loss} accuracy {val_acc}')\n print()\n\n history['train_acc'].append(train_acc)\n history['train_loss'].append(train_loss)\n history['val_acc'].append(val_acc)\n history['val_loss'].append(val_loss)\n\nif history['val_acc'][-1] > 0.95:\n torch.save('vit-bert-1.0val.bin')\n\nif LOAD_MODEL:\n model.load_state_dict(torch.load('vit-bert-1.0val.bin'))" }, { "alpha_fraction": 0.6425339579582214, "alphanum_fraction": 0.6674208045005798, "avg_line_length": 31.77777862548828, "blob_id": "4f5583b80b5e5ceb7df5df87275e5f7f0d9fce37", "content_id": "ff02083d47233053ca9a5d7d11b3c0494420c743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 884, "license_type": "no_license", "max_line_length": 73, "num_lines": 27, "path": "/model.py", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport timm\nfrom transformers import AutoModel \n\nclass multimodal(nn.Module):\n def __init__(self):\n super(multimodal, self).__init__()\n self.vit = timm.create_model(\"vit_base_patch16_224\", pretrained=True)\n self.bert = AutoModel.from_pretrained('bert-base-multilingual-cased')\n self.vit.head = nn.Linear(self.vit.head.in_features, 128)\n self.fc1 = nn.Linear(self.bert.config.hidden_size,128)\n self.relu = nn.ReLU()\n self.fc2 = nn.Linear(256,1)\n self.drop = nn.Dropout(p=0.2)\n\n def forward(self,input_ids, attention_mask, img):\n _, pooled_output = self.bert(\n input_ids = input_ids,\n attention_mask = attention_mask\n )\n text_out = 
self.fc1(pooled_output)\n img_out = self.vit(img)\n merged = torch.cat((text_out,img_out),1)\n act = self.relu(merged)\n out = self.drop(act)\n return self.fc2(out)" }, { "alpha_fraction": 0.7520259022712708, "alphanum_fraction": 0.7763370871543884, "avg_line_length": 50.41666793823242, "blob_id": "cdbef790d83bd40e3dec16fecb327a42daba3d9e", "content_id": "58c1c6bbbc724ef55a837647b58c6a29e0a84911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 617, "license_type": "no_license", "max_line_length": 172, "num_lines": 12, "path": "/README.md", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "# You-Need-to-Pay-More-Attention\nThis repository is the source code for the paper \"UVCE-IIITT@DravidianLangTech-EACL2021: Tamil Troll Meme Classification: You need to Pay more Attention\"\n\nThis is a deep learning model for multimodal analysis of Tamil memes given the images and its captions. The main idea was to gain more attention and not use any CNN or RNN.\n\nSteps to run:\n\n1) Run the makeAndSplit.py to split the data\n2) Run the train.py to train the model\n3) Run the test.py for inference\n\n<img width = \"2406\" src = \"https://github.com/SiddhanthHegde/You-Need-to-Pay-More-Attention/blob/main/Meme%20img%20.jpg\">\n" }, { "alpha_fraction": 0.6039215922355652, "alphanum_fraction": 0.6065359711647034, "avg_line_length": 26.339284896850586, "blob_id": "492dbaca86b1a71dd910ef130e3315687e95c53b", "content_id": "fd7c4f5367dd41a63e15c1241fdceba318b3d165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/dataset.py", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "import torch\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\n\nclass 
TamilDataset(torch.utils.data.Dataset):\n def __init__(self,df,tokenizer,max_len,path,transforms=None):\n self.data_dir = path\n self.df = df\n self.tokenizer = tokenizer\n self.transforms = transforms\n self.max_len = max_len\n\n def __len__(self):\n return self.df.shape[0]\n\n def __getitem__(self,index):\n img_name, captions = self.df.iloc[index]\n img_path = os.path.join(self.data_dir,img_name)\n labels = 0 if img_name.startswith('N') else 1\n img = Image.open(img_path).convert('RGB')\n\n if self.transforms is not None:\n img = self.transforms(img)\n\n encoding = self.tokenizer.encode_plus(\n captions,\n add_special_tokens=True,\n max_length = self.max_len,\n return_token_type_ids = False,\n padding = 'max_length',\n return_attention_mask= True,\n return_tensors='pt',\n truncation=True\n )\n\n return {\n 'image' : img,\n 'text' : captions,\n 'input_ids' : encoding['input_ids'].flatten(),\n 'attention_mask' : encoding['attention_mask'].flatten(),\n 'label' : torch.tensor(labels,dtype=torch.float)\n } \n\ndef create_data_loader(df,tokenizer,max_len,batch_size,mytransforms,path,shuffle):\n ds = TamilDataset(\n df,\n tokenizer,\n max_len,\n path,\n mytransforms\n )\n\n return DataLoader(ds,\n batch_size = batch_size,\n shuffle=False,\n num_workers=4)" }, { "alpha_fraction": 0.5946372151374817, "alphanum_fraction": 0.6031545996665955, "avg_line_length": 30.396039962768555, "blob_id": "cf5e1af45bf542ae13ffd59459f7aca6f4de35b3", "content_id": "8cbcdeab7272015ca21acbe187c3a9b197f9c3fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3170, "license_type": "no_license", "max_line_length": 81, "num_lines": 101, "path": "/utils.py", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "import os\nfrom shutil import copy2\nimport time\nimport torch\n\ndef move_data(start,troll,not_troll):\n for img_name in os.listdir(start):\n src = os.path.join(start,img_name)\n if 
img_name.startswith('N'):\n copy2(src,not_troll)\n else:\n copy2(src,troll)\n\ndef split_data(start,train,val,split):\n for i, img_name in enumerate(os.listdir(start)):\n src = os.path.join(start,img_name)\n if i < split:\n copy2(src,val)\n else:\n copy2(src,train)\n\ndef epoch_time(start_time,end_time):\n\telapsed_time = end_time - start_time\n\telapsed_mins = int(elapsed_time/60)\n\telapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n\treturn elapsed_mins,elapsed_secs\n\ndef train_epoch(model,data_loader,loss_fn,optimizer,device,scheduler,n_examples):\n model = model.train()\n losses = []\n correct_predictions = 0\n\n for idx, data in enumerate(data_loader):\n\n input_ids = data['input_ids'].to(device)\n attention_mask = data['attention_mask'].to(device)\n labels = data['label'].to(device)\n labelsviewed = labels.view(labels.shape[0],1)\n image = data['image'].to(device)\n\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n img=image\n )\n preds = [0 if x < 0.5 else 1 for x in outputs]\n preds = torch.tensor(preds).to(device)\n loss = loss_fn(outputs,labelsviewed)\n\n correct_predictions += torch.sum(preds == labels)\n losses.append(loss.item())\n\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n\n return correct_predictions.double() / n_examples, np.mean(losses)\n\ndef eval_model(model, data_loader, loss_fn, device, n_examples):\n model = model.eval()\n losses = []\n correct_predictions = 0\n with torch.no_grad():\n for d in data_loader:\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n labels = d[\"label\"].to(device)\n labelsviewed = labels.view(labels.shape[0],1)\n image = d['image'].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n img=image\n )\n preds = [0 if x < 0.5 else 1 for x in outputs]\n preds = torch.tensor(preds).to(device)\n loss = 
loss_fn(outputs, labelsviewed)\n correct_predictions += torch.sum(preds == labels)\n losses.append(loss.item())\n return correct_predictions.double() / n_examples, np.mean(losses)\n\ndef get_predictions(model,data_loader, device):\n model = model.eval()\n f_preds = []\n with torch.no_grad():\n for d in data_loader:\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n image = d['image'].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n img=image\n )\n preds = ['Non-troll' if x < 0.5 else 'Troll' for x in outputs]\n for j in preds:\n f_preds.append(j)\n \n return f_preds" }, { "alpha_fraction": 0.7481590509414673, "alphanum_fraction": 0.754050076007843, "avg_line_length": 27.25, "blob_id": "8a9e98939d00341dfa12cdc47599e83eefc506bf", "content_id": "c26a137e0f199a4b29420ef18fdec162f75ae0ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 101, "num_lines": 24, "path": "/test.py", "repo_name": "SiddhanthHegde/You-Need-to-Pay-More-Attention", "src_encoding": "UTF-8", "text": "import torch\nimport pandas as pd\nfrom dataset import create_data_loader\nfrom model import multimodal\nfrom utils import get_predictions\nfrom zipfile import ZipFile\n\nLOAD_MODEL = True\ndevice = 'cuda'\n\nmodel = multimodal()\nmodel = model.to(device)\n\nif LOAD_MODEL:\n model.load_state_dict(torch.load('vit-bert-1.0val.bin'))\n\ndf_test = pd.read_csv('test_captions.csv')\ndf_test.drop('Unnamed: 0',axis=1,inplace=True)\nextract_path = 'test_img.zip'\nwith ZipFile(extract_path, 'r') as zipObj:\n zipObj.extractall()\n\ntest_data_loader = create_data_loader(df_test,tokenizer,MAX_LEN,BATCH_SIZE,my_trans,'test_img',False)\nsubmission_preds = get_predictions(model,test_data_loader,device)\n\n" } ]
7
a1600012888/AdversarialTraining
https://github.com/a1600012888/AdversarialTraining
6dcb34801f1f498a85c49e7d3a1be225e7d6ff7f
8893d1d02be571184d42d63a3ab677a7dd4db527
22addead5f5b0ce51dc10adb87a92868d935f5b4
refs/heads/master
2020-04-23T15:32:25.689941
2019-02-28T08:49:33
2019-02-28T08:49:33
171,269,134
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.6163793206214905, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 20.090909957885742, "blob_id": "f8378d732cc248e301f00fac80aadb17d9660a75", "content_id": "356bbc0551cf94f4ea367fe5021f21003770df22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/experiments/CIFAR10/trades.train/network.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "import config\nfrom base_model.cifar_resnet18 import cifar_resnet18\n\ndef create_network():\n net = cifar_resnet18(num_class = 10, expansion = 1)\n\n return net\n\nif __name__ == '__main__':\n net = create_network()\n print(net)\n" }, { "alpha_fraction": 0.6173070669174194, "alphanum_fraction": 0.6394876837730408, "avg_line_length": 36.22093200683594, "blob_id": "2d6a2ad8d74a2458ab6ce986f5d771c63cc25a31", "content_id": "26d8e634c7d6f18248cb973ac59caf79a87b2cf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3201, "license_type": "no_license", "max_line_length": 123, "num_lines": 86, "path": "/experiments/CIFAR10/cosine.adv.train/config.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "from easydict import EasyDict\nimport sys\nimport os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\ndef add_path(path):\n if path not in sys.path:\n print('Adding {}'.format(path))\n sys.path.append(path)\n\nabs_current_path = os.path.realpath('./')\nroot_path = os.path.join('/', *abs_current_path.split(os.path.sep)[:-3])\nlib_dir = os.path.join(root_path, 'lib')\nadd_path(lib_dir)\n\nfrom training.config import TrainingConfigBase, SGDOptimizerMaker, \\\n PieceWiseConstantLrSchedulerMaker, IPGDAttackMethodMaker\n\n\nclass CosineClassificationLoss(torch.nn.modules.loss._Loss):\n def __init__(self, class_num 
= 10, reduction = 'mean'):\n super(CosineClassificationLoss, self).__init__()\n self.class_num = class_num\n self.reduction = reduction\n self.cosine_similarity = torch.nn.CosineSimilarity()\n\n def forward(self, pred, target):\n one_hot_target = torch.zeros_like(pred)\n one_hot_target[list(range(pred.size(0))), target] = 1\n minus_cosine_similarity = 1 - self.cosine_similarity(pred, one_hot_target)\n if self.reduction == 'mean':\n loss = torch.mean(minus_cosine_similarity)\n else:\n loss = torch.sum(minus_cosine_similarity)\n\n return loss\n\nclass TrainingConfing(TrainingConfigBase):\n\n lib_dir = lib_dir\n\n num_epochs = 180\n val_interval = 10\n\n create_optimizer = SGDOptimizerMaker(lr =1e-1, momentum = 0.9, weight_decay = 1e-4)\n create_lr_scheduler = PieceWiseConstantLrSchedulerMaker(milestones = [70, 120, 150], gamma = 0.1)\n\n create_loss_function = CosineClassificationLoss\n\n create_attack_method = \\\n IPGDAttackMethodMaker(eps = 8/255.0, sigma = 2/255.0, nb_iters = 10, norm = np.inf,\n mean = torch.tensor(np.array([0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]),\n std = torch.tensor(np.array([1]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]))\n\n create_evaluation_attack_method = \\\n IPGDAttackMethodMaker(eps = 8/255.0, sigma = 2/255.0, nb_iters = 20, norm = np.inf,\n mean = torch.tensor(np.array([0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]),\n std = torch.tensor(np.array([1]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]))\n\n\nconfig = TrainingConfing()\n\n\n# About data\n# C.inp_chn = 1\n# C.num_class = 10\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--resume', default=None, type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-b', '--batch_size', default=256, type=int,\n metavar='N', help='mini-batch size')\nparser.add_argument('-d', type=int, default=7, help='Which gpu to use')\nparser.add_argument('-freq', 
'--attack-interval', default=2, type = int,\n help = 'Specify how many iterations between two batch of adv images')\nparser.add_argument('--auto-continue', default=False, action = 'store_true',\n help = 'Continue from the latest checkpoint')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n pass\n" }, { "alpha_fraction": 0.771725058555603, "alphanum_fraction": 0.7924773097038269, "avg_line_length": 54, "blob_id": "6f14f8132747041b33332f21ab19c8639b8570d3", "content_id": "30e01339b73fd74b5200b6e23cf5d02b6217a868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 771, "license_type": "no_license", "max_line_length": 130, "num_lines": 14, "path": "/README.md", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "# AdversarialTraining\n\nThis is an inplementaion of Adversarial Training on Python3, Pytorch. The code is designed to be easy to be extended.\n\nThis repository now includes:\n* Natural training code for CIFAR-10 (see experiments/CIFAR10/adversaril.train)\n* Adversaril Training code for CIFAR-10 (see experiments/CIFAR10/adversaril.train)\n* PGD attack with different norm constraint, e.g. L-1, L2, L-$\\inf$ (see lib/attack/pgd.py)\n* TRADES attack and TRADES training. 
(which won the 1st place in the robust model track NeurIPS 2018 Adversarial Vision Challenge)\n\nThis repository will includes:\n* Examples of training on your own dataset, using your own models, your own loss functions or training againt your own adversaries\n* More popular attack methods\n* More base models\n\n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 33, "blob_id": "f82f3ecdc70ac9c8fb32dbb072d755f5d2c9b34c", "content_id": "b6bac43fce5914993f9b7f789ec091c1b384d86a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 33, "num_lines": 1, "path": "/lib/attack/__init__.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "from .attack_base import clip_eta\n" }, { "alpha_fraction": 0.7118320465087891, "alphanum_fraction": 0.7156488299369812, "avg_line_length": 27.324323654174805, "blob_id": "d702c2bf97b8f3ba8e8c0dd04ffba79e9060f813", "content_id": "0aaf4e00cb2edd164d014fbbe7e7e48250ac1b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 83, "num_lines": 37, "path": "/experiments/CIFAR10/natural.train/eval.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "from config import config\nfrom dataset import create_test_dataset\nfrom network import create_network\n\nfrom training.train import eval_one_epoch\nfrom utils.misc import load_checkpoint\n\nimport argparse\nimport torch\nimport numpy as np\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--resume', '--resume', default='log/models/last.checkpoint',\n type=str, metavar='PATH',\n help='path to latest checkpoint (default:log/last.checkpoint)')\nparser.add_argument('-d', type=int, default=0, help='Which gpu to use')\nargs = 
parser.parse_args()\n\n\nDEVICE = torch.device('cuda:{}'.format(args.d))\ntorch.backends.cudnn.benchmark = True\n\nnet = create_network()\nnet.to(DEVICE)\n\nds_val = create_test_dataset(512)\n\nAttackMethod = config.create_evaluation_attack_method(DEVICE)\n\nif os.path.isfile(args.resume):\n load_checkpoint(args.resume, net)\n\n\nprint('Evaluating')\nclean_acc, adv_acc = eval_one_epoch(net, ds_val, DEVICE, AttackMethod)\nprint('clean acc -- {} adv acc -- {}'.format(clean_acc, adv_acc))\n" }, { "alpha_fraction": 0.6117504835128784, "alphanum_fraction": 0.615081787109375, "avg_line_length": 26.065574645996094, "blob_id": "7fb4232b531bb6fabc4b2818df9a22cba5670e8c", "content_id": "c15ffca64d7981af4a6d186d1614bc160280f6fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3302, "license_type": "no_license", "max_line_length": 108, "num_lines": 122, "path": "/lib/training/config.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "from abc import ABCMeta, abstractproperty, abstractmethod\nfrom typing import Tuple, List, Dict\nimport os\nimport sys\nimport torch\n\n\nclass TrainingConfigBase(metaclass=ABCMeta):\n '''\n Base class for training\n '''\n\n # directory handling\n @property\n def abs_current_dir(self):\n return os.path.realpath('./')\n\n @property\n def log_dir(self):\n if not os.path.exists('./log'):\n os.mkdir('./log')\n return os.path.join(self.abs_current_dir, 'log')\n\n @property\n def model_dir(self):\n log_dir = self.log_dir\n model_dir = os.path.join(log_dir, 'models')\n #print(model_dir)\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n return model_dir\n\n @abstractproperty\n def lib_dir(self):\n pass\n\n # training setting\n @abstractproperty\n def num_epochs(self):\n pass\n\n @property\n def val_interval(self):\n '''\n Specify how many epochs between two validation steps\n Return <= 0 means no validation phase\n '''\n return 0\n\n @abstractmethod\n 
def create_optimizer(self, params) -> torch.optim.Optimizer:\n '''\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n '''\n pass\n\n @abstractmethod\n def create_lr_scheduler(self, optimizer:torch.optim.Optimizer) -> torch.optim.lr_scheduler._LRScheduler:\n pass\n\n @abstractmethod\n def create_loss_function(self) -> torch.nn.modules.loss._Loss:\n pass\n\n\n def create_attack_method(self, *inputs):\n '''\n Perform adversarial training against xxx adversary\n Return None means natural training\n '''\n return None\n\n # Evaluation Setting\n\n def create_evaluation_attack_method(self, *inputs):\n '''\n evaluating the robustness of model against xxx adversary\n Return None means only measuring clean accuracy\n '''\n return None\n\n\n\n\nclass SGDOptimizerMaker(object):\n\n def __init__(self, lr = 0.1, momentum = 0.9, weight_decay = 1e-4):\n self.lr = lr\n self.momentum = momentum\n self.weight_decay = weight_decay\n\n def __call__(self, params):\n return torch.optim.SGD(params, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay)\n\n\nclass PieceWiseConstantLrSchedulerMaker(object):\n\n def __init__(self, milestones:List[int], gamma:float = 0.1):\n self.milestones = milestones\n self.gamma = gamma\n\n def __call__(self, optimizer):\n return torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.milestones, gamma=self.gamma)\n\nclass IPGDAttackMethodMaker(object):\n\n def __init__(self, eps, sigma, nb_iters, norm, mean, std):\n self.eps = eps\n self.sigma = sigma\n self.nb_iters = nb_iters\n self.norm = norm\n self.mean = mean\n self.std = std\n\n def __call__(self, DEVICE):\n father_dir = os.path.join('/', *os.path.realpath(__file__).split(os.path.sep)[:-2])\n # print(father_dir)\n if not father_dir in sys.path:\n sys.path.append(father_dir)\n from attack.pgd import IPGD\n return IPGD(self.eps, self.sigma, self.nb_iters, self.norm, DEVICE, self.mean, self.std)\n" }, { "alpha_fraction": 
0.5624309182167053, "alphanum_fraction": 0.580110490322113, "avg_line_length": 26.42424201965332, "blob_id": "715a0d4f2c0a2f321f9829750bd16c0f9bf4a07d", "content_id": "ac14b75c21cbd6d8ca17b1e5508d320d75b9d4cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1810, "license_type": "no_license", "max_line_length": 101, "num_lines": 66, "path": "/lib/attack/attack_base.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nfrom abc import ABCMeta, abstractmethod, abstractproperty\n\nclass AttackBase(metaclass=ABCMeta):\n @abstractmethod\n def attack(self, net, inp, label, target = None):\n '''\n\n :param inp: batched images\n :param target: specify the indexes of target class, None represents untargeted attack\n :return: batched adversaril images\n '''\n pass\n\n @abstractmethod\n def to(self, device):\n pass\n\n\n\ndef clip_eta(eta, norm, eps, DEVICE = torch.device('cuda:0')):\n '''\n helper functions to project eta into epsilon norm ball\n :param eta: Perturbation tensor (should be of size(N, C, H, W))\n :param norm: which norm. 
should be in [1, 2, np.inf]\n :param eps: epsilon, bound of the perturbation\n :return: Projected perturbation\n '''\n\n assert norm in [1, 2, np.inf], \"norm should be in [1, 2, np.inf]\"\n\n with torch.no_grad():\n avoid_zero_div = torch.tensor(1e-12).to(DEVICE)\n eps = torch.tensor(eps).to(DEVICE)\n one = torch.tensor(1.0).to(DEVICE)\n\n if norm == np.inf:\n eta = torch.clamp(eta, -eps, eps)\n else:\n normalize = torch.norm(eta.reshape(eta.size(0), -1), p = norm, dim = -1, keepdim = False)\n normalize = torch.max(normalize, avoid_zero_div)\n\n normalize.unsqueeze_(dim = -1)\n normalize.unsqueeze_(dim=-1)\n normalize.unsqueeze_(dim=-1)\n\n factor = torch.min(one, eps / normalize)\n eta = eta * factor\n return eta\n\ndef test_clip():\n\n a = torch.rand((10, 3, 28, 28)).cuda()\n\n epss = [0.1, 0.5, 1]\n\n norms = [1, 2, np.inf]\n for e, n in zip(epss, norms):\n print(e, n)\n c = clip_eta(a, n, e, True)\n\n print(c)\n\nif __name__ == '__main__':\n test_clip()\n" }, { "alpha_fraction": 0.5755417943000793, "alphanum_fraction": 0.5835913419723511, "avg_line_length": 31.299999237060547, "blob_id": "9df579ca483eed477223b5fb239e0d88693bb693", "content_id": "b999f7dba608a217a2a5c1e074443df3bd36478a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3230, "license_type": "no_license", "max_line_length": 105, "num_lines": 100, "path": "/lib/training/train.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "import os\nimport sys\nfather_dir = os.path.join('/', *os.path.realpath(__file__).split(os.path.sep)[:-2])\n#print(father_dir)\nif not father_dir in sys.path:\n sys.path.append(father_dir)\nfrom utils.misc import torch_accuracy, AvgMeter\nfrom collections import OrderedDict\nimport torch\nfrom tqdm import tqdm\n\ndef train_one_epoch(net, batch_generator, optimizer,\n criterion, DEVICE=torch.device('cuda:0'),\n descrip_str='Training', AttackMethod = None, adv_coef = 1.0):\n '''\n\n 
:param attack_freq: Frequencies of training with adversarial examples. -1 indicates natural training\n :param AttackMethod: the attack method, None represents natural training\n :return: None #(clean_acc, adv_acc)\n '''\n net.train()\n pbar = tqdm(batch_generator)\n advacc = -1\n advloss = -1\n cleanacc = -1\n cleanloss = -1\n pbar.set_description(descrip_str)\n for i, (data, label) in enumerate(pbar):\n data = data.to(DEVICE)\n label = label.to(DEVICE)\n\n optimizer.zero_grad()\n\n pbar_dic = OrderedDict()\n TotalLoss = 0\n\n if AttackMethod is not None:\n adv_inp = AttackMethod.attack(net, data, label)\n optimizer.zero_grad()\n pred = net(adv_inp)\n loss = criterion(pred, label)\n\n acc = torch_accuracy(pred, label, (1,))\n advacc = acc[0].item()\n advloss = loss.item()\n TotalLoss = TotalLoss + loss * adv_coef\n\n\n pred = net(data)\n\n loss = criterion(pred, label)\n TotalLoss = TotalLoss + loss\n TotalLoss.backward()\n #param = next(net.parameters())\n #grad_mean = torch.mean(param.grad)\n\n optimizer.step()\n acc = torch_accuracy(pred, label, (1,))\n cleanacc = acc[0].item()\n cleanloss = loss.item()\n #pbar_dic['grad'] = '{}'.format(grad_mean)\n pbar_dic['Acc'] = '{:.2f}'.format(cleanacc)\n pbar_dic['loss'] = '{:.2f}'.format(cleanloss)\n pbar_dic['AdvAcc'] = '{:.2f}'.format(advacc)\n pbar_dic['Advloss'] = '{:.2f}'.format(advloss)\n pbar.set_postfix(pbar_dic)\n\n\ndef eval_one_epoch(net, batch_generator, DEVICE=torch.device('cuda:0'), AttackMethod = None):\n net.eval()\n pbar = tqdm(batch_generator)\n clean_accuracy = AvgMeter()\n adv_accuracy = AvgMeter()\n\n pbar.set_description('Evaluating')\n for (data, label) in pbar:\n data = data.to(DEVICE)\n label = label.to(DEVICE)\n\n with torch.no_grad():\n pred = net(data)\n acc = torch_accuracy(pred, label, (1,))\n clean_accuracy.update(acc[0].item())\n\n if AttackMethod is not None:\n adv_inp = AttackMethod.attack(net, data, label)\n\n with torch.no_grad():\n pred = net(adv_inp)\n acc = torch_accuracy(pred, 
label, (1,))\n adv_accuracy.update(acc[0].item())\n\n pbar_dic = OrderedDict()\n pbar_dic['CleanAcc'] = '{:.2f}'.format(clean_accuracy.mean)\n pbar_dic['AdvAcc'] = '{:.2f}'.format(adv_accuracy.mean)\n\n pbar.set_postfix(pbar_dic)\n\n adv_acc = adv_accuracy.mean if AttackMethod is not None else 0\n return clean_accuracy.mean, adv_acc\n" }, { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.6502463221549988, "avg_line_length": 17.454545974731445, "blob_id": "5c601b1da5f15c708b62a850989d1b4faa118b81", "content_id": "9e2bae4c6d3b9706620c0a7363b7f4d2527a4a38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 52, "num_lines": 11, "path": "/experiments/CIFAR10/adversarial.train/network.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "import config\nfrom base_model.cifar_resnet18 import cifar_resnet18\n\ndef create_network():\n net = cifar_resnet18()\n\n return net\n\nif __name__ == '__main__':\n net = create_network()\n print(net)\n" }, { "alpha_fraction": 0.5908023118972778, "alphanum_fraction": 0.6159785389900208, "avg_line_length": 35.77777862548828, "blob_id": "752bd6124d939e7c4d1aafe4d367493df8cb5b94", "content_id": "65728f8596ed4926453fd256d24913946c39e419", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2979, "license_type": "no_license", "max_line_length": 129, "num_lines": 81, "path": "/experiments/CIFAR10/trades.train/config.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "from easydict import EasyDict\nimport sys\nimport os\nimport argparse\nimport numpy as np\nimport torch\n\ndef add_path(path):\n if path not in sys.path:\n print('Adding {}'.format(path))\n sys.path.append(path)\n\nabs_current_path = os.path.realpath('./')\nroot_path = os.path.join('/', *abs_current_path.split(os.path.sep)[:-3])\nlib_dir = 
os.path.join(root_path, 'lib')\nadd_path(lib_dir)\n\nfrom training.config import TrainingConfigBase, SGDOptimizerMaker, \\\n PieceWiseConstantLrSchedulerMaker, IPGDAttackMethodMaker\n\nclass IPGDTradesMaker(object):\n def __init__(self, eps, sigma, nb_iters, norm, mean, std):\n self.eps = eps\n self.sigma = sigma\n self.nb_iters = nb_iters\n self.norm = norm\n self.mean = mean\n self.std = std\n\n def __call__(self, DEVICE):\n\n from attack.pgd_trades import IPGDTrades\n return IPGDTrades(self.eps, self.sigma, self.nb_iters, self.norm, DEVICE, self.mean, self.std)\nclass TrainingConfing(TrainingConfigBase):\n\n lib_dir = lib_dir\n\n num_epochs = 110\n val_interval = 10\n alpha = 1.0\n\n create_optimizer = SGDOptimizerMaker(lr =1e-1, momentum = 0.9, weight_decay = 2e-4)\n create_lr_scheduler = PieceWiseConstantLrSchedulerMaker(milestones = [70, 90, 100], gamma = 0.1)\n\n create_loss_function = torch.nn.CrossEntropyLoss\n\n create_attack_method = \\\n IPGDTradesMaker(eps = 8/255.0, sigma = 2/255.0, nb_iters = 10, norm = np.inf,\n mean = torch.tensor(np.array([0, 0, 0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]),\n std = torch.tensor(np.array([1,1,1]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]))\n\n create_evaluation_attack_method = \\\n IPGDAttackMethodMaker(eps = 8/255.0, sigma = 2/255.0, nb_iters = 20, norm = np.inf,\n mean=torch.tensor(\n np.array([0, 0, 0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]),\n std=torch.tensor(\n np.array([1, 1, 1]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]))\n\nconfig = TrainingConfing()\n\n\n# About data\n# C.inp_chn = 1\n# C.num_class = 10\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--resume', default=None, type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-b', '--batch_size', default=128, type=int,\n metavar='N', help='mini-batch size')\nparser.add_argument('-d', type=int, default=2, help='Which gpu to 
use')\nparser.add_argument('-freq', '--attack-interval', default=2, type = int,\n help = 'Specify how many iterations between two batch of adv images')\nparser.add_argument('--auto-continue', default=False, action = 'store_true',\n help = 'Continue from the latest checkpoint')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n pass\n" }, { "alpha_fraction": 0.56062912940979, "alphanum_fraction": 0.5794013142585754, "avg_line_length": 34.83636474609375, "blob_id": "2a86b04fad26fb3b73a5b5e396f13a2eb18c15be", "content_id": "2ab4dc82318c68b090933cfde90d1bcbae4719b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3942, "license_type": "no_license", "max_line_length": 117, "num_lines": 110, "path": "/lib/attack/pgd.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "'''\nReference:\n[1] Towards Deep Learning Models Resistant to Adversarial Attacks\nAleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, Adrian Vladu\narXiv:1706.06083v3\n'''\nimport torch\nimport numpy as np\nimport os\nimport sys\nfather_dir = os.path.join('/', *os.path.realpath(__file__).split(os.path.sep)[:-2])\nif not father_dir in sys.path:\n sys.path.append(father_dir)\nfrom attack.attack_base import AttackBase, clip_eta\n\nclass IPGD(AttackBase):\n # ImageNet pre-trained mean and std\n # _mean = torch.tensor(np.array([0.485, 0.456, 0.406]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])\n # _std = torch.tensor(np.array([0.229, 0.224, 0.225]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])\n\n # _mean = torch.tensor(np.array([0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])\n # _std = torch.tensor(np.array([1.0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])\n def __init__(self, eps = 6 / 255.0, sigma = 3 / 255.0, nb_iter = 20,\n norm = np.inf, DEVICE = torch.device('cpu'),\n mean = 
torch.tensor(np.array([0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]),\n std = torch.tensor(np.array([1.0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])):\n '''\n :param eps: maximum distortion of adversarial examples\n :param sigma: single step size\n :param nb_iter: number of attack iterations\n :param norm: which norm to bound the perturbations\n '''\n self.eps = eps\n self.sigma = sigma\n self.nb_iter = nb_iter\n self.norm = norm\n self.criterion = torch.nn.CrossEntropyLoss().to(DEVICE)\n self.DEVICE = DEVICE\n self._mean = mean.to(DEVICE)\n self._std = std.to(DEVICE)\n\n def single_attack(self, net, inp, label, eta, target = None):\n '''\n Given the original image and the perturbation computed so far, computes\n a new perturbation.\n :param net:\n :param inp: original image\n :param label:\n :param eta: perturbation computed so far\n :return: a new perturbation\n '''\n\n adv_inp = inp + eta\n\n net.zero_grad()\n\n pred = net(adv_inp)\n if target is not None:\n targets = torch.sum(pred[:, target])\n grad_sign = torch.autograd.grad(targets, adv_in, only_inputs=True, retain_graph = False)[0].sign()\n\n else:\n loss = self.criterion(pred, label)\n grad_sign = torch.autograd.grad(loss, adv_inp,\n only_inputs=True, retain_graph = False)[0].sign()\n\n adv_inp = adv_inp + grad_sign * (self.sigma / self._std)\n tmp_adv_inp = adv_inp * self._std + self._mean\n\n tmp_inp = inp * self._std + self._mean\n tmp_adv_inp = torch.clamp(tmp_adv_inp, 0, 1) ## clip into 0-1\n #tmp_adv_inp = (tmp_adv_inp - self._mean) / self._std\n tmp_eta = tmp_adv_inp - tmp_inp\n tmp_eta = clip_eta(tmp_eta, norm=self.norm, eps=self.eps, DEVICE=self.DEVICE)\n\n eta = tmp_eta/ self._std\n\n return eta\n\n def attack(self, net, inp, label, target = None):\n\n\n eta = torch.zeros_like(inp)\n eta = eta.to(self.DEVICE)\n net.eval()\n\n inp.requires_grad = True\n eta.requires_grad = True\n for i in range(self.nb_iter):\n eta = self.single_attack(net, inp, label, eta, 
target)\n #print(i)\n\n #print(eta.max())\n adv_inp = inp + eta\n tmp_adv_inp = adv_inp * self._std + self._mean\n tmp_adv_inp = torch.clamp(tmp_adv_inp, 0, 1)\n adv_inp = (tmp_adv_inp - self._mean) / self._std\n\n return adv_inp\n\n def to(self, device):\n self.DEVICE = device\n self._mean = self._mean.to(device)\n self._std = self._std.to(device)\n self.criterion = self.criterion.to(device)\n\ndef test_IPGD():\n pass\nif __name__ == '__main__':\n test_IPGD()\n" }, { "alpha_fraction": 0.5858424305915833, "alphanum_fraction": 0.5957719683647156, "avg_line_length": 29.910890579223633, "blob_id": "0d2fbf18d5343d9753055ec9d5f3e9424a1c5237", "content_id": "92abfb49130c52f8d529d99b8feb3173511fd63d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3122, "license_type": "no_license", "max_line_length": 94, "num_lines": 101, "path": "/experiments/CIFAR10/trades.train/training_function.py", "repo_name": "a1600012888/AdversarialTraining", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport config\nfrom utils.misc import torch_accuracy, AvgMeter\nfrom collections import OrderedDict\nimport torch\nfrom tqdm import tqdm\nimport torch.nn.functional as F\n\n\ndef train_one_epoch(net, batch_generator, optimizer,\n criterion, DEVICE=torch.device('cuda:0'),\n descrip_str='Training', AttackMethod = None, alpha = 1):\n '''\n\n :param AttackMethod: the attack method, None represents natural training\n :param alpha: weight coeffcient for mig loss\n :return: None #(clean_acc, adv_acc)\n '''\n\n #assert callable(AttackMethod)\n net.train()\n pbar = tqdm(batch_generator)\n advacc = -1\n advloss = -1\n cleanacc = -1\n cleanloss = -1\n criterion_kl = torch.nn.KLDivLoss(size_average=False).to(DEVICE)\n pbar.set_description(descrip_str)\n\n for i, (data, label) in enumerate(pbar):\n data = data.to(DEVICE)\n label = label.to(DEVICE)\n\n optimizer.zero_grad()\n\n pbar_dic = OrderedDict()\n\n adv_inp = AttackMethod.attack(net, 
data, label)\n\n optimizer.zero_grad()\n pred1 = net(adv_inp)\n pred2 = net(data)\n loss_robust = criterion_kl(F.log_softmax(pred1, dim=1), F.softmax(pred2, dim = 1))\n loss_natural = criterion(pred2, label)\n TotalLoss = loss_natural + alpha * loss_robust\n\n TotalLoss.backward()\n\n acc = torch_accuracy(pred1, label, (1,))\n advacc = acc[0].item()\n advloss = loss_robust.item()\n\n acc = torch_accuracy(pred2, label, (1,))\n cleanacc = acc[0].item()\n cleanloss = loss_natural.item()\n\n param = next(net.parameters())\n grad_mean = torch.mean(param.grad)\n optimizer.step()\n\n pbar_dic['grad'] = '{}'.format(grad_mean)\n pbar_dic['cleanAcc'] = '{:.2f}'.format(cleanacc)\n pbar_dic['cleanloss'] = '{:.2f}'.format(cleanloss)\n pbar_dic['AdvAcc'] = '{:.2f}'.format(advacc)\n pbar_dic['Robloss'] = '{:.2f}'.format(advloss)\n pbar.set_postfix(pbar_dic)\n\n\ndef eval_one_epoch(net, batch_generator, DEVICE=torch.device('cuda:0'), AttackMethod = None):\n net.eval()\n pbar = tqdm(batch_generator)\n clean_accuracy = AvgMeter()\n adv_accuracy = AvgMeter()\n\n pbar.set_description('Evaluating')\n for (data, label) in pbar:\n data = data.to(DEVICE)\n label = label.to(DEVICE)\n\n with torch.no_grad():\n pred = net(data)\n acc = torch_accuracy(pred, label, (1,))\n clean_accuracy.update(acc[0].item())\n\n if AttackMethod is not None:\n adv_inp = AttackMethod.attack(net, data, label)\n\n with torch.no_grad():\n pred = net(adv_inp)\n acc = torch_accuracy(pred, label, (1,))\n adv_accuracy.update(acc[0].item())\n\n pbar_dic = OrderedDict()\n pbar_dic['CleanAcc'] = '{:.2f}'.format(clean_accuracy.mean)\n pbar_dic['AdvAcc'] = '{:.2f}'.format(adv_accuracy.mean)\n\n pbar.set_postfix(pbar_dic)\n\n adv_acc = adv_accuracy.mean if AttackMethod is not None else 0\n return clean_accuracy.mean, adv_acc\n" } ]
12
Youngtrust/algorithm-learning-note
https://github.com/Youngtrust/algorithm-learning-note
1003a552f20978464ae3795583c37927990b6ecd
e3e2063b40a97892613b21074c14ee9609915dc4
3767ccf1d158657cfeaefed6fccc409557102665
refs/heads/master
2021-12-25T19:34:37.570822
2021-10-25T07:25:44
2021-10-25T07:25:44
149,147,799
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5871652364730835, "alphanum_fraction": 0.6154623627662659, "avg_line_length": 43.47190856933594, "blob_id": "5de456374cba69427fb4a3b641c37e94f610bff4", "content_id": "32b099155ea7e94ea328fe1109c66e9fa95e7026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3958, "license_type": "no_license", "max_line_length": 136, "num_lines": 89, "path": "/High Freq/linkedList or array/1847.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nThere is a hotel with n rooms. \nThe rooms are represented by a 2D integer array rooms where \nrooms[i] = [roomIdi, sizei] denotes that there is a room with room number roomIdi and size equal to sizei. \nEach roomIdi is guaranteed to be unique.\n\nYou are also given k queries in a 2D array queries where queries[j] = [preferredj, minSizej]. \nThe answer to the jth query is the room number id of a room such that:\n\nThe room has a size of at least minSizej, and\nabs(id - preferredj) is minimized, where abs(x) is the absolute value of x.\n\n\n\nInput: rooms = [[2,2],[1,2],[3,2]], queries = [[3,1],[3,3],[5,2]]\nOutput: [3,-1,3]\nExplanation: The answers to the queries are as follows:\nQuery = [3,1]: Room number 3 is the closest as abs(3 - 3) = 0, and its size of 2 is at least 1. The answer is 3.\nQuery = [3,3]: There are no rooms with a size of at least 3, so the answer is -1.\nQuery = [5,2]: Room number 3 is the closest as abs(3 - 5) = 2, and its size of 2 is at least 2. The answer is 3.\n\n\nThe idea of this problem is to sort our rooms and queries by time and \nstart with biggest times and keep sorted list aval for avaliable rooms: \n\nsuch that we can choose from. Imagine, \nthat we have rooms with sizes [7, 6, 3, 2, 1] and queries with sizes [5, 3, 1]. \nThen on the first step we see query with size 5 and avaliable rooms we have are [7, 6]. \nThen we look at query 3 and now we have avaliable rooms [7, 6, 3]. 
\nFinally, we look at query 1 and now avaliable rooms are [7, 6, 3, 2, 1].\n\nHowever we need to return closest index among all avaliable rooms, \nso we need to keep our avaliable rooms sorted with id. \nIf fact, we put tuple (id, size) into our sorted list.\n\nAlso we use two pointers approach: \np1 is responsible for rooms R and \np2 is responsible for queries Q. \nIf R[p1][0] >= Q[p2][0], it means that we can add one more room to aval, so we do it and update p1. \nIf we can not do it, it means we already saturated aval, so we look at it. \nIf it is empty, we must return -1, we do not have any candidates. \nIf it is not empty, we use binary search: \n there can be at most 3 (in fact 2) candidates for the nearest index: we chose the best of them.\n\nComplexity\nLet n be number of rooms and q is number of queries. \nThen we have time complexity O(n log n + q log n) to sort our data and \nthen we have n+q iterations in our 2 pointers approach, where each time we use either add or bisect operation with complexity O(log n). \nSo, total time complexity is O(n log n + q log q + q log n). 
Space complexity is O(n + q).\n\"\"\"\nfrom sortedcontainers import SortedList\n\n\nclass Solution:\n def closestRoom(self, rooms, queries):\n R = sorted([(j, i) for i, j in rooms])[::-1]\n Q = sorted((k, j, i) for i, (j, k) in enumerate(queries))[::-1]\n\n # Example -\n # >>> a = '1234'\n # >>> a[::-1]\n # '4321'\n n, q = len(R), len(Q)\n # p1 is responsible for rooms R and p2 is responsible for queries Q.\n p1, p2, aval, ans = 0, 0, SortedList(), [-1]*q\n\n while p1 <= n and p2 < q:\n\n if p1 < n and R[p1][0] >= Q[p2][0]:\n # it means that we can add one more room to aval, so we do it and update p1.\n aval.add(R[p1][1])\n p1 += 1\n else:\n # If we can not do it, it means we already saturated aval, so we look at it.\n if len(aval) != 0:\n preferred, ind = Q[p2][1], Q[p2][2]\n i = aval.bisect(preferred)\n # use binary search:\n cands = []\n if i > 0:\n cands.append(aval[i-1])\n if i < len(aval):\n cands.append(aval[i])\n # there can be at most 3 (in fact 2) candidates for the nearest index: we chose the best of them.\n ans[ind] = min(cands, key=lambda x: abs(x - preferred))\n\n p2 += 1\n\n return ans\n" }, { "alpha_fraction": 0.34036144614219666, "alphanum_fraction": 0.375, "avg_line_length": 22.75, "blob_id": "e58280263e5bbb484ce5ece75bd705d7dd1e4a03", "content_id": "c3968f318c083dc0b893be9a87f52d8a10f41580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 664, "license_type": "no_license", "max_line_length": 98, "num_lines": 28, "path": "/High Freq/sort/lc75.java", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// We will use the integers 0, 1, and 2 to represent the color red, white, and blue, respectively.\n\n// Input: nums = [2,0,2,1,1,0]\n// Output: [0,0,1,1,2,2]\n\nclass Solution {\n public void sortColors(int[] nums) {\n int start = 0;\n int end = nums.length - 1;\n int i = 0;\n\n while (i <= end) {\n if (nums[i] == 0) {\n nums[i] = 1;\n nums[start] = 0;\n 
start++;\n i++;\n } else if (nums[i] == 2) {\n nums[i] = nums[end];\n nums[end] = 2;\n end--;\n } else {\n i++;\n }\n }\n\n }\n}" }, { "alpha_fraction": 0.5700934529304504, "alphanum_fraction": 0.5773624181747437, "avg_line_length": 25.027027130126953, "blob_id": "f2634350ac607188d31af8efd8b6a41dfccb3bb3", "content_id": "fe2c417760cbdb72ac5afab9a8fae9bae03299bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1001, "license_type": "no_license", "max_line_length": 81, "num_lines": 37, "path": "/LI/li-loopinsert.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "<body>\n <div id=\"content\"></div>\n</body>;\n\nvar members = [\n { name: \"Bill Denbrough\", id: 1 },\n { name: \"Ben Hanscom\", id: 2 },\n { name: \"Mike Hanlon\", id: 3 },\n { name: \"Richie Tozier\", id: 4 },\n { name: \"Beverly Marsh\", id: 5 },\n { name: \"Eddie Kaspbrak\", id: 6 },\n { name: \"Stan Uris\", id: 7 },\n];\n\n// Question: insert links for each of the members into the content div\n// The link would be like the example below\n// <a href=\"profile.jsp?id=<memeber.id>\"><membe‍‌‍‍‍‌‍‍‍‍‌‍‌‍‍‍‌‌‍r.name></a>\n// own answer\n\ndocument.getElementById(\"content\").innerHTML = members\n .map(\n (eachMember) =>\n `<a href=\"profile.jsp?id=${eachMember.id}\">${eachMember.name}</a>`\n )\n .join(\"\");\n\n//follow up:\n//use <ul> <li> to implement the same functionality\nvar customUL = document.createElement(\"ul\");\ncustomUL = members\n .map(\n (eachMember) =>\n `<li><a href=\"profile.jsp?id=${eachMember.id}\">${eachMember.name}</a></li>`\n )\n .join(\"\");\n\ndocument.getElementById(\"content\").innerHTML = customUL;\n" }, { "alpha_fraction": 0.47697368264198303, "alphanum_fraction": 0.5953947305679321, "avg_line_length": 37, "blob_id": "b9cfb2509d8ba6f8e01906d2d13db47bc1a2db01", "content_id": "93c92074421434b3a8ca5b2fd8272f7c22f65257", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Java", "length_bytes": 304, "license_type": "no_license", "max_line_length": 73, "num_lines": 8, "path": "/High Freq/sort/lc4.java", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// Input: nums1 = [1,3], nums2 = [2]\n// Output: 2.00000\n// Explanation: merged array = [1,2,3] and median is 2.\n\n// Input: nums1 = [1,2], nums2 = [3,4]\n// Output: 2.50000\n// Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.\n// The overall run time complexity should be O(log (m+n)).\n" }, { "alpha_fraction": 0.6837944388389587, "alphanum_fraction": 0.6837944388389587, "avg_line_length": 27.11111068725586, "blob_id": "9e50c62dd8b51a2d1125af9db59dddab44721216", "content_id": "f6564396a1fee8a929f46b9ad59dfb08887e27f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 253, "license_type": "no_license", "max_line_length": 65, "num_lines": 9, "path": "/LI/remove-btn-li.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// pure js\nconst bookList = document.querySelector(\"#custom-class-name ul\");\n\nbookList.addEventListener(\"click\", (e) => {\n if (e.target.className == \"delete\") {\n const removedLi = e.target.parentElement;\n bookList.removeChild(removedLi);\n }\n});\n" }, { "alpha_fraction": 0.8235908150672913, "alphanum_fraction": 0.8246346712112427, "avg_line_length": 46.30864334106445, "blob_id": "bd3ad445316a9ebed416ad632a8a6407aad68132", "content_id": "a31c7af0a5ca1a8c8fbab1b74332ad971a87fc6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3852, "license_type": "no_license", "max_line_length": 399, "num_lines": 81, "path": "/Sys Design/basic.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "Things to consider\nFeatures\nAPI\nAvailability\nLatency\nScalability\nDurability\nClass Diagram\nSecurity and 
Privacy\nCost-effective\n\nConcepts to know\nVertical vs horizontal scaling\nCAP theorem\nACID vs BASE\nPartitioning/Sharding\nConsistent Hashing\nOptimistic vs pessimistic locking\nStrong vs eventual consistency\nRelationalDB vs NoSQL\nTypes of NoSQL\nKey value\nWide column\nDocument-based\nGraph-based\nCaching\nData center/racks/hosts\nCPU/memory/Hard drives/Network bandwidth\nRandom vs sequential read/writes to disk\nHTTP vs http2 vs WebSocket\nTCP/IP model\nipv4 vs ipv6\nTCP vs UDP\nDNS lookup\nHttp & TLS\nPublic key infrastructure and certificate authority(CA)\nSymmetric vs asymmetric encryption\nLoad Balancer\nCDNs & Edges\nBloom filters and Count-Min sketch\nPaxos\nLeader election\nDesign patterns and Object-oriented design\nVirtual machines and containers\nPub-sub architecture\nMapReduce\nMultithreading, locks, synchronization, CAS(compare and set)\n\nTools\nCassandra\nMongoDB/Couchbase\nMysql\nMemcached\nRedis\nZookeeper\nKafka\nNGINX\nHAProxy\nSolr, Elastic search\nAmazon S3\nDocker, Kubernetes, Mesos\nHadoop/Spark and HDFS\n\nload balancer\nHow does the load balancer choose the backend server?\nLoad balancers consider two factors before forwarding a request to a backend server. They will first ensure that the server they choose is actually responding appropriately to requests and then use a pre-configured algorithm to select one from the set of healthy servers. We will discuss these algorithms shortly.\n\nHealth Checks - Load balancers should only forward traffic to “healthy” backend servers. To monitor the health of a backend server, “health checks” regularly attempt to connect to backend servers to ensure that servers are listening. 
If a server fails a health check, it is automatically removed from the pool, and traffic will not be forwarded to it until it responds to the health checks again.\n\nThere is a variety of load balancing methods, which use different algorithms for different needs.\n\nLeast Connection Method — This method directs traffic to the server with the fewest active connections. This approach is quite useful when there are a large number of persistent client connections which are unevenly distributed between the servers.\nLeast Response Time Method — This algorithm directs traffic to the server with the fewest active connections and the lowest average response time.\nLeast Bandwidth Method - This method selects the server that is currently serving the least amount of traffic measured in megabits per second (Mbps).\nRound Robin Method — This method cycles through a list of servers and sends each new request to the next server. When it reaches the end of the list, it starts over at the beginning. It is most useful when the servers are of equal specification and there are not many persistent connections.\nWeighted Round Robin Method — The weighted round-robin scheduling is designed to better handle servers with different processing capacities. Each server is assigned a weight (an integer value that indicates the processing capacity). Servers with higher weights receive new connections before those with less weights and servers with higher weights get more connections than those with less weights.\nIP Hash — Under this method, a hash of the IP address of the client is calculated to redirect the request to a server.\n\nPlacing a cache directly on a request layer node enables the local storage of response data.\n\nWhat happens when you expand this to many nodes? If the request layer is expanded to multiple nodes, it’s still quite possible to have each node host its own cache. 
However, if your load balancer randomly distributes requests across the nodes, the same request will go to different nodes, thus increasing cache misses. Two choices for overcoming this hurdle are global caches and distributed caches.\n" }, { "alpha_fraction": 0.7677902579307556, "alphanum_fraction": 0.8239700198173523, "avg_line_length": 43.5, "blob_id": "1ae9d836bc2d53581d061b2d4bfe412321c29ff5", "content_id": "4ccd036683512eeff1e5afe1cb4c6054af4c4e9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 389, "license_type": "no_license", "max_line_length": 85, "num_lines": 6, "path": "/High Freq/prefixSum/prefixSum.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "前缀和本质上是在一个 list 当中,用 O(N)的时间提前算好从第 0 个数字到第 i 个数字之和,在后续使用中可以在 O(1)时间内计算出第 i 到第 j 个数字之和\n\nLeetcode 53 Maximum Subarray\nLeetcode 1423 Maximum Points You Can Obtain from Cards\nLeetcode 1031 Maximum Sum of Two Non-Overlapping Subarrays\nLeetcode 523 Continuous Subarray Sum\n" }, { "alpha_fraction": 0.46000000834465027, "alphanum_fraction": 0.48428571224212646, "avg_line_length": 24, "blob_id": "2e12ed3300bcd0ae16b829736ab6652fa98e59b0", "content_id": "247ef545eea3091aced62f3e1d6db8e5c6b05b6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 54, "num_lines": 28, "path": "/High Freq/linkedList or array/shanb.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "def furthestValidPair(nums):\n \"\"\"\" \n max(k), k = |i-j| == |A-A[j]|\n\n 应该这样做,把array转化成 A【i】-i和A【i】+i,\n 相当于寻找A【i】-i==A【j】-j或者A【i】+i==A【j】+j的最大值,\n search array用hash table记录重复的最大值就好了。\n\n 我们假设i<j,那么我们有如下两种情况:\n j-i = a[j]-a[i]\n j-i = a[i]-a[j]\n 移项得: a[i]-i = a[j] - j or a[i]+i = a[j]+j\n \"\"\"\n res = 0\n table1 = defaultdict(list)\n table2 = defaultdict(list)\n\n for i, v in enumerate(nums):\n a = 
v-i\n table1[a].append(i)\n if len(table1[a]) > 1:\n res = max(res, table1[a][-1]-table1[a][0])\n b = v+i\n table2[b].append(i)\n if len(table2[b]) > 1:\n res = max(res, table2[-1]-table2[0])\n\n return res\n" }, { "alpha_fraction": 0.8010988831520081, "alphanum_fraction": 0.8021978139877319, "avg_line_length": 169.625, "blob_id": "18d93642dd6de15d5b7e7ab2846a1c18dcb20f25", "content_id": "30290f2c480bd322048cd368792968f913efa476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2732, "license_type": "no_license", "max_line_length": 667, "num_lines": 16, "path": "/Sys Design/cap.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "Background#\nIn distributed systems, different types of failures can occur, e.g., servers can crash or fail permanently, disks can go bad resulting in data losses, or network connection can be lost, making a part of the system inaccessible. How can a distributed system model itself to get the maximum benefits out of different resources available?\n\nSolution#\nCAP theorem states that it is impossible for a distributed system to simultaneously provide all three of the following desirable properties:\n\nConsistency ( C ): All nodes see the same data at the same time. This means users can read or write from/to any node in the system and will receive the same data. It is equivalent to having a single up-to-date copy of the data.\n\nAvailability ( A ): Availability means every request received by a non-failing node in the system must result in a response. Even when severe network failures occur, every request must terminate. In simple terms, availability refers to a system’s ability to remain accessible even if one or more nodes in the system go down.\n\nPartition tolerance ( P ): A partition is a communication break (or a network failure) between any two nodes in the system, i.e., both nodes are up but cannot communicate with each other. 
A partition-tolerant system continues to operate even if there are partitions in the system. Such a system can sustain any network failure that does not result in the failure of the entire network. Data is sufficiently replicated across combinations of nodes and networks to keep the system up through intermittent outages.\n\nAccording to the CAP theorem, any distributed system needs to pick two out of the three properties. The three options are CA, CP, and AP. However, CA is not really a coherent option, as a system that is not partition-tolerant will be forced to give up either Consistency or Availability in the case of a network partition. Therefore, the theorem can really be stated as: In the presence of a network partition, a distributed system must choose either Consistency or Availability.\n\nCAP theorem\nWe cannot build a general data store that is continually available, sequentially consistent, and tolerant to any partition failures. We can only build a system that has any two of these three properties. Because, to be consistent, all nodes should see the same set of updates in the same order. But if the network loses a partition, updates in one partition might not make it to the other partitions before a client reads from the out-of-date partition after having read from the up-to-date one. 
The only thing that can be done to cope with this possibility is to stop serving requests from the out-of-date partition, but then the service is no longer 100% available.\n" }, { "alpha_fraction": 0.48207172751426697, "alphanum_fraction": 0.5358565449714661, "avg_line_length": 27.685714721679688, "blob_id": "23d3b8eb204796eb4e4d1c38e0292725f944a0c7", "content_id": "daa2811eba4b448522a7fd8556f09c90320947d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 109, "num_lines": 35, "path": "/High Freq/linkedList or array/2007.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nInput: changed = [1,3,4,2,6,8]\nOutput: [1,3,4]\nExplanation: One possible original array could be [1,3,4]:\n- Twice the value of 1 is 1 * 2 = 2.\n- Twice the value of 3 is 3 * 2 = 6.\n- Twice the value of 4 is 4 * 2 = 8.\nOther original arrays could be [4,3,1] or [3,1,4].\n\nInput: changed = [6,3,0,1]\nOutput: []\nExplanation: changed is not a doubled array.\n\n\"\"\"\n\n\nclass Solution:\n def findOriginalArray(self, changed: List[int]) -> List[int]:\n if len(changed) % 2 == 1:\n return []\n\n original = []\n numbers = collections.Counter(changed)\n\n for n in sorted(changed):\n v = n*2\n if numbers.get(n, 0) > 0 and numbers.get(v, 0) > 0:\n original.append(n)\n numbers[n] -= 1\n numbers[v] -= 1\n elif n // 2 not in numbers or n % 2 == 1:\n # In Python 3.x, 5 / 2 will return 2.5 and 5 // 2 will return 2. 
The former is floating point\n return []\n\n return original\n" }, { "alpha_fraction": 0.5657668113708496, "alphanum_fraction": 0.604250431060791, "avg_line_length": 21.320512771606445, "blob_id": "fda2078ec2bd169d67e6ca1526dc47dd084c4ba5", "content_id": "8bc46d63abb0c28eb8e695747da5261e907a80e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 90, "num_lines": 78, "path": "/GG/h-tree.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// config\n// https://mccarthydanielle.medium.com/constructing-an-h-tree-with-javascript-29d0f46932d6\nlet depth = 2;\n\nconst canvas = document.getElementById(\"c\");\nconst context = c.getContext(\"2d\");\ncontext.strokeStyle = \"#00386B\";\n\nconst center = {\n x: canvas.width / 2.0,\n y: canvas.height / 2.0,\n};\n\n// constants\nconst sqrt2 = Math.sqrt(2);\nconst len = canvas.width / sqrt2 / sqrt2;\n\n// recursive func\nconst hTree = (point, len, depth) => {\n if (depth === 0) {\n return;\n }\n\n // draw horizontal line\n const h1 = { x: point.x - len / 2.0, y: point.y };\n const h2 = { x: point.x + len / 2.0, y: point.y };\n drawLine(h1, h2);\n\n // draw vertical lines\n len = len / sqrt2;\n\n const v1 = { x: h1.x, y: h1.y - len / 2.0 };\n const v2 = { x: h1.x, y: h1.y + len / 2.0 };\n drawLine(v1, v2);\n\n const v3 = { x: h2.x, y: h2.y - len / 2.0 };\n const v4 = { x: h2.x, y: h2.y + len / 2.0 };\n drawLine(v3, v4);\n\n // compute new length, depth\n depth--;\n len = len / sqrt2;\n\n // recurse recurse recurse recurse\n hTree(v1, len, depth);\n hTree(v2, len, depth);\n hTree(v3, len, depth);\n hTree(v4, len, depth);\n};\n\n// line helper\nconst drawLine = (from, to) => {\n context.beginPath();\n context.moveTo(from.x, from.y);\n context.lineTo(to.x, to.y);\n context.stroke();\n};\n\n// paint helper\nconst paint = () => {\n context.clearRect(0, 0, canvas.width, 
canvas.height);\n hTree(center, len, depth);\n};\n\n// increase depth, to a reasonable point\nconst plus = document.getElementById(\"plus\");\nplus.addEventListener(\"click\", () => {\n depth <= 7 && depth++ && paint();\n});\n\n// decrease depth\nconst minus = document.getElementById(\"minus\");\nminus.addEventListener(\"click\", () => {\n depth !== 1 && depth-- && paint();\n});\n\n// fire away\npaint();\n" }, { "alpha_fraction": 0.5766128897666931, "alphanum_fraction": 0.5766128897666931, "avg_line_length": 18.076923370361328, "blob_id": "fee307424aa3d8ac352a878dadcebebb023d69e8", "content_id": "896eb644f748375d9e375325b986582480be4da8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 248, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/LI/li-nested-array.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "const flatten = (targetArray) => {\n let result = [];\n\n targetArray.forEach((eachSub) => {\n if (Array.isArray(eachSub)) {\n result = result.concat(flatten(eachSub));\n } else {\n result.push(eachSub);\n }\n });\n\n return result;\n};\n" }, { "alpha_fraction": 0.7562893033027649, "alphanum_fraction": 0.8207547068595886, "avg_line_length": 29.285715103149414, "blob_id": "d52dac08cf0070915616c8905b02111c06cc1373", "content_id": "92ee3ec5a7b0c6b105c27c70628c31d9d786a10a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 684, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/High Freq/twoPointer/twoPointer.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "回文串\n\nLeetcode 409. Longest Palindrome\nLeetcode 125. Valid Palindrome\nLeetcode 5. Longest Palindromic Substring\n\n这里使用的是先排序的双指针算法,不同于 hashmap 做法\nLeetcode 1. Two Sum\nLeetcode 167. Two Sum II - Input array is sorted\nLeetcode 15. 3Sum\nLeetcode 16. 
3Sum Closest\nLeetcode 18. 4Sum\nLeetcode 454. 4Sum II\nLeetcode 277. Find the Celebrity\n\nLeetcode 283. Move Zeroes\nLeetcode 26. Remove Duplicate Numbers in Array\nLeetcode 395. Longest Substring with At Least K Repeating Characters\nLeetcode 340. Longest Substring with At Most K Distinct Characters\nLeetcode 76. Minimum Window Substring\nLeetcode 3. Longest Substring Without Repeating Characters\n" }, { "alpha_fraction": 0.7413173913955688, "alphanum_fraction": 0.8143712282180786, "avg_line_length": 28.821428298950195, "blob_id": "3f24a473af2b0489fc99d8dbf7136e1154e9ca64", "content_id": "141444f4259a14bc530244ffa940b91bccf3643c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/High Freq/bfs/bfs.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "1. 简单图(有向无向皆可)的最短路径长度\n2. 拓扑排序\n3. 遍历一个图(或者树)\n\n多数情况下时间复杂度空间复杂度都是 O(N+M),N 为节点个数,M 为边的个数\n\nLeetcode 102 Binary Tree Level Order Traversal\nLeetcode 103 Binary Tree Zigzag Level Order Traversal\nLeetcode 297 Serialize and Deserialize Binary Tree (很好的 BFS 和双指针结合的题)\nLeetcode 314 Binary Tree Vertical Order Traversal\n\n基于图的 BFS:(一般需要一个 set 来记录访问过的节点)\nLeetcode 200. Number of Islands\nLeetcode 133. Clone Graph\nLeetcode 127. Word Ladder\nLeetcode 490. The Maze\nLeetcode 323. Connected Component in Undirected Graph\nLeetcode 130. Surrounded Regions\nLeetcode 752. Open the Lock\nLeetcode 815. Bus Routes\nLeetcode 1091. Shortest Path in Binary Matrix\nLeetcode 542. 01 Matrix\nLeetcode 1293. 
Shortest Path in a Grid with Obstacles Elimination\n\n拓扑排序:\nLeetcode 207 Course Schedule (I, II)\nLeetcode 444 Sequence Reconstruction\nLeetcode 269 Alien Dictionary\n" }, { "alpha_fraction": 0.5593841671943665, "alphanum_fraction": 0.5689149498939514, "avg_line_length": 29.311111450195312, "blob_id": "87589b3d15c717164b286bacd457c0d2b6fa9e36", "content_id": "fd121f86aaa404a7ff73b2a1c873a46b02849fb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1364, "license_type": "no_license", "max_line_length": 72, "num_lines": 45, "path": "/dp/337.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nTraverse tree in DFS way while tracking the sum. \nApply DP while calculating the sum, which at any node is the maximum of:\n\nits own value + it's grandchildren's level summation of max values\nsummation of it's children's max values\nUsing these, bubble up to get the answer.\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def rob(self, root: Optional[TreeNode]) -> int:\n\n def recur(root):\n if not root:\n return 0, 0\n l, prev_l = recur(root.left)\n r, prev_r = recur(root.right)\n\n # if rob parent, cannot rob children\n plan1 = root.val + prev_l + prev_r\n # if don't rob parent, we can either rob or\n # don't rob the children, whichever way is\n # more profitable\n # it present node and grandchildren are considered\n plan2 = l + r\n return max(plan1, plan2), plan2\n\n\nclass Solution2:\n def rob(self, root: Optional[TreeNode]) -> int:\n\n def dfs(node=root):\n if not node:\n return (0, 0)\n L, R = dfs(node.left), dfs(node.right)\n return (node.val+L[1]+R[1], max(L)+max(R))\n\n return max(dfs())\n" }, { "alpha_fraction": 0.43002545833587646, "alphanum_fraction": 0.46310433745384216, "avg_line_length": 23.5625, "blob_id": 
"6f97c4a770ba919387fdb9baca6fe3719f942bf6", "content_id": "6dc5cae76dc83a9d83c0afb51560ee3af7efce20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 393, "license_type": "no_license", "max_line_length": 74, "num_lines": 16, "path": "/LI/flat-nested-array.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "function flatten(ary) {\n var ret = [];\n for (var i = 0; i < ary.length; i++) {\n if (Array.isArray(ary[i])) {\n ret = ret.concat(flatten(ary[i]));\n } else {\n ret.push(ary[i]);\n }\n }\n return ret;\n}\n\nflatten([[[[[0]], [1]], [[[2], [3]]], [[4], [5]]]]); // [0, 1, 2, 3, 4, 5]\n\nconst flatten = (ary) =>\n ary.reduce((a, b) => a.concat(Array.isArray(b) ? flatten(b) : b), []);\n" }, { "alpha_fraction": 0.64716637134552, "alphanum_fraction": 0.6512796878814697, "avg_line_length": 41.90196228027344, "blob_id": "43ea321cf935f00a90e7260af1606d75e0a50dc7", "content_id": "30406cbf8b15d9cdebdcf4c758a7ca385aabeb3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2188, "license_type": "no_license", "max_line_length": 130, "num_lines": 51, "path": "/dp/1048.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nhttps://leetcode.com/problems/longest-string-chain/\n\n\nInitially, each word's longest chain is set to 1. \nThen, we loop the list of words to find out whether it has a predecessor in the list. \nIf there is a predecessor, we know current word's longest chain could be predecessor's longest chain plus one.\n\nThere are two main points for this solution:\n\nSort the word list words by each length of the word.\nAs mentioned above, current word's longest chain is formed by predecessor's longest chain plus one. 
\nTherefore, we must calculate the predecessor's longest chain first, otherwise the answer would be incorrect.\n\nComparing the current word's chain with all its predecessor's longest chain plus one to find out the current word's longest chain.\nThis is because the current word's chain could possibly be formed in many different ways, \nso we need to compare them to find out the longest one.\n\n d[word] is the longest chain ending at word.\n We sort the words by length, iterate through them, and generate all predecessors by removing letters.\n If a predecessor p is in d, d[word] = max(1 + d[p], d[word])\n We can track the max value along the way as well.\n \n Analysis\n Time:\n - Building the DP dictionary: O(n * k) where k is the biggest word length in words. \n For each word we do len(word) operations to calculate predecessors.\n Lookups and updates to the dict are O(1), so our total time is O(n * k).\n Space:\n - Building the DP dictionary: O(n), since we have 1 entry for each word. O(n) overall.\n\nInput: words = [\"a\",\"b\",\"ba\",\"bca\",\"bda\",\"bdca\"]\nOutput: 4\nExplanation: One of the longest word chains is [\"a\",\"ba\",\"bda\",\"bdca\"].\n\n\"\"\"\n\n\nclass Solution:\n def longestStrChain(self, words: List[str]) -> int:\n d = dict()\n for word in words:\n d[word] = 1\n longest = 1\n for word in sorted(words, key=len):\n for i in range(len(word)):\n prev = word[:i] + word[i + 1:]\n if prev in d:\n d[word] = max(d[word], d[prev] + 1)\n longest = max(longest, d[word])\n return longest\n" }, { "alpha_fraction": 0.5839415788650513, "alphanum_fraction": 0.5888077616691589, "avg_line_length": 17.68181800842285, "blob_id": "6364fd06bee8534569a51e0ec2568857a738f5cc", "content_id": "243dfa4de8ea26105d443fb3dbb6e85fac4e174b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 411, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/GG/359-shouldprint.js", "repo_name": 
"Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "var Logger = function () {\n this.map = new Map();\n this.obj = {};\n};\n\n/**\n * @param {number} timestamp\n * @param {string} message\n * @return {boolean}\n */\nLogger.prototype.shouldPrintMessage = function (timestamp, message) {\n // const obj = {}\n\n if (message in this.obj) {\n if (timestamp < this.obj[message] + 10) {\n return false;\n }\n }\n this.obj[[message]] = timestamp;\n\n return true;\n};\n" }, { "alpha_fraction": 0.420560747385025, "alphanum_fraction": 0.4532710313796997, "avg_line_length": 24.176469802856445, "blob_id": "33a249867aca196b74ba1b80561a186ca7e056da", "content_id": "c6aa9b1323f2e60baa6128d07f41a3ea1bf2f285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 466, "license_type": "no_license", "max_line_length": 104, "num_lines": 17, "path": "/LI/li-scrollRequest.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "/**\n * API Docs\n * ---------\n * /posts?page=0 => [{id: 1, title: 'Post 1'}, {id: 2, title: 'Post 2'}, {id: 3, title: 'Post 3'}, N...]\n * /posts?page=1 => [{id: 4, title: 'Post 4'}, {id: 5, title: 'Post 5'}, {id: 6, title: 'Post 6'}, N...]\n * /posts?page=N => [N...]\n */\n\n<ul id=\"posts\"></ul>;\n\n// JS\n// ---------\n// $[colo‍‌‍‍‍‌‍‍‍‍‌‍‌‍‍‍‌‌‍r=#dcdcdc](window).on('scroll', scrollHandler);\n\nfunction scrollHandler() {\n //todo\n}\n" }, { "alpha_fraction": 0.4868035316467285, "alphanum_fraction": 0.5244379043579102, "avg_line_length": 21.2391300201416, "blob_id": "bd5d620d978000496ab18cbd2259d31f95376920", "content_id": "8f6041844d46024a7f02f567753467bb30a123cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2046, "license_type": "no_license", "max_line_length": 80, "num_lines": 92, "path": "/GG/table.jsx", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", 
"text": "// 1 6 7 12 13\n// 2 5 8 11 14\n// 3 4 9 10 15\n// width: 5, height 3\nimport React from \"react\";\n\nconst range = (start, stop) =>\n Array.from({ length: stop - start }, (_, i) => start + i);\n\nconst mapTable = (width, height) => {\n // map vertical first\n let arrayVertical = new Array(width);\n let count = 1;\n\n for (let w = 0; w < width; w++) {\n arrayVertical[w] = range(count, count + height);\n count += height;\n }\n // [[1,2,3], [4,5,6],[7,8,9],[10,11,12]]\n for (let i = 0; i < width; i++) {\n if (i % 2 !== 0) {\n arrayVertical[i].sort((a, b) => b - a);\n }\n }\n // [[1,2,3], [6,5,4],[7,8,9],[12,11,10]]\n return arrayVertical;\n};\n// O(2width)\n\nconst resArray = mapTable(3, 5);\n\n// O(w*h)\nconst resTable = resArray.map((eachtr, index) => {\n return (\n <tr key={index}>\n {resArray[eachtr].map((eachtd) => {\n return <td>{resArray[eachtr][eachtd]}</td>;\n })}\n </tr>\n );\n});\n\n// <!DOCTYPE html>\n\nconst tableComponent = () => {\n return (\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\" />\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n <title>Document</title>\n </head>\n <body>\n <table>{resTable}</table>\n </body>\n </html>\n );\n};\nexport default tableComponent;\n/* <table>\n <tr>\n <th>Month</th>\n <th>Savings</th>\n </tr>\n <tr>\n <td>January</td>\n <td>$100</td>\n </tr>\n</table> */\n\nfunction createTable(rowCnt, colCnt) {\n const frag = document.createDocumentFragment();\n for (let i = 0; i < rowCnt; i++) {\n frag.append(document.createElement(\"tr\"));\n }\n for (let y = 0, inc = -1, val = 1; y < colCnt; y++) {\n inc *= -1;\n for (\n let i = 0, x = y % 2 === 0 ? 
0 : rowCnt - 1;\n i < rowCnt;\n i++, x += inc\n ) {\n const td = document.createElement(\"td\");\n td.textContent = val++;\n frag.children[x].append(td);\n }\n }\n const table = document.createElement(\"table\");\n table.append(frag);\n return table;\n}\n" }, { "alpha_fraction": 0.4456404745578766, "alphanum_fraction": 0.4790096879005432, "avg_line_length": 23.447368621826172, "blob_id": "78e52d02d7d165d0ff62debc4581e1120144af7c", "content_id": "aba1f40b280a9dc377c947d4f3fec52f924bfb02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 929, "license_type": "no_license", "max_line_length": 62, "num_lines": 38, "path": "/GG/1048-longeststrchain.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "/**\n * @param {string[]} words\n * @return {number}\n */\nvar longestStrChain = function (words) {\n const d = {};\n // O(NlogN)\n words.sort((a, b) => (a.length > b.length ? 1 : -1));\n console.log(words);\n // [ 'b', 'a', 'ba', 'bda', 'bca', 'bdca' ]\n\n // O(N)\n words.forEach((eachWord, index) => {\n d[eachWord] = 1;\n // O(L)\n for (let i = 0; i < eachWord.length; i++) {\n let prev = eachWord.slice(0, i) + eachWord.slice(i + 1);\n if (prev in d) {\n d[eachWord] = Math.max(d[eachWord], d[prev] + 1);\n //console.log(eachWord, prev, d[eachWord], d[prev]+1)\n\n // ba a 2 2\n // ba b 2 2\n // bda ba 3 3\n // bca ba 3 3\n // bdca bca 4 4\n }\n }\n });\n console.log(d);\n // { b: 1, a: 1, ba: 2, bda: 3, bca: 3, bdca: 4 }\n console.log(Object.values(d));\n // [ 1, 1, 2, 3, 3, 4 ]\n\n return Math.max(...Object.values(d));\n};\n\n// Time complexity: O(N*(logN+L ^2))\n" }, { "alpha_fraction": 0.525898814201355, "alphanum_fraction": 0.5825716257095337, "avg_line_length": 22.782608032226562, "blob_id": "8fd5a1f8ae2749d1491fb93c6694a3761e88e1f7", "content_id": "ad2d1d3bce63e84bce44a8679ac0ff44c06ff111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 90, "num_lines": 69, "path": "/High Freq/linkedList or array/954.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nGiven an integer array of even length arr,\n return true if it is possible to reorder arr such that arr[2 * i + 1] = 2 * arr[2 * i] \n for every 0 <= i < len(arr) / 2, or false otherwise.\n\nExample 1:\n\nInput: arr = [3,1,3,6]\nOutput: false\n\nExample 2:\n\nInput: arr = [2,1,2,6]\nOutput: false\n\nExample 3:\n\nInput: arr = [4,-2,2,-4]\nOutput: true\nExplanation: We can take two groups, [-2,-4] and [2,4] to form [-2,-4,2,4] or [2,4,-2,-4].\nExample 4:\n\nInput: arr = [1,2,4,16,8,4]\nOutput: false\n\nLet's try to (virtually) \"write\" the final reordered array.\n\nLet's check elements in order of absolute value. \nWhen we check an element x and it isn't used, \nit must pair with 2*x. We will attempt to write x, 2x - \n\nif we can't, then the answer is false. 
If we write everything, the answer is true.\n\nTo keep track of what we have not yet written, we will store it in a count\n\n\nTime Complexity: O(NlogN), where N is the length of A.\nSpace Complexity: O(N)\n\"\"\"\n\n\nclass Solution(object):\n def canReorderDoubled(self, A):\n count = collections.Counter(A)\n for x in sorted(A, key=abs):\n if count[x] == 0:\n continue\n if count[2*x] == 0:\n return False\n count[x] -= 1\n count[2*x] -= 1\n\n return True\n\n\n# >>> from collections import Counter\n# >>>\n# >>> myList = [1,1,2,3,4,5,3,2,3,4,2,1,2,3]\n# >>> print Counter(myList)\n# Counter({2: 4, 3: 4, 1: 3, 4: 2, 5: 1})\n# >>>\n# >>> print Counter(myList).items()\n# [(1, 3), (2, 4), (3, 4), (4, 2), (5, 1)]\n# >>>\n# >>> print Counter(myList).keys()\n# [1, 2, 3, 4, 5]\n# >>>\n# >>> print Counter(myList).values()\n# [3, 4, 4, 2, 1]\n" }, { "alpha_fraction": 0.4616588354110718, "alphanum_fraction": 0.5023474097251892, "avg_line_length": 26.782608032226562, "blob_id": "0acf4875ca90fbbbc7dbdc3c6e681e7850d07bd8", "content_id": "01249ea7f781499e2a40fdd8030692765336acf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 85, "num_lines": 23, "path": "/LI/339-depthsum.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nInput: nestedList = [[1,1],2,[1,1]]\nOutput: 10\nExplanation: Four 1's at depth 2, one 2 at depth 1. 
1*2 + 1*2 + 2*1 + 1*2 + 1*2 = 10.\n\n\"\"\"\n\n\nclass Solution(object):\n def depthSum(self, nestedList):\n \"\"\"\n :type nestedList: List[NestedInteger]\n :rtype: int\n \"\"\"\n def DFS(nestedList, depth):\n temp_sum = 0\n for member in nestedList:\n if member.isInteger():\n temp_sum += member.getInteger() * depth\n else:\n temp_sum += DFS(member.getList(), depth+1)\n return temp_sum\n return DFS(nestedList, 1)\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5293367505073547, "avg_line_length": 20.189189910888672, "blob_id": "0698feba99268c26d3011e7a03f0a6d08e38bec3", "content_id": "d4a43ed0c3c21dea47f9f69d71558cc019aebcca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 930, "license_type": "no_license", "max_line_length": 53, "num_lines": 37, "path": "/GG/315.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "/**\n * @param {number[]} nums\n * @return {number[]}\n */\n\n// 算法题\n//     给一个数组, 如 [2, 1, 3],找出每个数字在该数字之后比它大的数的个数,比如:\n//     比 2 大的有 3,所以结果是 1,\n//     比 1 大的有 3,结果也是 1,\n\n// 比 3 大的没有,结果是 0,\n//     所以返回 [1, 1, 0];\n\nvar countSmaller = function (nums) {\n if (nums.length === 0) return [];\n\n let counts = [0];\n let orderedNums = [nums[nums.length - 1]];\n\n const bs = (target) => {\n let start = 0;\n let end = orderedNums.length;\n while (start < end) {\n let mid = Math.floor((start + end) / 2);\n if (target > orderedNums[mid]) start = mid + 1;\n else end = mid;\n }\n orderedNums.splice(start, 0, target);\n return start;\n };\n\n for (let i = nums.length - 2; i >= 0; i--) {\n let count = bs(nums[i]);\n counts.unshift(count);\n }\n return counts;\n};\n" }, { "alpha_fraction": 0.5514705777168274, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 28.292306900024414, "blob_id": "dd27a46112c200a00b16767f498525cefe8feecb", "content_id": "f720de878718eb23d1c08d3d7e4dfd66b4daa77f", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 95, "num_lines": 65, "path": "/LI/364-depthsuminverse.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "# \"\"\"\n# This is the interface that allows for creating nested lists.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n# class NestedInteger:\n# def __init__(self, value=None):\n# \"\"\"\n# If value is not specified, initializes an empty list.\n# Otherwise initializes a single integer equal to value.\n# \"\"\"\n#\n# def isInteger(self):\n# \"\"\"\n# @return True if this NestedInteger holds a single integer, rather than a nested list.\n# :rtype bool\n# \"\"\"\n#\n# def add(self, elem):\n# \"\"\"\n# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.\n# :rtype void\n# \"\"\"\n#\n# def setInteger(self, value):\n# \"\"\"\n# Set this NestedInteger to hold a single integer equal to value.\n# :rtype void\n# \"\"\"\n#\n# def getInteger(self):\n# \"\"\"\n# @return the single integer that this NestedInteger holds, if it holds a single integer\n# Return None if this NestedInteger holds a nested list\n# :rtype int\n# \"\"\"\n#\n# def getList(self):\n# \"\"\"\n# @return the nested list that this NestedInteger holds, if it holds a nested list\n# Return None if this NestedInteger holds a single integer\n# :rtype List[NestedInteger]\n# \"\"\"\n\nclass Solution:\n def depthSumInverse(self, nestedList: List[NestedInteger]) -> int:\n sol = []\n\n def dfs(curr, depth) -> None:\n if depth > len(sol):\n sol.append(0)\n\n if curr.getInteger():\n sol[depth - 1] += curr.getInteger()\n return\n for i in curr.getList():\n dfs(i, depth + 1)\n\n for i in nestedList:\n dfs(i, 1)\n\n max_depth = len(sol)\n for i in range(max_depth):\n sol[i] = (max_depth - i) * sol[i]\n\n return sum(sol)\n" }, { "alpha_fraction": 0.7436241507530212, "alphanum_fraction": 0.8134227991104126, 
"avg_line_length": 32.11111068725586, "blob_id": "c283113bd288ed0ea67b6217cef9eb2d7ae047b3", "content_id": "2fa97568cd30ef2370e87ff1b53bba45659bd031", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1548, "license_type": "no_license", "max_line_length": 89, "num_lines": 45, "path": "/High Freq/heap/heap.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "堆(Heap or Priority Queue)、栈(Stack)、队列(Queue)、哈希表类(Hashmap、Hashset)\n\nQueue\nLeetcode 225. Implement Stack using Queues\nLeetcode 346. Moving Average from Data Stream\nLeetcode 281. Zigzag Iterator\nLeetcode 1429. First Unique Number\nLeetcode 54. Spiral Matrix\nLeetcode 362. Design Hit Counter\n\nStack\nLeetcode 232. Implement Queue using Stacks\nLeetcode 150. Evaluate Reverse Polish Notation\nLeetcode 224. Basic Calculator II (I, II, III, IV)\nLeetcode 20. Valid Parentheses\nLeetcode 1472. Design Browser History\nLeetcode 1209. Remove All Adjacent Duplicates in String II\nLeetcode 1249. Minimum Remove to Make Valid Parentheses\nLeetcode 735. Asteroid Collision\n\nHashmap\nLeetcode 1. Two Sum\nLeetcode 146. LRU Cache (Python 中可以使用 OrderedDict 来代替)\nLeetcode 128. Longest Consecutive Sequence\nLeetcode 73. Set Matrix Zeroes\nLeetcode 380. Insert Delete GetRandom O(1)\nLeetcode 49. Group Anagrams\nLeetcode 350. Intersection of Two Arrays II\nLeetcode 299. Bulls and Cows\nLeetcode 348 Design Tic-Tac-Toe\n\nHeap/Priority Queue\n\nLeetcode 973. K Closest Points\nLeetcode 347. Top k Largest Elements\nLeetcode 23. Merge K Sorted Lists\nLeetcode 264. Ugly Number II\nLeetcode 1086. High Five\nLeetcode 68. Merge Sorted Arrays\nLeetcode 692. Top K Frequent Words\nLeetcode 378. Kth Smallest Element in a Sorted Matrix\nLeetcode 295. Find Median from Data Stream\nLeetcode 767. Reorganize String\nLeetcode 1438. Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit\nLeetcode 895. 
Maximum Frequency Stack\n" }, { "alpha_fraction": 0.7435897588729858, "alphanum_fraction": 0.8012820482254028, "avg_line_length": 27.363636016845703, "blob_id": "d2261fc755fa5805979cbe8822a94cae51e63273", "content_id": "3ec4dc68e1669426c50dedaeccab869035cc6638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 420, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/High Freq/linkedList or array/linkedList.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "链表如何实现,如何遍历链表。链表可以保证头部尾部插入删除操作都是 O(1),查找任意元素位置 O(N)\n\nLeetcode 206. Reverse Linked List\nLeetcode 876. Middle of the Linked List\n\n快慢指针和链表反转\n\nLeetcode 160. Intersection of Two Linked Lists\nLeetcode 141. Linked List Cycle (Linked List Cycle II)\nLeetcode 92. Reverse Linked List II\nLeetcode 328. Odd Even Linked List\n" }, { "alpha_fraction": 0.39848485589027405, "alphanum_fraction": 0.46060606837272644, "avg_line_length": 22.571428298950195, "blob_id": "dabf7c369b175735da68045ce553c98a059de367", "content_id": "d72d068266b284bfc9ec8f44b0dc5c163d562dbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 660, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/GG/1277-countSquare.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// Input: matrix =\n// [\n// [0,1,1,1],\n// [1,1,1,1],\n// [0,1,1,1]\n// ]\n// Output: 15\n// Explanation:\n// There are 10 squares of side 1.\n// There are 4 squares of side 2.\n// There is 1 square of side 3.\n// Total number of squares = 10 + 4 + 1 = 15.\n\n/**\n * @param {number[][]} matrix\n * @return {number}\n */\nlet countSquares = (A, cnt = 0) => {\n let M = A.length,\n N = A[0].length;\n let dp = [...Array(M + 1)].map((row) => Array(N + 1).fill(0));\n for (let i = 1; i <= M; ++i)\n for (let j = 1; j <= N; ++j)\n if (A[i - 1][j - 
1])\n cnt += dp[i][j] =\n 1 + Math.min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]);\n return cnt;\n};\n" }, { "alpha_fraction": 0.7743399143218994, "alphanum_fraction": 0.8345709443092346, "avg_line_length": 36.875, "blob_id": "a010e8ab6e0c72bf948c226903ce14a1150ac3a1", "content_id": "d68a94bd80bfe950dfc7005c7c81b2f9f28e2a56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3184, "license_type": "no_license", "max_line_length": 127, "num_lines": 64, "path": "/High Freq/dfs/dfs.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "1. 图中(有向无向皆可)的符合某种特征(比如最长)的路径以及长度\n2. 排列组合\n3. 遍历一个图或者树.\n4. 找出图或者树中符合题目要求的全部方案\n\n递归题目都可以用非递归迭代的方法写,但一般实现起来非常麻烦\n基于树的 DFS:需要记住递归写前序中序后序遍历二叉树的模板\nLeetcode 543 Diameter of Binary Tree\nLeetcode 226 Invert Binary Tree\nLeetcode 124 Binary Tree Maximum Path Sum\nLeetcode 236 Lowest Common Ancestor of a Binary Tree\nLeetcode 101 Symmetric Tree\nLeetcode 105 Construct Binary Tree from Preorder and Inorder Traversal\nLeetcode 104 Maximum Depth of Binary Tree\nLeetcode 951 Flip Equivalent Binary Trees\nLeetcode 987 Vertical Order Traversal of a Binary Tree\nLeetcode 1485 Clone Binary Tree With Random Pointer\nLeetcode 572 Subtree of Another Tree\nLeetcode 863 All Nodes Distance K in Binary Tree\n\n二叉搜索树(BST):BST 特征:中序遍历为单调递增的二叉树,换句话说,根节点的值比左子树任意节点值都大,比右子树任意节点值都小,增删查改均为 O(h)复杂度,h 为树的高度;注意不是所有的 BST 题目都需要递归,有的题目只需要 while 循环即可\nLeetcode 230 Kth Smallest element in a BST\nLeetcode 98 Validate Binary Search Tree\nLeetcode 270 Cloest Binary Search Tree Value\nLeetcode 235 Lowest Common Ancestor of a Binary Search Tree\nLeetcode 669 Trim a Binary Search Tree\nLeetcode 700 Search Range in Binary Search Tree\nLeetcode 108 Convert Sorted Array to Binary Search Tree\nLeetcode 333 Largest BST Subtree\nLeetcode 510 Inorder Successor in BST II\n\n基于图的 DFS: 和 BFS 一样一般需要一个 set 来记录访问过的节点,避免重复访问造成死循环\nLeetcode 341 Flatten Nested List Iterator\nLeetcode 394 Decode 
String\nLeetcode 51 N-Queens\nLeetcode 291 Word Pattern II (I 为简单的 Hashmap 题)\nLeetcode 126 Word Ladder II (I 为 BFS 题目)\nLeetcode 1110 Delete Nodes And Return Forest\nLeetcode 93 Restore IP Addresses\nLeetcode 22 Generate Parentheses\nLeetcode 37 Sodoku Solver\nLeetcode 301 Remove Invalid Parentheses\nLeetcode 212 Word Search II (I, II)\nLeetcode 1087 Brace Expansion\nLeetcode 399 Evaluate Division\nLeetcode 1274 Number of Ships in a Rectangle\nLeetcode 1376 Time Needed to Inform All Employees\nLeetcode 694 Number of Distinct Islands\nLeetcode 586 Score of Parentheses\n\n基于排列组合的 DFS: 其实与图类 DFS 方法一致,但是排列组合的特征更明显\nLeetcode 17 Letter Combinations of a Phone Number\nLeetcode 39 Combination Sum (I, II, III, IV)\nLeetcode 90 Subsets II (重点在于如何去重)\nLeetcode 47 Permutation II\nLeetcode 77 Combinations\nLeetcode 526 Beautiful Arrangement\n\n记忆化搜索(DFS + Memoization Search):算是动态规划的一种,递归每次返回时同时记录下已访问过的节点特征,避免重复访问同一个节点,可以有效的把指数级别的 DFS 时间复杂度降为多项式级别\nLeetcode 139 Word Break II\nLeetcode 131 Palindrome Partitioning\nLeetcode 72 Edit Distance\nLeetcode 377 Combination Sum IV\nLeetcode 1335 Minimum Difficulty of a Job Schedule\n" }, { "alpha_fraction": 0.5811870098114014, "alphanum_fraction": 0.6013438105583191, "avg_line_length": 20.780487060546875, "blob_id": "d151154924260b933755a704e1477807a973939c", "content_id": "465d8b4263e13225adc7de29deb3f6dcfab77bdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 893, "license_type": "no_license", "max_line_length": 81, "num_lines": 41, "path": "/LI/li-highlight.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// build a widget with 4 lists and 3 nested list, \\\n// nested can be span/collapse when clicking on parents, highlight txt when hover\n<table class=\"gridview\">\n <tr>\n <th>Col1</th>\n <th>Col2</th>\n <th>Col3</th>\n <th>Col4</th>\n </tr>\n <tr onclick=\"toggleClass(this,'selected');\">\n <td>Data1</td>\n <td>Data2</td>\n 
<td>Data3</td>\n <td>Data4</td>\n </tr>\n <tr onclick=\"toggleClass(this,'selected');\">\n <td>Data1</td>\n <td>Data2</td>\n <td>Data3</td>\n <td>Data4</td>\n </tr>\n <tr onclick=\"toggleClass(this,'selected');\">\n <td>Data1</td>\n <td>Data2</td>\n <td>Data3</td>\n <td>Data4</td>\n </tr>\n</table>;\n\nlet currentSelect = null;\n\nconst toggleClass = (el, className) => {\n console.log(el, className);\n\n if (currentSelect) {\n currentSelect.classList.remove(\"selected\");\n }\n currentSelect = el;\n\n currentSelect.classList.add(\"selected\");\n};\n" }, { "alpha_fraction": 0.6218057870864868, "alphanum_fraction": 0.6218057870864868, "avg_line_length": 22.479999542236328, "blob_id": "b7f4d3efcacfe3350a9f6027a8a5bf6e36b6312d", "content_id": "a1a796541663443647ece61cc53215b6c67587df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 587, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/LI/tooltips-code-pen.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "const OverlayTrigger = ReactBootstrap.OverlayTrigger;\n\nconst Tooltip = ReactBootstrap.Tooltip;\n\nconst renderTooltip = (props) => (\n <Tooltip {...props}>Tooltip for the register button</Tooltip>\n);\n\nconst Hello = React.createClass({\n render() {\n return (\n <div>\n hi\n <OverlayTrigger\n placement=\"bottom\"\n overlay={<Tooltip>Tooltip for the register button</Tooltip>}\n >\n <Button>Register</Button>\n </OverlayTrigger>\n </div>\n );\n },\n});\n\nReactDOM.render(<Hello name=\"World\" />, document.getElementById(\"container\"));\n" }, { "alpha_fraction": 0.5208517909049988, "alphanum_fraction": 0.5403726696968079, "avg_line_length": 19.870370864868164, "blob_id": "feae570a2d3163ae1836b7ed0cd86064360dbfc2", "content_id": "462848de9fb539169485ea2b482daffd91c98942", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1127, 
"license_type": "no_license", "max_line_length": 71, "num_lines": 54, "path": "/LRU.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "class LRUCache {\n constructor(capacity) {\n this.cache = new Map();\n this.capacity = capacity;\n }\n\n get(key) {\n if (!this.cache.has(key)) return -1;\n\n let val = this.cache.get(key);\n\n this.cache.delete(key);\n this.cache.set(key, val);\n\n return val;\n }\n\n put(key, value) {\n this.cache.delete(key);\n\n if (this.cache.size === this.capacity) {\n this.cache.delete(this.cache.keys().next().value);\n this.cache.set(key, value);\n } else {\n this.cache.set(key, value);\n }\n }\n\n // Implement LRU/MRU retrieval methods\n getLeastRecent() {\n return Array.from(this.cache)[0];\n }\n\n getMostRecent() {\n return Array.from(this.cache)[this.cache.size - 1];\n }\n}\n\n// ====================================================================\nconst map1 = new Map();\n\nmap1.set(\"a\", 1);\nmap1.set(\"b\", 2);\nmap1.set(\"c\", 3);\n\nconsole.log(Array.from(map1));\nconsole.log(map1.keys().next().value);\n\nmap1.delete(\"b\");\nmap1.set(\"b\", 2);\nconsole.log(Array.from(map1));\n// > Array [Array [\"a\", 1], Array [\"b\", 2], Array [\"c\", 3]]\n// > \"a\"\n// > Array [Array [\"a\", 1], Array [\"c\", 3], Array [\"b\", 2]]\n" }, { "alpha_fraction": 0.5776658058166504, "alphanum_fraction": 0.5894206762313843, "avg_line_length": 29.538461685180664, "blob_id": "2482e14aae9500bc38c6bcd51e2cb4fc807efb24", "content_id": "61e991be31d1bf08b720acaf750dacc9d25d6c4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 113, "num_lines": 39, "path": "/LI/205-isIsomorphic.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nTwo strings s and t are isomorphic if the characters in s can be replaced to get t.\n\nAll occurrences of a character must be replaced 
with another character while preserving the order of characters. \nNo two characters may map to the same character, but a character may map to itself.\n\nInput: s = \"egg\", t = \"add\"\nOutput: true\n\nInput: s = \"foo\", t = \"bar\"\nOutput: false\n\n\"\"\"\n\n\nclass Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n mapping_s_t = {}\n mapping_t_s = {}\n for c1, c2 in zip(s, t):\n\n # Case 1: No mapping exists in either of the dictionaries\n if (c1 not in mapping_s_t) and (c2 not in mapping_t_s):\n mapping_s_t[c1] = c2\n mapping_t_s[c2] = c1\n\n # Case 2: Ether mapping doesn't exist in one of the dictionaries or Mapping exists and\n # it doesn't match in either of the dictionaries or both\n elif mapping_s_t.get(c1) != c2 or mapping_t_s.get(c2) != c1:\n return False\n\n return True\n\n\n# a = (\"John\", \"Charles\", \"Mike\")\n# b = (\"Jenny\", \"Christy\", \"Monica\")\n\n# x = zip(a, b)\n# (('John', 'Jenny'), ('Charles', 'Christy'), ('Mike', 'Monica'))\n" }, { "alpha_fraction": 0.6057838797569275, "alphanum_fraction": 0.6073059439659119, "avg_line_length": 21.65517234802246, "blob_id": "e0d11327b6b897c4250d663e2b9a615863ab76a6", "content_id": "ee83ff9632a602326d147dd71ab844d7c7691dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 657, "license_type": "no_license", "max_line_length": 62, "num_lines": 29, "path": "/GG/retry.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "function wait(delay) {\n return new Promise((resolve) => setTimeout(resolve, delay));\n}\n\nfunction fetchRetry(url, delay, tries, fetchOptions = {}) {\n function onError(err) {\n triesLeft = tries - 1;\n if (!triesLeft) {\n throw err;\n }\n return wait(delay).then(() =>\n fetchRetry(url, delay, triesLeft, fetchOptions)\n );\n }\n return fetch(url, fetchOptions).catch(onError);\n}\n\nasync function fetchUntilSucceeded() {\n let success = false;\n while (!success) {\n try {\n // let 
result = await fetch(...);\n success = true;\n //do your stuff with your result here\n } catch {\n //do your catch stuff here\n }\n }\n}\n" }, { "alpha_fraction": 0.7357604503631592, "alphanum_fraction": 0.7398707866668701, "avg_line_length": 40.53658676147461, "blob_id": "20526ded938afff3b639e747ba819d6192c5d86f", "content_id": "8b9ae71515ffb551d4a14509cd64e40246bd1e8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 227, "num_lines": 41, "path": "/Sys Design/note.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "1. clarification\n Will users of our service be able to post tweets and follow other people?\n Should we also design to create and display the user’s timeline?\n Will tweets contain photos and videos?\n Are we focusing on the backend only, or are we developing the front-end too?\n Will users be able to search tweets?\n Do we need to display hot trending topics?\n Will there be any push notification for new (or important) tweets?\n\n2. Back-of-the-envelope estimation\n estimate the scale of the system\n scale expected\n storage\n bandwidth - > traffic\n\n3. System interface definition\n Define APIs\n\n4. Defining data model\n identify various system entities & interact with each othe\n different aspects of data management\n storage, transportation, encryption,\n which db\n\n5. High-level design\n identify enough components that are needed to solve the actual problem from end to end.\n\n6. Detailed design\n Dig deeper into two or three major components\n\n Since we will be storing a massive amount of data, how should we partition our data to distribute it to multiple databases? Should we try to store all the data of a user on the same database? 
What issue could it cause?\n\n How will we handle hot users who tweet a lot or follow lots of people?\n\n Since users’ timeline will contain the most recent (and relevant) tweets, should we try to store our data so that it is optimized for scanning the latest tweets?\n\n How much and at which layer should we introduce cache to speed things up?\n What components need better load balancing?\n\n7. Identifying and resolving bottlenecks\n discuss as many bottlenecks as possible\n" }, { "alpha_fraction": 0.5622435212135315, "alphanum_fraction": 0.5766074061393738, "avg_line_length": 30.106382369995117, "blob_id": "0c17e9096e498070a06503e58d8aa330153260b7", "content_id": "1963958d63d40ea506532e021c022def3c92a3bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 65, "num_lines": 47, "path": "/GG/735-asteroidcollision.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// Input: asteroids = [5,10,-5]\n// Output: [5,10]\n// Explanation: The 10 and -5 collide resulting in 10.\n// The 5 and 10 never collide.\n\n/**\n * @param {number[]} asteroids\n * @return {number[]}\n */\nvar asteroidCollision = function (asteroids) {\n const s = [];\n // Negative asteroids to the left of the stack can be ignored.\n // They'll never collide.\n for (let i = 0; i < asteroids.length; i++) {\n a = asteroids[i];\n if (s.length == 0 || (s[s.length - 1] < 0 && a < 0)) {\n s.push(a);\n } else if (a > 0) {\n s.push(a);\n } else {\n // a is negative. It can only collide with positive ones in\n // the stack. 
The following will keep on iterating\n // until it is dealt with.\n const pop = s.pop();\n\n // positive pop beats negative a, so pick up pop\n // and re-add it to the stack.\n if (Math.abs(pop) > Math.abs(a)) {\n s.push(pop);\n\n // a has larger size than pop, so pop will get dropped\n // and we'll retry another iteration with the same\n // negative a asteroid and whatever the stack's state is.\n } else if (Math.abs(pop) < Math.abs(a)) {\n i--;\n // magnitude of positive pop and negative a are the same\n // so we can drop both of them.\n } else {\n continue;\n }\n }\n // a is negative. It can only collide with positive ones in\n // the stack. The following will keep on iterating\n // until it is dealt with.\n }\n return s;\n};\n" }, { "alpha_fraction": 0.5708298683166504, "alphanum_fraction": 0.5804694294929504, "avg_line_length": 23.597938537597656, "blob_id": "cdcdf2661db000f19653cb140954ada6d1488c65", "content_id": "b8b631ab5f87b9bd5a04284637a5c025b5fca361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2750, "license_type": "no_license", "max_line_length": 85, "num_lines": 97, "path": "/LI/li-ppl-youmayknow.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// people you may know\n// 一个图片,让你用html写出来。\n// 大概是一个linkedin的用户推荐界面。\n// 第一行是文字People You May Know 我用div和p,提示让用<h5>tag\n// 下面三小行:每一行是一个推荐给你的用户\n\n// 又分三小列:每一列从左到右是:一个图片,两行文字,一个叉叉图标。\n\n// 其中第一行文字是:姓名(加粗的),职位,\n// 第二行文字是:一个加号,一个“connect”,要求用<a></a>加在connect旁边。\n\n// 最后右下角有个See more>>> 我还没写,他就问了两个CSS问题\n// (前面的代码都不用写JavaScript和CSS,只用html,但他要问你为什么用这个tag)\n<body>\n <div class=\"customCard\">\n <div class=\"row\">\n <div class=\"card-component box\"></div>\n </div>\n </div>\n</body>;\nvar userDetail = [\n {\n name: \"sunil\",\n age: \"24\",\n place: \"delhi\",\n avatar: \"http://www.gravatar.com/avatar/\",\n country: \"US\",\n background:\n 
\"https://upload.wikimedia.org/wikipedia/commons/9/9a/Gull_portrait_ca_usa.jpg\",\n },\n {\n name: \"sunil\",\n age: \"24\",\n place: \"delhi\",\n avatar: \"https://i.ytimg.com/vi/fUWrhetZh9M/maxresdefault.jpg\",\n country: \"US\",\n background:\n \"https://upload.wikimedia.org/wikipedia/commons/9/9a/Gull_portrait_ca_usa.jpg\",\n },\n {\n name: \"abishek\",\n age: \"26\",\n place: \"kolkata\",\n avatar: \"./image/abc.jpg\",\n country: \"India\",\n },\n {\n name: \"chiranjeev\",\n age: \"20\",\n place: \"bangalore\",\n avatar: \"./image/abc.jpg\",\n country: \"India\",\n },\n {\n name: \"sunil\",\n age: \"24\",\n place: \"delhi\",\n avatar: \"http://www.gravatar.com/avatar/\",\n country: \"US\",\n background:\n \"https://upload.wikimedia.org/wikipedia/commons/9/9a/Gull_portrait_ca_usa.jpg\",\n },\n {\n name: \"sunil\",\n age: \"24\",\n place: \"delhi\",\n avatar: \"https://i.ytimg.com/vi/fUWrhetZh9M/maxresdefault.jpg\",\n country: \"US\",\n background:\n \"https://upload.wikimedia.org/wikipedia/commons/9/9a/Gull_portrait_ca_usa.jpg\",\n },\n];\n\nvar customUL = document.createElement(\"ul\");\n\ncustomUL.className = \"user-card-ul\";\ncustomUL = userDetail\n .map(\n (each) =>\n `\n <div class='each-user-card-component'>\n <div class='delete-btn'>\n <button class='top-right'>X</buttom>\n </div>\n \n <image class='avatar-cycle-image center' src=${each.avatar} />\n <div class='center'><p>${each.name}</p></div>\n <div class='center'>${each.place}, ${each.country}</div>\n <br/>\n <button class='center'>connect</buttom>\n \n </div>\n `\n )\n .join(\"\");\n\ndocument.querySelector(\".box\").innerHTML = customUL;\n" }, { "alpha_fraction": 0.5337620377540588, "alphanum_fraction": 0.5787781476974487, "avg_line_length": 27.272727966308594, "blob_id": "07a01d78f166c4ebc5d62cd3b2e77b9a38b6cc18", "content_id": "20659b747bcd9bcd998000f20621b01dacf71757", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 311, 
"license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/High Freq/queue/queue&stack.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "var stack = [];\nstack.push(2); // stack is now [2]\nstack.push(5); // stack is now [2, 5]\nvar i = stack.pop(); // stack is now [2]\nalert(i); // displays 5\n\nvar queue = [];\nqueue.push(2); // queue is now [2]\nqueue.push(5); // queue is now [2, 5]\nvar i = queue.shift(); // queue is now [5]\nalert(i); // displays 2\n" }, { "alpha_fraction": 0.7321100831031799, "alphanum_fraction": 0.805504560470581, "avg_line_length": 33.0625, "blob_id": "2eb623e9003c968befb01e9f8af3bca27f929a78", "content_id": "6c07887b78cacda9adaee11bf24a6b7f00d3303a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 555, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/High Freq/binarySearch/binarySearch.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "时间复杂度 logN\n\nLeetcode 34. Find First and Last Position of Element in Sorted Array\nLeetcode 33. Search in Rotated Sorted Array\nLeetcode 1095. Find in Mountain Array\nLeetcode 162. Find Peak Element\nLeetcode 278. First Bad Version\nLeetcode 74. Search a 2D Matrix\nLeetcode 240. Search a 2D Matrix II\n\nLeetcode 69. Sqrt(x)\nLeetcode 540. Single Element in a Sorted Array\nLeetcode 644. Maximum Average Subarray II\nLeetcode 528. Random Pick with Weight\nLeetcode 1300. Sum of Mutated Array Closest to Target\nLeetcode 1060. 
Missing Element in Sorted Array\n" }, { "alpha_fraction": 0.8338192701339722, "alphanum_fraction": 0.8338192701339722, "avg_line_length": 113.33333587646484, "blob_id": "ab6c522cc9ab531084e1fc51986f9b678b44ddcb", "content_id": "6af04044f47367503750319e31acf50305c86c92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 349, "license_type": "no_license", "max_line_length": 220, "num_lines": 3, "path": "/Sys Design/sql-vs-nosql.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "Relational databases are structured and have predefined schemas like phone books that store phone numbers and addresses.\n\nNon-relational databases are unstructured, distributed, and have a dynamic schema like file folders that hold everything from a person’s address and phone number to their Facebook ‘likes’ and online shopping preferences.\n" }, { "alpha_fraction": 0.44057053327560425, "alphanum_fraction": 0.44690966606140137, "avg_line_length": 21.535715103149414, "blob_id": "d8dacb4707fcf16a2c50d455200ecb8bd103c1c4", "content_id": "800ddbf2b4dfd1431a6d9a1b35788094280276d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/High Freq/recursion/revise-string.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\nInput: s = [\"h\",\"e\",\"l\",\"l\",\"o\"]\nOutput: [\"o\",\"l\",\"l\",\"e\",\"h\"]\n\n\"\"\"\n\n\n# class Solution: -> one line but not what interviewer want\n# def reverseString(self, s):\n# s.reverse()\n\n\nclass Solution:\n def reverseString(self, s: List[str]) -> None:\n\n def helper(left: int, right: int, string: List[str]):\n\n if left >= right:\n # base case\n return\n\n # general case\n s[left], s[right] = s[right], s[left]\n\n helper(left+1, right-1, s)\n # 
------------------------------------------------\n\n helper(left=0, right=len(s)-1, string=s)\n" }, { "alpha_fraction": 0.7658558487892151, "alphanum_fraction": 0.7758581638336182, "avg_line_length": 90.64583587646484, "blob_id": "651ae2f2cadfaca37a01ed5d62833e386bc46033", "content_id": "a99c0c27af6e9eb4a88f301febff84069131d6a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4407, "license_type": "no_license", "max_line_length": 449, "num_lines": 48, "path": "/Sys Design/extra-note.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "<h3>Heartbeat</h3>\nEach server periodically sends a heartbeat message to a central monitoring server or other servers in the system to show that it is still alive and functioning.\n\n<h3>Checksum</h3>\n\nIn a distributed system, while moving data between components, it is possible that the data fetched from a node may arrive corrupted. This corruption can occur because of faults in a storage device, network, software, etc. How can a distributed system ensure data integrity, so that the client receives an error instead of corrupt data?\n\nCalculate a checksum and store it with data.\n\nTo calculate a checksum, a cryptographic hash function like MD5, SHA-1, SHA-256, or SHA-512 is used. The hash function takes the input data and produces a string (containing letters and numbers) of fixed length; this string is called the checksum.\n\nWhen a system is storing some data, it computes a checksum of the data and stores the checksum with the data. When a client retrieves data, it verifies that the data it received from the server matches the checksum stored. If not, then the client can opt to retrieve that data from another replica.\n\n<h3>Bloom Filters</h3>\nIf we have a large set of structured data (identified by record IDs) stored in a set of data files, what is the most efficient way to know which file might contain our required data? 
\nWe don’t want to read each file, as that would be slow, and we have to read a lot of data from the dis. one solution is index, if we want to search an ID in this index, the best we can do is a Binary Search. Can we do better than that?\n\nUse Bloom filters to quickly find if an element might be present in a set.\n\nhe Bloom filter data structure tells whether an element may be in a set, or definitely is not. The only possible errors are false positives, i.e., a search for a nonexistent element might give an incorrect answer. With more elements in the filter, the error rate increases. An empty Bloom filter is a bit-array of m bits, all set to 0. There are also k different hash functions, each of which maps a set element to one of the m bit positions.\n\nTo add an element, feed it to the hash functions to get k bit positions, and set the bits at these positions to 1.\nTo test if an element is in the set, feed it to the hash functions to get k bit positions.\nIf any of the bits at these positions is 0, the element is definitely not in the set.\nIf all are 1, then the element may be in the set.\n\nFor a fixed error rate, adding a new element and testing for membership are both constant time operations, and a filter with room for ‘n’ elements requires O(n)O(n) space.\n\n<h3>how to make sure that all replicas are consistent</h3>\nquorum\n\nSuppose a database is replicated on five machines. In that case, quorum refers to the minimum number of machines that perform the same action (commit or abort) for a given transaction in order to decide the final operation for that transaction. So, in a set of 5 machines, three machines(N/2+1) form the majority quorum, and if they agree, we will commit that operation. Quorum enforces the consistency requirement needed for distributed operations.\n\nIn systems with multiple replicas, there is a possibility that the user reads inconsistent data. 
For example, when there are three replicas, R1, R2, and R3 in a cluster, and a user writes value v1 to replica R1. Then another user reads from replica R2 or R3 which are still behind R1 and thus will not have the value v1, so the second user will not get the consistent state of data.\n\nQuorum is achieved when nodes follow the below protocol: R + W > NR+W>N, where:\nNN = nodes in the quorum group\nWW = minimum write nodes\nRR = minimum read nodes\n\nIf a distributed system follows R + W > NR+W>N rule, then every read will see at least one copy of the latest value written. For example, a common configuration could be (N=3, W=2, R=2) to ensure strong consistency. Here are a couple of other examples:\n\n(N=3, W=1, R=3): fast write, slow read, not very durable\n(N=3, W=3, R=1): slow write, fast read, durable\nThe following two things should be kept in mind before deciding read/write quorum:\n\nR=1 and W=N ⇒ full replication (write-all, read-one): undesirable when servers can be unavailable because writes are not guaranteed to complete.\nBest performance (throughput/availability) when 1 < r < w < n1<r<w<n, because reads are more frequent than writes in most applications\n" }, { "alpha_fraction": 0.7351778745651245, "alphanum_fraction": 0.7944663763046265, "avg_line_length": 27.11111068725586, "blob_id": "5513e779e654f028135f3a917d481c79cdc7497c", "content_id": "3a8435d8f06484c8e4124ba9b24c203ec083ec1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 351, "license_type": "no_license", "max_line_length": 69, "num_lines": 9, "path": "/High Freq/sort/sort.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "快速排序时间复杂度平均状态下 O(NlogN),空间复杂度 O(1),归并排序最坏情况下时间复杂度 O(NlogN),空间复杂度 O(N)\n\nLeetcode 148. Sort List\nLeetcode 56. Merge Intervals\n\nLeetcode 179. Largest Number\nLeetcode 75. Sort Colors\nLeetcode 215. Kth Largest Element\nLeetcode 4. 
Median of Two Sorted Arrays\n" }, { "alpha_fraction": 0.46785715222358704, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 20, "blob_id": "5c765f7cd75ae43970c4a3aed5942d520b2d236c", "content_id": "599407471a39ec84cf34522f684bcfb9285560f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 840, "license_type": "no_license", "max_line_length": 63, "num_lines": 40, "path": "/LI/li-endorsements.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "const endorsements = [\n { sikll: \"JS\", user: \"one\" },\n { sikll: \"JS\", user: \"two\" },\n { sikll: \"JS\", user: \"three\" },\n { sikll: \"JS\", user: \"four\" },\n { sikll: \"CSS\", user: \"five\" },\n { sikll: \"CSS\", user: \"six\" },\n];\n\nconst output = [\n { sikll: \"JS\", user: [\"one\", \"2\", \"3\", \"4\"], count: 4 },\n { sikll: \"CSS\", user: [5, 6], count: 2 },\n];\n\nconst formatData = (endorsements) => {\n let res = [];\n\n if (!Array.isArray(endorsements)) {\n return res;\n }\n\n const mappedObj = endorsements.reduce(\n (prev, curr) => (\n {\n ...prev,\n [curr.skill]: [...(prev[curr.skill] || []), curr.user],\n },\n {}\n )\n );\n res = Object.entries(mappedObj)\n .map(([key, value]) => ({\n sikll: key,\n users: value,\n count: value.length,\n }))\n .sort((a, b) => a.count - b.count);\n\n return res;\n};\n" }, { "alpha_fraction": 0.5153152942657471, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 26.75, "blob_id": "2e4e1104027646c21fbd4fa900beb4fe870c873a", "content_id": "554998710f5084b545f4661dc9c9a882f985dec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 555, "license_type": "no_license", "max_line_length": 70, "num_lines": 20, "path": "/LI/fibonacci-memo.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "const memoizeFib = (index, cache = []) => {\n // console.log('index :', 
index, ' cache:', cache)\n\n // > Array [undefined, undefined, undefined, 2, 3, 5, 8, 13]\n // > Array [undefined, undefined, undefined, 2, 3, 5, 8, 13, 21]\n // > Array [undefined, undefined, undefined, 2, 3, 5, 8, 13, 21, 34]\n\n if (cache[index]) {\n return cache[index];\n } else {\n if (index < 3) return 1;\n else {\n cache[index] =\n memoizeFib(index - 1, cache) + memoizeFib(index - 2, cache);\n }\n }\n return cache[index];\n};\n\nconsole.log(memoizeFib(5));\n" }, { "alpha_fraction": 0.7721254229545593, "alphanum_fraction": 0.7979093790054321, "avg_line_length": 40, "blob_id": "d52852eda76ba8b1ba31db3710f49249d00e7b90", "content_id": "4aa3abd925179e1929d725766a0efe55d7dbcc9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 195, "num_lines": 35, "path": "/LI/FE note.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "JS trickey\nhttps://www.youtube.com/watch?v=tiRhFGnCltw&t=2s\n\nJS - DOM\nhttps://www.youtube.com/watch?v=hDN5IGUv3Yw&list=PL4cUxeGkcC9gfoKa5la9dsdCNpuey2s-V&index=5\nhttps://www.youtube.com/watch?v=tiRhFGnCltw&t=2s\n\nli.parentNode.removeChild(li)\ne.preventDefault()\n\nWhat is the between callback and promise.\n\nEvent bubbling, // what is, how or when to use\nhttps://www.youtube.com/watch?v=SqQZ8SttQsI&list=PL4cUxeGkcC9gfoKa5la9dsdCNpuey2s-V&index=10\n\nWeb accessibility\n\\Could you tell me something can do on the frontend side to improve accessibility;)\n\\说了一下 color,contextual link (这个我说了,但是也是表达不太好),还是就是 tool tip when hover or click on some text.\nhttps://www.youtube.com/watch?v=20SHvU2PKsM\n\nScope & others. 
Hoisting (all func order)\nhttps://www.youtube.com/watch?v=tiRhFGnCltw&t=2s\n\nscroll to botton & request\n\nhttps://stackoverflow.com/questions/3898130/check-if-a-user-has-scrolled-to-the-bottom\nhttps://stackoverflow.com/questions/37620694/how-to-scroll-to-bottom-in-react\n\nPrototype and class inheritance // difference\nThe most important difference between class- and prototype-based inheritance is that a class defines a type which can be instantiated at runtime, whereas a prototype is itself an object instance.\n\nBasically, events bubble by default so the difference between the two is:\n\ntarget is the element that triggered the event (e.g., the user clicked on)\ncurrentTarget is the element that the event listener is attached to.\n" }, { "alpha_fraction": 0.5935251712799072, "alphanum_fraction": 0.6618704795837402, "avg_line_length": 15.352941513061523, "blob_id": "244aade3e74e04e25659712e253e2a5f953d83ec", "content_id": "2ef8510974332c252d0013d23bc1b2040ad6c089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 288, "license_type": "no_license", "max_line_length": 35, "num_lines": 17, "path": "/README.md", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "# algorithm-learning-note\n\nAlgorithm learning personally note\n\nAdded some Sys Design leanring note\n\nPower(x, n)\nx^n = (x^(n.2))^2 -> )(logn)\n\nsqrt(x)\nmagic number 0x5f3759df\n\nTrailing Number of zeros in n!\nn/5 + n/25 + n/123\n\nO(1) check power of 2\n(x-1) & x == 0 --> x 是 2 的某次幂\n" }, { "alpha_fraction": 0.6132283210754395, "alphanum_fraction": 0.6481889486312866, "avg_line_length": 24.604839324951172, "blob_id": "e7844ee80cf86fb40372c6fb5c9e877cfcf98624", "content_id": "9145e36cd0f34485a9999406f8d7ce66f531d77d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4331, "license_type": "no_license", "max_line_length": 80, "num_lines": 124, "path": 
"/GG/21 game.py", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "\"\"\"\n21点又名黑杰克(英文:Blackjack),起源于法国,已流传到世界各地。\n21点,是一种使用扑克牌玩的赌博游戏。亦是唯一一种在赌埸中可以在概率中战胜庄家的一种赌博游戏。\n———来自好搜百科\n我们定义21点的规则如下,和原始规则稍微不一样。\n牌点数如下:\nA 2 3 4 5 6 7 8 9 10 J Q K\nA当成1点\nJQK全部当成10点。\n我们假设赌场准备了很多副牌,也就是可以假设每次摸到每张牌的概率是一样的。\n玩家有两个人,分别为庄家和闲家。\n一开始两人都拿两张牌,两个人都可以看到对方的牌。\n闲家先操作,每次可以叫牌或者停止叫牌。\n如果叫牌,从牌堆中拿一张牌,一旦叫牌后手牌超过21点,直接判输,称为“爆点”,否则一直叫牌直到停止叫牌,轮到庄家。\n轮到庄家后,跟闲家一样的叫牌或停止叫牌,一旦爆点也是直接判输。\n如果没有爆点,谁点数大就谁赢,点数一样判平。\n给你两家的牌,如果闲家胜率>50%输出”YES”,否则输出”NO”\n哦,对了,每个人都是绝顶聪明的。\n\nInput\n第一行一个数Test(Test<=100000)。表示数据组数。\n下面每组数据,一个4字符字符串,前两个字符表示闲家的牌,后两张表示庄家的。\n(用T表示10)\nOutput\n对于每组数据输出”YES”或者”NO”,表示闲家是否有50%以上的胜率。\nSample Input\n1\nTTT9\nSample Output\nYES\n\n用dp[0][i][j]表示闲家当前点数为i,庄家当前点数为j,\n闲家叫牌时闲家获胜的最大几率(闲家要最大化自身赢率),则得到转移方程\ndp[0][i][j]=max{dp[0][i][j],(dp[0][i+1][j]+…+dp[0][i+9][j]+4*dp[0][i+10][j])/13}\n\n用dp[1][i][j]表示闲家当前点数为i,庄家当前点数为j,\n庄家叫牌时闲家获胜的最小几率(庄家要最小化闲家赢率),则得到转移方程\ndp[1][i][j]=min{dp[1][i][j],(dp[1][i][j+1]+…+dp[1][i][j+9]+4*dp[1][i][j+10])/13}\n\n两个分别用记忆化搜索即可\n————————————————\n\\Blackjack is a two player card game whose rules are as follows:\n\nThe player and then the dealer are each given two cards.\nThe player can then \"hit\", or ask for arbitrarily many additional cards, \nso as his or her total does not exceed 21.\n\nThe dealer must then hit if his or her total is 16 or lower, otherwise pass.\nFinally, the two compare totals, \nand the one with the greatest sum not exceeding 21 is the winner.\n\nFor this problem, we simplify the card values to be as follows: \neach card between 2 and 10 counts as their face value, \nface cards count as 10, and aces(A) count as 1.\n\nGiven perfect knowledge of the sequence of cards in the deck, \nimplement a blackjack solver that maximizes the player's score \n(that is, wins minus losses).\n\"\"\"\n\nimport random\n\n\nclass Deck:\n def __init__(self, seed=None):\n self.cards = [i for i in range(1, 10)] * 4 + [10] * 16\n 
random.seed(seed)\n random.shuffle(self.cards)\n\n def deal(self, start, n):\n return self.cards[start:start + n]\n\n\nclass Player:\n def __init__(self, hand):\n self.hand = hand\n self.total = 0\n\n def deal(self, cards):\n self.hand.extend(cards)\n self.total = sum(self.hand)\n\n\ndef cmp(x, y):\n return (x > y) - (x < y)\n\n\ndef play(deck, start, scores):\n player = Player(deck.deal(start, 2))\n dealer = Player(deck.deal(start + 2, 2))\n results = []\n\n for i in range(49 - start):\n count = start + 4\n player.deal(deck.deal(count, i))\n count += i\n\n if player.total > 21:\n results.append((-1, count))\n break\n\n while dealer.total < 17 and count < 52:\n dealer.deal(deck.deal(count, 1))\n count += 1\n if dealer.total > 21:\n results.append((1, count))\n else:\n results.append((cmp(player.total, dealer.total), count))\n\n options = []\n for score, next_start in results:\n options.append(score +\n scores[next_start] if next_start <= 48 else score)\n scores[start] = max(options)\n\n\ndef blackjack(seed=None):\n deck = Deck(seed)\n scores = [0 for _ in range(52)]\n\n for start in range(48, -1, -1):\n play(deck, start, scores)\n\n return scores[0]\n" }, { "alpha_fraction": 0.37038227915763855, "alphanum_fraction": 0.40603917837142944, "avg_line_length": 26.79464340209961, "blob_id": "21085423c61d1da15205cc18e6110603533a88c6", "content_id": "a7bd1b5f4d618cd7bbbd6feea8ec228e7d839d9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3113, "license_type": "no_license", "max_line_length": 80, "num_lines": 112, "path": "/GG/swift-pars.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "keyboard = {\n x: {},\n y: [\n \" q w e r t y u i o p\",\n \" a s d f g h j k l\",\n \" z x c v b n m\",\n ],\n};\n\nfor (var i in keyboard.y) {\n for (var y of keyboard.y[i]) {\n keyboard.x[y] = +i * 7;\n }\n}\n\np = (C) => ((x = keyboard.x[C]), { x, y: keyboard.y[x / 7].indexOf(C) 
});\nangle = (L, C, R) => (\n (p0 = p(L)),\n (p1 = p(C)),\n (p2 = p(R)),\n (a = Math.pow(p1.x - p0.x, 2) + Math.pow(p1.y - p0.y, 2)),\n (b = Math.pow(p1.x - p2.x, 2) + Math.pow(p1.y - p2.y, 2)),\n (c = Math.pow(p2.x - p0.x, 2) + Math.pow(p2.y - p0.y, 2)),\n (Math.acos((a + b - c) / Math.sqrt(4 * a * b)) / Math.PI) * 180\n);\ncorner = (L, C, R, N, W) => {\n if (skip) {\n skip = false;\n return [];\n }\n ngl = angle(L, C, R);\n if (ngl < 80) return [C + \"{1,3}\"];\n if (\n ngl < 115 &&\n p(L).x != p(R).x &&\n p(L).x != p(C) &&\n p(R).x != p(C).x &&\n Math.abs(p(L).y - p(R).y) < 5\n )\n return [C + \"{0,3}\"];\n if (ngl < 138) {\n if (N && Math.abs(ngl - angle(C, R, N)) < 6) {\n skip = true;\n return [L + \"{0,3}\", \"([\" + C + \"]{0,3}|[\" + R + \"]{0,3})?\", N + \"{0,3}\"];\n }\n return [C + \"{0,3}\"];\n }\n return [\"([\" + L + \"]{0,3}|[\" + C + \"]{0,3}|[\" + R + \"]{0,3})?\"];\n};\nf = (S) => {\n for (W = [S[0] + \"{1,2}\"], i = 1; i < S.length - 1; i++)\n W.push(...corner(S[i - 1], S[i], S[i + 1], S[i + 2], W));\n return [\n new RegExp(\"^\" + W.join(\"\") + S[S.length - 1] + \"{1,3}$\"),\n new RegExp(\n \"^\" +\n W.filter((C) => !~C.indexOf(\"[\")).join(\"\") +\n S[S.length - 1] +\n \"{1,3}$\"\n ),\n ];\n};\nthirdPass = (F, C) => {\n if (!F[0]) return null;\n F = F.filter((s, i) => !F[i - 1] || F[i - 1] != s);\n FF = F.map((T) =>\n [...T].filter((c, i) => !T[i - 1] || T[i - 1] != c).join(\"\")\n );\n if (FF.length == 1) return F[0];\n if (\n FF.length < 6 &&\n FF[0][2] &&\n FF[1][2] &&\n FF[0][0] == FF[1][0] &&\n FF[0][1] == FF[1][1]\n )\n if (Math.abs(F[0].length - F[1].length) < 1)\n for (i = 0; i < Math.min(F[0].length, FF[1].length); i++) {\n if (C.indexOf(FF[0][i]) < C.indexOf(FF[1][i])) return F[0];\n else if (C.indexOf(FF[0][i]) > C.indexOf(FF[1][i])) return F[1];\n }\n return F[0];\n};\nvar skip = false;\nSwiftKey = (C) => (\n (C = [...C].filter((c, i) => !C[i - 1] || C[i - 1] != c).join(\"\")),\n (skip = false),\n (matched = []),\n (secondPass = 
[]),\n (L = C.length),\n (reg = f(C)),\n words.forEach((W) => W.match(reg[0]) && matched.push(W)),\n words.forEach((W) => W.match(reg[1]) && secondPass.push(W)),\n (matched = matched.sort(\n (a, b) => Math.abs(L - a.length) > Math.abs(L - b.length)\n )),\n (secondPass = secondPass.sort(\n (a, b) => Math.abs(L - a.length) > Math.abs(L - b.length)\n )),\n (first = matched[0]),\n (second = secondPass[0]),\n (third = thirdPass(secondPass.length ? secondPass : matched, C)),\n second && second.length >= first.length - 1\n ? first != third\n ? third\n : second\n : third.length >= first.length\n ? third\n : first\n);\n\n// For use by js shell of latest\n" }, { "alpha_fraction": 0.5314258933067322, "alphanum_fraction": 0.5412757992744446, "avg_line_length": 21.691490173339844, "blob_id": "4d1c0948692d09bfd5b13041189e20b870ac8b9a", "content_id": "e5377db1dbed7c181f3a940723d6dcfb51df3d15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2596, "license_type": "no_license", "max_line_length": 77, "num_lines": 94, "path": "/High Freq/sort/lc148.java", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "import java.util.LinkedList;\n\n// Sort a linked list in O(n log n) time using constant space complexity.\n\n// Example 1:\n\n// Input: 4->2->1->3\n// Output: 1->2->3->4\n// Example 2:\n\n// Input: -1->5->3->4->0\n// Output: -1->0->3->4->5\n\n// 题目限定了时间必须为O(nlgn),符合要求只有快速排序,归并排序,堆排序,而根据单链表的特点,最适于用归并排序\n// 于不能通过坐标来直接访问元素,所以快排什么的可能不太容易实现\n// 堆排序的话,如果让新建结点的话,还是可以考虑的,若只能交换结点,最好还是不要用。\n// 而归并排序(又称混合排序)因其可以利用递归来交换数字,天然适合链表这种结构。\n\n// Definition for singly-linked list.\npublic class ListNode {\n int val;\n ListNode next;\n\n ListNode() {\n }\n\n ListNode(int val) {\n this.val = val;\n }\n\n ListNode(int val, ListNode next) {\n this.val = val;\n this.next = next;\n }\n}\n\n// 归并排序的核心是一个 merge() 函数,其主要是合并两个有序链表,\n// 而归并排序的核心其实是分治法 Divide and Conquer,就是将链表从中间断开,分成两部分,左右两边再分别调用排序的递归函数\n// 
sortList(),\nclass Solution {\n public ListNode sortList(ListNode head) {\n if (head == null || head.next == null) {\n return head;\n }\n\n ListNode middle = findMiddle(head);\n ListNode secondHalf = middle.next;\n middle.next = null;\n\n ListNode firstHalf = sortList(head);\n secondHalf = sortList(secondHalf);\n\n return merge(firstHalf, secondHalf);\n }\n\n private ListNode findMiddle(ListNode head) {\n ListNode slow = head;\n ListNode fast = head;\n\n while (fast != null && fast.next != null && fast.next.next != null) {\n slow = slow.next;\n fast = fast.next.next;\n }\n return slow;\n }\n\n private ListNode merge(ListNode one, ListNode two) {\n ListNode dummyHead = new ListNode(-1);\n ListNode prev = dummyHead;\n\n while (one != null && two != null) {\n if (one.val <= two.val) {\n prev.next = one;\n prev = one;\n one = one.next;\n } else {\n prev.next = two;\n prev = two;\n two = two.next;\n }\n }\n\n if (one != null) {\n prev.next = one;\n }\n\n if (two != null) {\n prev.next = two;\n }\n\n return dummyHead.next;\n }\n\n}" }, { "alpha_fraction": 0.5861538648605347, "alphanum_fraction": 0.5938461422920227, "avg_line_length": 28.545454025268555, "blob_id": "9a4e3308fba8c79cb605de9b5dcf3c2356749856", "content_id": "10b4951b877e6b84c565d404497a77d71a9861c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 650, "license_type": "no_license", "max_line_length": 71, "num_lines": 22, "path": "/GG/top-k-submission.js", "repo_name": "Youngtrust/algorithm-learning-note", "src_encoding": "UTF-8", "text": "// Input: words = [\"i\",\"love\",\"leetcode\",\"i\",\"love\",\"coding\"], k = 2\n// Output: [\"i\",\"love\"]\n// Explanation: \"i\" and \"love\" are the two most frequent words.\n// Note that \"i\" comes before \"love\" due to a lower alphabetical order.\n\n/**\n * @param {string[]} words\n * @param {number} k\n * @return {string[]}\n */\nvar topKFrequent = function (words, k) {\n let hash = {};\n for (let word 
of words) {\n hash[word] = hash[word] + 1 || 1;\n }\n let result = Object.keys(hash).sort((a, b) => {\n let countCompare = hash[b] - hash[a];\n if (countCompare == 0) return a.localeCompare(b);\n else return countCompare;\n });\n return result.slice(0, k);\n};\n" } ]
51
sr-henry/maze
https://github.com/sr-henry/maze
149129ac737104277500878ac3346b8e29ec3aa2
a6a29df0d219135fb91ef9e13cec82c8b87dc25d
214ddacc1bcc87d09c3d59faeede36a5569a7929
refs/heads/master
2020-03-31T13:50:43.145459
2019-06-28T12:25:00
2019-06-28T12:25:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5166009068489075, "alphanum_fraction": 0.5576967597007751, "avg_line_length": 22.155914306640625, "blob_id": "8324c3fd2bd6ab5ae9b7a419fbe3ed72a206b5e9", "content_id": "c983d10086288eb7400f2e8dae090c0e6c4ae639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4307, "license_type": "no_license", "max_line_length": 83, "num_lines": 186, "path": "/_Best_first.py", "repo_name": "sr-henry/maze", "src_encoding": "UTF-8", "text": "import importlib\nimport sys\nimport math\nfrom PIL import Image, ImageDraw\n\nblocked = []\n\ndef compute_image(cat, blocks, exits):\n\tim = Image.open(\"tabuleiro.png\").convert(\"RGBA\")\n\tdraw = ImageDraw.Draw(im)\n\n\tfor el in [exits]:\n\t\tshift = el[0] % 2 * 25\n\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\tinit_y = el[0]*49\n\t\tend_y = (el[0]+1)*49\n\t\tdraw.ellipse([init_x, init_y, end_x, end_y],\n\t\t\t\t\tfill=\"blue\"\n\t\t\t\t\t)\n\n\tfor el in blocks:\n\t\tshift = el[0] % 2 * 25\n\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\tinit_y = el[0]*49\n\t\tend_y = (el[0]+1)*49\n\t\tdraw.line([init_x+10, init_y+10, end_x-10, end_y-10],\n\t\t\t\t\tfill=\"red\", width=6)\n\t\tdraw.line([init_x+10, end_y-10, end_x-10, init_y+10],\n\t\t\t\t\tfill=\"red\", width=6)\n\n\tfor el in [cat]:\n\t\tshift = el[0] % 2 * 25\n\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\tinit_y = el[0]*49\n\t\tend_y = (el[0]+1)*49\n\t\tdraw.ellipse([init_x, init_y, end_x, end_y],\n\t\t\t\t\t\tfill=\"black\"\n\t\t\t\t\t\t)\n\tdel draw\n\treturn im\n\ndef is_valid(player):\n if (player[0] < 0 or player[0] > 10 or\n player[1] < 0 or player[1] > 10 or\n player in blocked):\n return False\n return True\n\ndef move(player, direction):\n canditates = []\n if player[0] % 2 == 0:\n candidates = [\n (player[0] - 1, player[1] - 1, \"NW\"),\n (player[0] - 1, player[1], 
\"NE\"),\n (player[0], player[1] - 1, \"W\" ),\n (player[0], player[1] + 1, \"E\" ),\n (player[0] + 1, player[1] - 1, \"SW\"),\n (player[0] + 1, player[1], \"SE\")\n ]\n else:\n candidates = [\n (player[0] - 1, player[1], \"NW\"),\n (player[0] - 1, player[1] + 1, \"NE\"),\n (player[0], player[1] - 1, \"W\" ),\n (player[0], player[1] + 1, \"E\" ),\n (player[0] + 1, player[1], \"SW\"),\n (player[0] + 1, player[1]+1, \"SE\")\n ]\n for el in candidates:\n cand = (el[0], el[1])\n if direction == el[2] and is_valid(cand):\n return cand\n return False\n\n# Diagonal\ndef heuristic_3(player, exit):\n\tdx = abs(player[0] - exit[0])\n\tdy = abs(player[1] - exit[1])\n\treturn max(dx, dy)\n\n# Manhattan\ndef heuristic_2(player, exit):\n\tdx = abs(player[0] - exit[0])\n\tdy = abs(player[1] - exit[1])\n\treturn dx + dy\n\n# Euclidean\ndef heuristic_1(player, exit):\n\tdx = (player[0] - exit[0])**2\n\tdy = (player[1] - exit[1])**2\n\tresultado = math.sqrt((dx + dy))\n\treturn resultado\n\n# Chebyshev\ndef heuristic(player, exit):\n\tD = 1\n\tD2 = 1\n\tdx = abs(player[0] - exit[0])\n\tdy = abs(player[1] - exit[1])\n\tresultado = D * (dx + dy) + (D2 - 2 * D) * min(dx, dy)\n\treturn resultado\n\ndef neighbourhood(player):\n\tn = []\n\tfor d in ['E','NE','SE','W','NW','SW']:\n\t\t_player = move(player, d)\n\t\tif _player:\n\t\t\tn.append(_player)\n\treturn n\n\nvisited = set()\n\nbf_path = []\n\ndef generate_routes(player, exit):\n\n\tF = {}\n\n\tcurrent = player\n\n\tprint('Current Node: ' + str(current))\n\t\n\tbf_path.append(current)\n\n\tif current == exit:\n\t\treturn True\n\n\tvisited.add(current)\n\n\tneighbours = []\n\n\tfor n in neighbourhood(current):\n\t\tif n in visited:\n\t\t\tcontinue\n\t\tneighbours.append(n)\n\t\tF[n] = heuristic_1(n, exit)\n\t\n\tif neighbours:\n\t\tprint('Checking neighbourhood: ' + str(F))\n\t\tfor el in sorted(F.items(), key=lambda t: t[1]):\n\t\t\tif not generate_routes(el[0], exit):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\treturn 
True\n\telse:\n\t\tprint('Backtracking...')\n\t\treturn False\n\t\n\treturn False\n\n\ndef create_gif(path, exit):\n\timages = []\n\tfor step in path:\n\t\timages.append(compute_image(step, blocked, exit))\n\timages[0].save('BestFirst_' + filename + '-' + str(saida) + '.gif', save_all=True,\n\t append_images=images[1:], duration=200, loop=0)\n\nfilename = sys.argv[1]\n\nmod = importlib.import_module(filename)\n\njogador = tuple(mod.cat)\nsaidas = set(mod.exits)\nbloqueados = mod.blocks\nminimo = mod.minimum\n\nblocked = bloqueados[:]\n\nrotas = {}\n\nprint('[:] Minimum Cost: ' + str(minimo))\n\nfor saida in saidas:\n\tprint('\\n\\n[@] BestFirst\\t' + str(jogador) + '->' + str(saida))\n\tif generate_routes(jogador, saida):\n\t\tprint('[+] Found Route\\n[!] Route Cost:' + str(len(bf_path)-1))\n\t\tcreate_gif(bf_path, saida)\n\telse:\n\t\tprint('[-] BestFirst failed to find a solution')\n\tbf_path.clear()\n\tvisited.clear()\n\t#rotas[custo] = rota\n" }, { "alpha_fraction": 0.5154072642326355, "alphanum_fraction": 0.5546614527702332, "avg_line_length": 22.92018699645996, "blob_id": "696e3f42bfd8c87ee0fdada0473be770871325ca", "content_id": "3ddcdcc94c1bb0c1857aaefbb2781d5ba1ea36ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5095, "license_type": "no_license", "max_line_length": 127, "num_lines": 213, "path": "/_A_star.py", "repo_name": "sr-henry/maze", "src_encoding": "UTF-8", "text": "import importlib\nimport sys\nimport math\nfrom PIL import Image, ImageDraw\n\nblocked = []\n\ndef compute_image(cat, blocks, exits):\n\tim = Image.open(\"tabuleiro.png\").convert(\"RGBA\")\n\tdraw = ImageDraw.Draw(im)\n\n\tif type(exits) == list:\n\t\tfor el in exits:\n\t\t\tshift = el[0] % 2 * 25\n\t\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\t\tinit_y = el[0]*49\n\t\t\tend_y = (el[0]+1)*49\n\t\t\tdraw.ellipse([init_x, init_y, end_x, 
end_y],\n\t\t\t\t\t\tfill=\"blue\"\n\t\t\t\t\t\t)\n\telse:\n\t\tfor el in [exits]:\n\t\t\tshift = el[0] % 2 * 25\n\t\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\t\tinit_y = el[0]*49\n\t\t\tend_y = (el[0]+1)*49\n\t\t\tdraw.ellipse([init_x, init_y, end_x, end_y],\n fill=\"blue\"\n )\n\n\tfor el in blocks:\n\t\tshift = el[0] % 2 * 25\n\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\tinit_y = el[0]*49\n\t\tend_y = (el[0]+1)*49\n\t\tdraw.line([init_x+10, init_y+10, end_x-10, end_y-10],\n\t\t\t\t\tfill=\"red\", width=6)\n\t\tdraw.line([init_x+10, end_y-10, end_x-10, init_y+10],\n\t\t\t\t\tfill=\"red\", width=6)\n\n\tfor el in [cat]:\n\t\tshift = el[0] % 2 * 25\n\t\tinit_x = shift + el[1]*50 + el[1]*5\n\t\tend_x = shift + (el[1]+1)*50 + el[1]*5\n\t\tinit_y = el[0]*49\n\t\tend_y = (el[0]+1)*49\n\t\tdraw.ellipse([init_x, init_y, end_x, end_y],\n\t\t\t\t\t\tfill=\"black\"\n\t\t\t\t\t\t)\n\tdel draw\n\treturn im\n\ndef is_valid(player):\n if (player[0] < 0 or player[0] > 10 or\n player[1] < 0 or player[1] > 10 or\n player in blocked):\n return False\n return True\n\ndef move(player, direction):\n canditates = []\n if player[0] % 2 == 0:\n candidates = [\n (player[0] - 1, player[1] - 1, \"NW\"),\n (player[0] - 1, player[1], \"NE\"),\n (player[0], player[1] - 1, \"W\" ),\n (player[0], player[1] + 1, \"E\" ),\n (player[0] + 1, player[1] - 1, \"SW\"),\n (player[0] + 1, player[1], \"SE\")\n ]\n else:\n candidates = [\n (player[0] - 1, player[1], \"NW\"),\n (player[0] - 1, player[1] + 1, \"NE\"),\n (player[0], player[1] - 1, \"W\" ),\n (player[0], player[1] + 1, \"E\" ),\n (player[0] + 1, player[1], \"SW\"),\n (player[0] + 1, player[1]+1, \"SE\")\n ]\n for el in candidates:\n cand = (el[0], el[1])\n if direction == el[2] and is_valid(cand):\n return cand\n return False\n\n# Diagonal\ndef heuristic_3(player, exit):\n\tdx = abs(player[0] - exit[0])\n\tdy = abs(player[1] - exit[1])\n\treturn max(dx, 
dy)\n\n# Manhattan\ndef heuristic_2(player, exit):\n\tdx = abs(player[0] - exit[0])\n\tdy = abs(player[1] - exit[1])\n\treturn dx + dy\n\n# Euclidean\ndef heuristic_1(player, exit):\n\tdx = (player[0] - exit[0])**2\n\tdy = (player[1] - exit[1])**2\n\tresultado = math.sqrt((dx + dy))\n\treturn resultado\n\n# Chebyshev\ndef heuristic(player, exit):\n\tD = 1\n\tD2 = 1\n\tdx = abs(player[0] - exit[0])\n\tdy = abs(player[1] - exit[1])\n\tresultado = D * (dx + dy) + (D2 - 2 * D) * min(dx, dy)\n\treturn resultado\n\ndef neighbourhood(player):\n\tn = []\n\tfor d in ['E','NE','SE','W','NW','SW']:\n\t\t_player = move(player, d)\n\t\tif _player:\n\t\t\tn.append(_player)\n\treturn n\n\ndef generate_routes(player, exit):\n\n\tprint('\\n\\n[@] A* Search\\t' + str(player) + '->' + str(exit))\n\n\tG = {} \n\tF = {}\n\n\tG[player] = 0 \n\tF[player] = heuristic(player, exit)\n\n\topen = set([player])\n\tclosed = set()\n\ttrajeto = {}\n\n\twhile len(open) > 0:\n\n\t\tcurrent = None\n\t\tcurrentF = None\n\n\t\tfor pos in open:\n\t\t\tif current is None or F[pos] < currentF:\n\t\t\t\tcurrentF = F[pos]\n\t\t\t\tcurrent = pos\n\n\t\tprint('Selected Node :' + str(current))\n\t\t\n\t\tif current == exit:\n\t\t\tprint('[+] Found Route ')\n\t\t\tcaminho = [current]\n\t\t\twhile current in trajeto:\n\t\t\t\tcurrent = trajeto[current]\n\t\t\t\tcaminho.append(current)\n\t\t\tcaminho.reverse()\n\t\t\treturn caminho, len(caminho)-1\n \n\t\topen.remove(current)\n\t\tclosed.add(current)\n\n\t\tprint('Checking neighbourhood: ', end='')\n\n\t\tfor neighbour in neighbourhood(current):\n\t\t\tif neighbour in closed:\n\t\t\t\tcontinue \n\t\t\t\t\n\t\t\tcandidateG = G[current] + 1\n \n\t\t\tif neighbour not in open:\n\t\t\t\topen.add(neighbour)\n\t\t\telif candidateG >= G[neighbour]:\n\t\t\t\tcontinue \n\n\t\t\ttrajeto[neighbour] = current\n\t\t\tG[neighbour] = candidateG\n\t\t\tF[neighbour] = G[neighbour] + heuristic_1(neighbour, exit)\n\n\t\t\tprint('[' + str(neighbour) + ':' + str(F[neighbour]) + ']', 
end=', ')\n\n\t\tprint()\n\t\t\n\tprint(\"[-] A* failed to find a solution\")\n\ndef create_gif(path):\n\timages = []\n\tfor step in path:\n\t\timages.append(compute_image(step, blocked, saida))\n\timages[0].save('AStar_' + filename + '-' + str(saida) + '.gif', save_all=True, append_images=images[1:], duration=200, loop=0)\n\nfilename = sys.argv[1]\n\nmod = importlib.import_module(filename)\n\njogador = tuple(mod.cat)\nsaidas = set(mod.exits)\nbloqueados = mod.blocks\nminimo = mod.minimum\n\nblocked = bloqueados[:]\n\nrotas = {}\n\nprint('[:] Minimum Cost: ' + str(minimo))\n\n# compute_image(jogador, blocked, list(saidas)).save(filename + '.png')\n\nfor saida in saidas:\n\trota, custo = generate_routes(jogador, saida)\n\tprint('[!] Route Cost: ' + str(custo))\n\trotas[custo] = rota\n\tcreate_gif(rota)\n" } ]
2
clovekim/example2
https://github.com/clovekim/example2
8b6e24777f82d0d0591d2a336cdb716d9f9d8844
f20c4386c8b11c74912d11a5ee9bf6c734e137de
f87212105040c41f5b6040bd34ed102031c1eab2
refs/heads/master
2020-04-25T02:50:37.159837
2019-02-25T07:52:52
2019-02-25T07:52:52
172,454,728
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47191011905670166, "alphanum_fraction": 0.47191011905670166, "avg_line_length": 20.75, "blob_id": "6eb43494ab39d32b1d0fda1763584a74cd33816d", "content_id": "4a06fac8d681caf511ee3710dd30e182c95ea378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/index.py", "repo_name": "clovekim/example2", "src_encoding": "UTF-8", "text": "if __name__ == \"__main__\":\r\n print(\"hello world\")\r\n print(\"h\")\r\n print(\"remote\")" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.529411792755127, "avg_line_length": 8.600000381469727, "blob_id": "20cea134c3608e311235841a22ce924f2c02f489", "content_id": "545661f03d66fa0305788256595433531d77bb72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 11, "num_lines": 5, "path": "/ClassA.py", "repo_name": "clovekim/example2", "src_encoding": "UTF-8", "text": "class A:\r\n kdkdkgj\r\n pass\r\nclass B:\r\n pass" } ]
2
messente/verigator-python
https://github.com/messente/verigator-python
8366911446ab9b95d9b25f212abec4746d4780ae
a1eb020785dc1474b8a74ff351b6f0c47b649e16
066f8af8063270087bd1e03b44f36b04fe21c90e
refs/heads/master
2018-12-06T10:47:07.294944
2018-10-05T12:29:36
2018-10-05T12:29:36
103,281,307
0
0
Apache-2.0
2017-09-12T14:30:35
2018-09-10T14:00:45
2018-10-05T12:29:37
Python
[ { "alpha_fraction": 0.6643677949905396, "alphanum_fraction": 0.6850574612617493, "avg_line_length": 38.54545593261719, "blob_id": "9073688f07a944bc102b706907b5828a8a0fbeb2", "content_id": "7bd8531245a01f1dc043d35b6e178525325600ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 435, "license_type": "permissive", "max_line_length": 53, "num_lines": 11, "path": "/messente/verigator/routes.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "URL = \"https://api.verigator.com\"\n\nCREATE_SERVICE = \"v1/service/service\"\nGET_SERVICE = \"v1/service/service/{}\"\nDELETE_SERVICE = \"v1/service/service/{}\"\nGET_USERS = \"v1/service/service/{}/users\"\nGET_USER = \"v1/service/service/{}/users/{}\"\nCREATE_USER = \"v1/service/service/{}/users\"\nDELETE_USER = \"v1/service/service/{}/users/{}\"\nAUTH_INITIATE = \"v1/service/service/{}/users/{}/auth\"\nAUTH_VERIFY = \"v1/service/service/{}/users/{}/auth\"\n" }, { "alpha_fraction": 0.4897959232330322, "alphanum_fraction": 0.6938775777816772, "avg_line_length": 15.666666984558105, "blob_id": "22a0e0dcc3700962d7b0746746bec20e64001fff", "content_id": "51d7397a1bc1d5e9208332f727ce3afac69178ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 49, "license_type": "permissive", "max_line_length": 20, "num_lines": 3, "path": "/requirements.txt", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "requests==2.18.4\nrequests-mock==1.3.0\nmock==2.0.0" }, { "alpha_fraction": 0.6270374059677124, "alphanum_fraction": 0.6458932757377625, "avg_line_length": 41.863014221191406, "blob_id": "374f544605c4bfeab456cd3d1c23ba6a9396e1f9", "content_id": "a1a6e57380a5773e325472000e5f881f66e0b522", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3129, "license_type": "permissive", 
"max_line_length": 108, "num_lines": 73, "path": "/messente/verigator/test/test_users.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom mock import MagicMock\n\nfrom messente.verigator import routes, client, controllers, exceptions\n\n\nclass TestUsers(TestCase):\n def setUp(self):\n self.rest_client = client.RestClient(\"http://test\", \"test\", \"test\")\n self.users = controllers.Users(self.rest_client)\n self.sample_response = {\n \"id_in_service\": \"test2\",\n \"ctime\": \"2017-09-15T08:13:26.965341\",\n \"id\": \"38fb335c-025d-45eb-9cf2-d2d4d9f54203\"\n }\n\n def tearDown(self):\n pass\n\n def test_create(self):\n self.rest_client.post = MagicMock(return_value=self.sample_response)\n res = self.users.create(\"service_id\", \"0123\", \"username\")\n\n self.rest_client.post.assert_called_with(routes.CREATE_USER.format(\"service_id\"),\n json={\"id_in_service\": \"username\", \"phone_number\": \"0123\"})\n self.assertEqual(res.id, self.sample_response['id'])\n self.assertEqual(res.creation_time, self.sample_response['ctime'])\n self.assertEqual(res.username, self.sample_response['id_in_service'])\n\n def test_get(self):\n self.rest_client.get = MagicMock(return_value=self.sample_response)\n res = self.users.get(\"sid\", \"uid\")\n\n self.rest_client.get.assert_called_with(routes.GET_USER.format(\"sid\", \"uid\"))\n self.assertEqual(res.id, self.sample_response['id'])\n self.assertEqual(res.creation_time, self.sample_response['ctime'])\n self.assertEqual(res.username, self.sample_response['id_in_service'])\n\n def test_get_all(self):\n self.rest_client.get = MagicMock(return_value={\"users\": [self.sample_response]})\n res = self.users.get_all(\"sid\")\n\n self.rest_client.get.assert_called_with(routes.GET_USERS.format(\"sid\"))\n self.assertEqual(len(res), 1)\n self.assertEqual(res[0].id, self.sample_response['id'])\n self.assertEqual(res[0].creation_time, self.sample_response['ctime'])\n 
self.assertEqual(res[0].username, self.sample_response['id_in_service'])\n\n def test_delete(self):\n self.rest_client.delete = MagicMock(return_value=self.sample_response)\n res = self.users.delete(\"sid\", \"uid\")\n\n self.rest_client.delete.assert_called_with(routes.DELETE_USER.format(\"sid\", \"uid\"))\n self.assertTrue(res)\n\n def test_create_failed(self):\n self.rest_client.post = MagicMock(side_effect=exceptions.ResourceAlreadyExistsError(409, \"message\"))\n\n try:\n self.users.create(\"\", \"\", \"\")\n except exceptions.ResourceAlreadyExistsError as e:\n self.assertEqual(e.code, 409)\n self.assertEqual(e.message, \"message\")\n else:\n self.fail(\"Exception not raised\")\n\n def test_invalid_input(self):\n self.assertRaises(ValueError, controllers.Users, None)\n self.assertRaises(ValueError, self.users.create, None, None, None)\n self.assertRaises(ValueError, self.users.get, None, None)\n self.assertRaises(ValueError, self.users.get_all, None)\n self.assertRaises(ValueError, self.users.delete, None, None)\n" }, { "alpha_fraction": 0.6438356041908264, "alphanum_fraction": 0.6472602486610413, "avg_line_length": 20.120481491088867, "blob_id": "dbea9021f1a84a18ee884cc3db221f2fcaf761fc", "content_id": "ccc2da7e6f5780a76afe6c98b6c00834ca3d2504", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1752, "license_type": "permissive", "max_line_length": 91, "num_lines": 83, "path": "/README.rst", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "Verigator Python Library\n========================\n\n|Build Status|\n\nEasy to use Python (2.7 and 3) wrapper for verigator rest api.\n\nInstalling\n~~~~~~~~~~\n\nThe library can be installed from pip:\n\n::\n\n pip install verigator\n\nOr you can build it manually:\n\n::\n\n git clone https://github.com/messente/verigator-python.git\n\n cd verigator-python\n\n python setup.py 
install\n\n::\n\nDocumentation\n~~~~~~~~~~~~~\n\ndetailed docs can be found `here`_\n\nExamples\n~~~~~~~~\n\n.. code:: python\n\n from messente.verigator.api import Api\n\n # initialize api\n api = Api(\"username\", \"password\")\n\n # create example service\n service = api.services.create(\"http://example.com\", \"service_name\")\n\n # add user to the created service\n user = api.users.create(service.id, \"+xxxxxxxxxxx\", \"username\")\n\n # initiate sms authentication, you can use api.auth.METHOD_TOTP for time\n api.auth.initiate(service.id, user.id, api.auth.METHOD_SMS)\n\n # check user input until successfull pin verification\n while True:\n try:\n input = raw_input # Python 2 compatibility\n except NameError:\n pass\n\n # read user input\n token = input(\"Enter Sms Pin: \")\n\n # verify pin\n verified = api.auth.verify(service.id, user.id, token)\n\n if verified:\n break\n\n print(\"Not Verified...\")\n\n print(\"Verified Successfully!\")\n\nLicense\n~~~~~~~\n\nThis project is licensed under the Apache License 2.0 - see the\n`LICENSE.txt`_ file for details\n\n.. _here: https://messente.github.io/verigator-python/modules.html\n.. _LICENSE.txt: LICENSE.txt\n\n.. 
|Build Status| image:: https://travis-ci.org/messente/verigator-python.svg?branch=master\n :target: https://travis-ci.org/messente/verigator-python" }, { "alpha_fraction": 0.5885471701622009, "alphanum_fraction": 0.5991516709327698, "avg_line_length": 32.92086410522461, "blob_id": "b6f60ba500b8e7d2c04196ba0104e9b9a480942b", "content_id": "c0f8fac574929d06c62ebdea010fa869f6b40b63", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4715, "license_type": "permissive", "max_line_length": 101, "num_lines": 139, "path": "/messente/verigator/client.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "import requests\n\nfrom messente.verigator import exceptions\n\n\nclass RestClient(object):\n \"\"\"Simple http client that handles authentication and content-type\n by default for post and put calls. Default headers are\n content-type: application/json and accept: application/json, however they can be override\n\n Note: If server returns any other status code except 2xx, client will raise appropriate exception\n\n\n Attributes:\n endpoint (str): server url, any other paths will be appended to it\n auth_header (dict): default headers for each request (contains only auth header)\n \"\"\"\n\n def __init__(self, endpoint, username, password):\n \"\"\"\n\n Args:\n endpoint (str): server url, any other paths will be appended to it\n username (str): used for authentication\n password (str): used for authentication\n \"\"\"\n self.endpoint = endpoint\n self.auth_header = {\n \"X-Service-Auth\": \":\".join([username, password])\n }\n self.content_type_headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n\n def get(self, path, params=None, headers=None):\n \"\"\"\n Wrapper around requests get method\n Args:\n path (str): request path\n params (dict): url parameters\n headers (dict): additional headers\n\n Returns:\n dict: response body\n \"\"\"\n 
new_headers = self.__merge_dicts(self.auth_header, headers)\n return self._request(\"GET\", self.__url(path), params=params, headers=new_headers)\n\n def post(self, path, headers=None, json=None):\n \"\"\"\n Wrapper around requests post method\n Args:\n path (str): request path\n headers (dict): additional headers\n json (dict): request payload\n\n Returns:\n dict: response body\n \"\"\"\n new_headers = self.__merge_dicts(self.auth_header, self.content_type_headers)\n new_headers = self.__merge_dicts(new_headers, headers)\n return self._request(\"POST\", self.__url(path), headers=new_headers, json=json)\n\n def put(self, path, headers=None, json=None):\n \"\"\"\n Wrapper around requests put method\n Args:\n path (str): request path\n headers (dict): additional headers\n json (dict): request payload\n\n Returns:\n dict: response body\n \"\"\"\n new_headers = self.__merge_dicts(self.auth_header, self.content_type_headers)\n new_headers = self.__merge_dicts(new_headers, headers)\n return self._request(\"PUT\", self.__url(path), headers=new_headers, json=json)\n\n def delete(self, path, headers=None):\n \"\"\"\n Wrapper around requests delete method\n Args:\n path (str): request path\n headers (dict): additional headers\n\n Returns:\n dict: response body\n \"\"\"\n new_headers = self.__merge_dicts(self.auth_header, headers)\n return self._request(\"DELETE\", self.__url(path), headers=new_headers)\n\n def __url(self, path):\n return \"/\".join([self.endpoint.strip(\"/\"), path])\n\n @staticmethod\n def __merge_dicts(first, second):\n try:\n new_headers = first.copy()\n except AttributeError:\n new_headers = {}\n\n try:\n new_headers.update(second)\n except TypeError:\n pass\n\n return new_headers\n\n @staticmethod\n def _request(method, path, params=None, headers=None, json=None):\n resp = requests.request(method, path, params=params, headers=headers, json=json)\n\n status_code = resp.status_code\n try:\n resp_json = resp.json()\n except ValueError:\n raise 
exceptions.InvalidResponseError(0, resp.text)\n\n message = resp_json.get('message', None)\n\n if status_code == 400:\n raise exceptions.InvalidDataError(400, message)\n elif status_code == 401:\n raise exceptions.WrongCredentialsError(401, message)\n elif status_code == 403:\n raise exceptions.ResourceForbiddenError(403, message)\n elif status_code == 404:\n raise exceptions.NoSuchResourceError(404, message)\n elif status_code == 409:\n raise exceptions.ResourceAlreadyExistsError(409, message)\n elif status_code == 422:\n raise exceptions.InvalidDataError(422, message)\n elif status_code == 500:\n raise exceptions.InternalError(500, resp_json)\n elif 300 <= status_code <= 600:\n raise exceptions.VerigatorError(status_code, resp_json)\n\n return resp_json\n" }, { "alpha_fraction": 0.6730158925056458, "alphanum_fraction": 0.6730158925056458, "avg_line_length": 23.230770111083984, "blob_id": "8831af9ee55f0e722db96ced64f5176229b5abf4", "content_id": "c5d2e321661bca14b38901daead5ecbb5b0e66ae", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "permissive", "max_line_length": 42, "num_lines": 13, "path": "/messente/verigator/test/test_api.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom messente.verigator import api\n\n\nclass ApiTest(TestCase):\n def setUp(self):\n self.api = api.Api(\"\", \"\")\n\n def test_contains_required_libs(self):\n self.assertTrue(self.api.auth)\n self.assertTrue(self.api.services)\n self.assertTrue(self.api.users)\n" }, { "alpha_fraction": 0.6849817037582397, "alphanum_fraction": 0.6862027049064636, "avg_line_length": 23.81818199157715, "blob_id": "0e6f289f772367523a526252da09d5ae849da65f", "content_id": "f6837ba98a6f5383a3ed4b33a52ec3eec5fae664", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": 
"permissive", "max_line_length": 72, "num_lines": 33, "path": "/examples/example.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "from messente.verigator.api import Api\n\n# initialize api\napi = Api(\"username\", \"password\")\n\n# create example service\nservice = api.services.create(\"http://example.com\", \"service_name\")\n\n# add user to the created service\nuser = api.users.create(service.id, \"+xxxxxxxxxxx\", \"username\")\n\n# initiate sms authentication, you can use api.auth.METHOD_TOTP for time\napi.auth.initiate(service.id, user.id, api.auth.METHOD_SMS)\n\n# check user input until successfull pin verification\nwhile True:\n try:\n input = raw_input # Python 2 compatibility\n except NameError:\n pass\n\n # read user input\n token = input(\"Enter Sms Pin: \")\n \n # verify pin\n verified = api.auth.verify(service.id, user.id, token)\n\n if verified:\n break\n\n print(\"Not Verified...\")\n\nprint(\"Verified Successfully!\")\n" }, { "alpha_fraction": 0.6458072662353516, "alphanum_fraction": 0.6566541790962219, "avg_line_length": 38.295082092285156, "blob_id": "78bccb0324b55e6f4b185f013b8fbc0099cb77c5", "content_id": "c7a0331dd3f2c94118edfa6877e490e89893529b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "permissive", "max_line_length": 112, "num_lines": 61, "path": "/messente/verigator/test/test_services.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom mock import MagicMock\n\nfrom messente.verigator import routes, client, controllers, exceptions\n\n\nclass TestServices(TestCase):\n def setUp(self):\n self.rest_client = client.RestClient(\"https://test\", \"test\", \"test\")\n self.services = controllers.Services(self.rest_client)\n self.sample_response = {\n \"id\": \"id\",\n \"ctime\": \"2017-09-15T06:44:15.274438\",\n \"name\": \"name\"\n }\n\n def 
tearDown(self):\n pass\n\n def test_create(self):\n self.rest_client.post = MagicMock(return_value=self.sample_response)\n res = self.services.create(\"domain\", \"name\")\n\n self.rest_client.post.assert_called_with(routes.CREATE_SERVICE, json={\"fqdn\": \"domain\", \"name\": \"name\"})\n self.assertEqual(res.id, self.sample_response['id'])\n self.assertEqual(res.creation_time, self.sample_response['ctime'])\n self.assertEqual(res.name, self.sample_response['name'])\n\n def test_get(self):\n self.rest_client.get = MagicMock(return_value=self.sample_response)\n res = self.services.get(\"id\")\n\n self.rest_client.get.assert_called_with(routes.GET_SERVICE.format(\"id\"))\n self.assertEqual(res.id, self.sample_response['id'])\n self.assertEqual(res.creation_time, self.sample_response['ctime'])\n self.assertEqual(res.name, self.sample_response['name'])\n\n def test_delete(self):\n self.rest_client.delete = MagicMock(return_value=self.sample_response)\n res = self.services.delete(\"id\")\n\n self.rest_client.delete.assert_called_with(routes.DELETE_SERVICE.format(\"id\"))\n self.assertTrue(res)\n\n def test_create_failed(self):\n self.rest_client.post = MagicMock(side_effect=exceptions.ResourceAlreadyExistsError(409, \"message\"))\n\n try:\n self.services.create(\"\", \"\")\n except exceptions.ResourceAlreadyExistsError as e:\n self.assertEqual(e.code, 409)\n self.assertEqual(e.message, \"message\")\n else:\n self.fail(\"Exception not raised\")\n\n def test_invalid_input(self):\n self.assertRaises(ValueError, controllers.Services, None)\n self.assertRaises(ValueError, self.services.create, None, None, None)\n self.assertRaises(ValueError, self.services.get, None, None)\n self.assertRaises(ValueError, self.services.delete, None, None)\n" }, { "alpha_fraction": 0.5641207695007324, "alphanum_fraction": 0.5641207695007324, "avg_line_length": 37.5616455078125, "blob_id": "114976e77e88d387fc69962b10743cb249a8c097", "content_id": "c5c4b843c7fd13a2f6e538077785664ffa7e0575", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2815, "license_type": "permissive", "max_line_length": 91, "num_lines": 73, "path": "/messente/verigator/test/test_auth.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom mock import MagicMock\n\nfrom messente.verigator import client, controllers, routes\n\n\nclass TestAuth(TestCase):\n def setUp(self):\n self.rest_client = client.RestClient(\"http://test\", \"test\", \"test\")\n self.auth = controllers.Auth(self.rest_client)\n self.sms_init_response = {\n \"method\": \"sms\"\n }\n self.totp_init_response = {\n \"method\": \"totp\"\n }\n\n self.verified_response = {\n \"verified\": True\n }\n self.failed_response = {\n \"verified\": False,\n \"status\": {\n \"throttled\": False,\n \"expired\": False,\n \"invalid\": True,\n \"result\": \"INVALID\"\n },\n }\n\n def test_initiate_sms(self):\n self.rest_client.post = MagicMock(return_value=self.sms_init_response)\n self.auth.initiate(\"sid\", \"uid\", self.auth.METHOD_SMS)\n\n self.rest_client.post.assert_called_with(routes.AUTH_INITIATE.format(\"sid\", \"uid\"),\n json={\"method\": \"sms\"})\n\n def test_initiate_totp(self):\n self.rest_client.post = MagicMock(return_value=self.totp_init_response)\n self.auth.initiate(\"sid\", \"uid\", self.auth.METHOD_TOTP)\n\n self.rest_client.post.assert_called_with(routes.AUTH_INITIATE.format(\"sid\", \"uid\"),\n json={\"method\": \"totp\"})\n\n def test_verify_sms(self):\n self.rest_client.put = MagicMock(return_value=self.verified_response)\n verified = self.auth.verify(\"sid\", \"uid\", \"token\")\n\n self.rest_client.put.assert_called_with(routes.AUTH_VERIFY.format(\"sid\", \"uid\"),\n json={\"token\": \"token\"})\n self.assertTrue(verified)\n\n def test_verify_totp(self):\n self.rest_client.put = MagicMock(return_value=self.verified_response)\n verified = self.auth.verify(\"sid\", \"uid\", 
\"token\")\n\n self.rest_client.put.assert_called_with(routes.AUTH_VERIFY.format(\"sid\", \"uid\"),\n json={\"token\": \"token\"})\n self.assertTrue(verified)\n\n def test_verify_failed(self):\n self.rest_client.put = MagicMock(return_value=self.failed_response)\n verified = self.auth.verify(\"sid\", \"uid\", \"token\")\n\n self.rest_client.put.assert_called_with(routes.AUTH_VERIFY.format(\"sid\", \"uid\"),\n json={\"token\": \"token\"})\n self.assertFalse(verified)\n\n def test_invalid_input(self):\n self.assertRaises(ValueError, controllers.Auth, None)\n self.assertRaises(ValueError, self.auth.initiate, None, None, None)\n self.assertRaises(ValueError, self.auth.verify, None, None, None, None)\n" }, { "alpha_fraction": 0.5734703540802002, "alphanum_fraction": 0.5744339227676392, "avg_line_length": 22.49811363220215, "blob_id": "caf7217d0b5b340b2ed6dea4f9f754f135eae1fa", "content_id": "92fd02d11d45bdc10f199042650a87081a0c820f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6227, "license_type": "permissive", "max_line_length": 110, "num_lines": 265, "path": "/messente/verigator/controllers.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "\"\"\"\nControllers module of the api\n\"\"\"\n\nimport sys\nfrom functools import wraps\n\nfrom messente.verigator import routes, models, client\n\nPY2 = sys.version_info.major == 2\n\n\ndef _validate_input(func):\n # decorator for validating that passed arguments are all string\n @wraps(func)\n def wrapper(*args):\n for arg in args[1:]:\n if not isinstance(arg, str if not PY2 else basestring):\n raise ValueError(\"{} should be string\".format(arg))\n return func(*args)\n\n return wrapper\n\n\ndef _validate_client(func):\n # decorator for validating that passed client is RestClient\n @wraps(func)\n def wrapper(self, rest_client):\n if not isinstance(rest_client, client.RestClient):\n raise ValueError(\"client should be 
RestClient\")\n return func(self, rest_client)\n\n return wrapper\n\n\nclass Services(object):\n \"\"\"\n Controller for service resource\n \"\"\"\n\n @_validate_client\n def __init__(self, rest_client):\n \"\"\"\n\n Args:\n rest_client (client.RestClient):\n \"\"\"\n self.rest_client = rest_client\n\n @_validate_input\n def create(self, domain, name):\n \"\"\"Creates new service.\n\n Args:\n domain (str): The domain name.\n\n name (str): The name of the service.\n\n Returns:\n models.Service: created service\n\n \"\"\"\n json = {\n 'fqdn': domain,\n 'name': name\n }\n\n response = self.rest_client.post(routes.CREATE_SERVICE, json=json)\n return self.__service_from_json(response)\n\n @_validate_input\n def get(self, id):\n \"\"\"Fetches service with a given id from the server.\n\n Args:\n id (str): The id of the service\n\n Returns:\n models.Service: Fetched service\n\n \"\"\"\n response = self.rest_client.get(routes.GET_SERVICE.format(id))\n\n return self.__service_from_json(response)\n\n @_validate_input\n def delete(self, id):\n \"\"\"Deletes service with id\n\n Args:\n id (str): service id\n\n Returns:\n bool:\n\n \"\"\"\n self.rest_client.delete(routes.DELETE_SERVICE.format(id))\n return True\n\n @staticmethod\n def __service_from_json(json):\n return models.Service(json['id'], json['ctime'], json['name'])\n\n\n# noinspection PyShadowingBuiltins\nclass Users(object):\n \"\"\"Controller for service resource\n \"\"\"\n\n @_validate_client\n def __init__(self, rest_client):\n \"\"\"\n\n Args:\n rest_client (client.RestClient):\n \"\"\"\n self.rest_client = rest_client\n\n @_validate_input\n def get_all(self, service_id):\n \"\"\"Fetches all users for the given service\n\n Args:\n service_id (str): service id to search users for\n\n Returns:\n list[models.User]: list of users\n\n \"\"\"\n response = self.rest_client.get(routes.GET_USERS.format(service_id))\n\n return [self.__user_from_json(user) for user in response['users']]\n\n @_validate_input\n def get(self, 
service_id, id):\n \"\"\"Fetches single user with given id for the given service\n\n Args:\n service_id (str): service id\n\n id (str): user id\n\n Returns:\n models.User: fetched user\n\n \"\"\"\n response = self.rest_client.get(routes.GET_USER.format(service_id, id))\n return self.__user_from_json(response)\n\n @_validate_input\n def create(self, service_id, number, username):\n \"\"\"Creates new user for the given service\n\n Args:\n service_id (str): service id\n\n number (str): users phone number, used for 2fa\n\n username (str): username\n\n Returns:\n models.User: created user\n\n \"\"\"\n\n route = routes.CREATE_USER.format(service_id)\n json = {\n \"phone_number\": number,\n \"id_in_service\": username\n }\n\n response = self.rest_client.post(route, json=json)\n return self.__user_from_json(response)\n\n @_validate_input\n def delete(self, service_id, id):\n \"\"\"Deleted user with given id for the given service\n\n Args:\n service_id (str): service id\n\n id (str): user id\n\n Returns:\n bool: True on success raises exception on error\n\n \"\"\"\n self.rest_client.delete(routes.DELETE_USER.format(service_id, id))\n\n return True\n\n @staticmethod\n def __user_from_json(json):\n return models.User(json['id'], json['ctime'], json['id_in_service'])\n\n\nclass Auth(object):\n \"\"\"Controller for service resource\n\n \"\"\"\n\n METHOD_SMS = \"sms\"\n METHOD_TOTP = \"totp\"\n\n @_validate_client\n def __init__(self, rest_client):\n \"\"\"\n\n Args:\n rest_client (client.RestClient):\n \"\"\"\n self.rest_client = rest_client\n\n @_validate_input\n def initiate(self, service_id, user_id, method):\n \"\"\"Initiates authentication process\n sends sms in case of sms authentication\n\n Args:\n service_id (str): service id\n\n user_id (str): user id\n\n method (str): auth method (sms or totp) use Auth.METHOD_SMS or Auth.METHOD_TOTP\n\n Note:\n System will automatically fall back from TOTP to SMS if user has no devices attached to the number\n\n Returns:\n str: string 
indicating 2FA method used (sms, totp)\n\n \"\"\"\n route = routes.AUTH_INITIATE.format(service_id, user_id)\n json = {\"method\": method}\n\n response = self.rest_client.post(route, json=json)\n\n method = response['method']\n\n return method\n\n @_validate_input\n def verify(self, service_id, user_id, token):\n \"\"\"Verifies user input validity\n\n Args:\n service_id (str): service id\n\n user_id (str): user id\n\n token (str): user provided token\n\n Returns:\n bool: boolean indicating verification status\n\n \"\"\"\n\n route = routes.AUTH_VERIFY.format(service_id, user_id)\n json = {\"token\": token}\n\n response = self.rest_client.put(route, json=json)\n\n verified = response['verified']\n\n return verified\n" }, { "alpha_fraction": 0.5544871687889099, "alphanum_fraction": 0.5544871687889099, "avg_line_length": 21.285715103149414, "blob_id": "aed30e49239f33a523f12df65d4b9eb118c3f2a2", "content_id": "4252b37dd5f9c632c9f787df4f8c97edb76ed142", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "permissive", "max_line_length": 52, "num_lines": 28, "path": "/messente/verigator/models.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "class Service(object):\n \"\"\"\n Attributes:\n id (str): unique id\n\n creation_time (str): creation time\n\n name (str): name of the service\n \"\"\"\n def __init__(self, id, creation_time, name):\n self.id = id\n self.creation_time = creation_time\n self.name = name\n\n\nclass User(object):\n \"\"\"\n Attributes:\n id (str): unique id\n\n creation_time (str): creation time\n\n username (str): name of the user\n \"\"\"\n def __init__(self, id, creation_time, username):\n self.id = id\n self.creation_time = creation_time\n self.username = username\n" }, { "alpha_fraction": 0.6640079617500305, "alphanum_fraction": 0.6640079617500305, "avg_line_length": 27.657142639160156, "blob_id": 
"13c5d2608f3a12b0aa3fa59f56b138a99db62f03", "content_id": "29366ae8906f55dad1fe695cf79b1bb2a3f24bd1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "permissive", "max_line_length": 72, "num_lines": 35, "path": "/messente/verigator/api.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "\"\"\"\nApi description\n\"\"\"\n\nfrom messente.verigator import routes, client, controllers\n\n\nclass Api(object):\n \"\"\"Main class for verigator api,\n contains references to other controllers\n\n Attributes:\n services (controllers.Services): controller for service resource\n\n users (controllers.Users): controller for user resource\n\n auth (controllers.Auth): controller for auth resource\n\n \"\"\"\n\n def __init__(self, username, password, endpoint=routes.URL):\n \"\"\"\n Initialize Verigator api\n\n Args:\n username (str): api username. Can be obtained from dashboard\n\n password (str): api password. Can be obtained from dashboard\n\n endpoint (str): api endpoint. 
Can be obtained from dashboard\n \"\"\"\n rest_client = client.RestClient(endpoint, username, password)\n self.users = controllers.Users(rest_client)\n self.services = controllers.Services(rest_client)\n self.auth = controllers.Auth(rest_client)\n" }, { "alpha_fraction": 0.6181507110595703, "alphanum_fraction": 0.6523972749710083, "avg_line_length": 29.736841201782227, "blob_id": "1b3359cd9b69647f0efc1d52515b1f8726c13a96", "content_id": "11aee998c6bbd8d395ed9ebf68ad1c9d97495b5b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "permissive", "max_line_length": 64, "num_lines": 19, "path": "/setup.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nsetup(\n name=\"verigator\",\n version=\"1.0.4\",\n packages=[\"messente.verigator\"],\n setup_requires=[\"requests==2.18.4\"],\n install_requires=[\"requests==2.18.4\"],\n tests_require=[\"requests-mock==1.3.0\", \"mock==2.0.0\"],\n author=\"Verigator.com\",\n author_email=\"[email protected]\",\n description=\"Official Verigator.com API library\",\n license=\"Apache License, Version 2\",\n keywords=\"verigator messente sms verification 2FA pin code\",\n url=\"http://messente.com/documentation/\",\n test_suite=\"messente.verigator.test\"\n)\n" }, { "alpha_fraction": 0.6715627312660217, "alphanum_fraction": 0.6778232455253601, "avg_line_length": 44.63736343383789, "blob_id": "c5bb83533851172947b539be6804680d97b6c311", "content_id": "9f05a673bb8b144aafcc78711661a353163beccc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4153, "license_type": "permissive", "max_line_length": 115, "num_lines": 91, "path": "/messente/verigator/test/test_restClient.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nimport 
requests_mock\nfrom messente.verigator import exceptions, client\n\n\n@requests_mock.mock()\nclass TestRestClient(TestCase):\n def setUp(self):\n self.rest_client = client.RestClient(\"http://test\", \"test\", \"test\")\n\n self.valid_headers = {\n \"X-Service-Auth\": \"test:test\"\n }\n\n self.valid_get_headers = self.valid_headers\n self.valid_post_headers = {\n \"X-Service-Auth\": \"test:test\",\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n\n self.sample_response = {\n \"foo\": \"bar\"\n }\n\n def test_auth_header(self, request_mocker):\n request_mocker.get(\"http://test/\", request_headers=self.valid_headers, json={})\n\n self.rest_client.get(\"\")\n self.assertTrue(request_mocker.called)\n\n def test_get(self, request_mocker):\n request_mocker.get(\"http://test/test?foo=bar\", complete_qs=True, request_headers={\"foo\": \"bar\"},\n json=self.sample_response)\n json = self.rest_client.get(\"test\", params={\"foo\": \"bar\"}, headers={\"foo\": \"bar\"})\n self.assertTrue(request_mocker.called)\n self.assertEqual(json, self.sample_response)\n\n def test_post(self, request_mocker):\n request_mocker.post(\"http://test/test\", json=self.sample_response, request_headers=self.valid_post_headers)\n\n json = self.rest_client.post(\"test\", json=self.sample_response)\n\n self.assertTrue(request_mocker.called)\n self.assertEqual(json, self.sample_response)\n self.assertEqual(request_mocker.request_history[0].json(), self.sample_response)\n\n def test_put(self, request_mocker):\n request_mocker.put(\"http://test/test\", json=self.sample_response, request_headers=self.valid_post_headers)\n\n json = self.rest_client.put(\"test\", json=self.sample_response)\n\n self.assertTrue(request_mocker.called)\n self.assertEqual(json, self.sample_response)\n self.assertEqual(request_mocker.request_history[0].json(), self.sample_response)\n\n def test_delete(self, request_mocker):\n request_mocker.delete(\"http://test/test\", 
json=self.sample_response, request_headers=self.valid_headers)\n\n json = self.rest_client.delete(\"test\")\n self.assertTrue(request_mocker.called)\n self.assertEqual(json, self.sample_response)\n\n def test_raises_errors(self, request_mocker):\n self._assertAllRoutesRaises(exceptions.InternalError, request_mocker, 500)\n self._assertAllRoutesRaises(exceptions.InvalidDataError, request_mocker, 400)\n self._assertAllRoutesRaises(exceptions.WrongCredentialsError, request_mocker, 401)\n self._assertAllRoutesRaises(exceptions.ResourceForbiddenError, request_mocker, 403)\n self._assertAllRoutesRaises(exceptions.NoSuchResourceError, request_mocker, 404)\n self._assertAllRoutesRaises(exceptions.ResourceAlreadyExistsError, request_mocker, 409)\n self._assertAllRoutesRaises(exceptions.VerigatorError, request_mocker, 447)\n\n def test_non_json_response(self, request_mocker):\n request_mocker.register_uri('GET', \"http://test/test\", text=\"Some non json response\", status_code=200)\n self.assertRaises(exceptions.InvalidResponseError, self.rest_client.get, \"test\")\n\n def _assertAllRoutesRaises(self, exception, request_mocker, code):\n self._register_addresses(request_mocker, code)\n\n self.assertRaises(exception, self.rest_client.get, \"test\")\n self.assertRaises(exception, self.rest_client.post, \"test\")\n self.assertRaises(exception, self.rest_client.put, \"test\")\n self.assertRaises(exception, self.rest_client.delete, \"test\")\n\n @staticmethod\n def _register_addresses(request_mocker, code):\n request_mocker.register_uri('GET', 'http://test/test', json={}, status_code=code)\n request_mocker.register_uri('POST', 'http://test/test', json={}, status_code=code)\n request_mocker.register_uri('PUT', 'http://test/test', json={}, status_code=code)\n request_mocker.register_uri('DELETE', 'http://test/test', json={}, status_code=code)\n" }, { "alpha_fraction": 0.6908602118492126, "alphanum_fraction": 0.6908602118492126, "avg_line_length": 15.17391300201416, "blob_id": 
"1ef5c4d079121c9245dd9b944a03cf9a9b594387", "content_id": "4e5d717bd6c1ba95f42f3fa9ba58d003360fcf41", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 744, "license_type": "permissive", "max_line_length": 46, "num_lines": 46, "path": "/docs/modules.rst", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "API REFERENCE\n========================\n\n.. automodule:: messente.verigator.api\n\n.. autoclass:: Api\n :members:\n :special-members:\n\n.. automodule:: messente.verigator.controllers\n\n.. autoclass:: Services\n :members:\n\n.. autoclass:: Users\n :members:\n\n.. autoclass:: Auth\n :members:\n\n\n.. automodule:: messente.verigator.models\n\n.. autoclass:: Service\n :members:\n\n.. autoclass:: User\n :members:\n\n.. automodule:: messente.verigator.exceptions\n\n.. autoclass:: VerigatorError\n\n.. autoclass:: InvalidDataError\n\n.. autoclass:: NoSuchResourceError\n\n.. autoclass:: ResourceAlreadyExistsError\n\n.. autoclass:: ResourceForbiddenError\n\n.. autoclass:: WrongCredentialsError\n\n.. autoclass:: InternalError\n\n.. 
autoclass:: InvalidResponseError\n" }, { "alpha_fraction": 0.6655086874961853, "alphanum_fraction": 0.6655086874961853, "avg_line_length": 25.513158798217773, "blob_id": "56e5b9bd32e0fcfbf3170bdcae1e9c5bb9fad535", "content_id": "fb58269571258ebce352fefa8aa4969d159ccdb4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2015, "license_type": "permissive", "max_line_length": 82, "num_lines": 76, "path": "/messente/verigator/exceptions.py", "repo_name": "messente/verigator-python", "src_encoding": "UTF-8", "text": "\"\"\"\nExceptions module of the api\n\"\"\"\n\n\nclass VerigatorError(Exception):\n \"\"\"\n Base error class for all verigator related errors.\n \"\"\"\n\n def __init__(self, code, message):\n super(VerigatorError, self).__init__(message)\n self.code = code\n self.message = message\n\n\nclass InvalidDataError(VerigatorError):\n \"\"\"\n This error is raised when provided data is invalid\n \"\"\"\n\n def __init__(self, code, message):\n super(InvalidDataError, self).__init__(code, message)\n\n\nclass NoSuchResourceError(VerigatorError):\n \"\"\"\n This error is raised when the resource that yu were looking for does not exist\n \"\"\"\n\n def __init__(self, code, message):\n super(NoSuchResourceError, self).__init__(code, message)\n\n\nclass ResourceAlreadyExistsError(VerigatorError):\n \"\"\"\n This error is raised when you are creating resource that already exists\n \"\"\"\n\n def __init__(self, code, message):\n super(ResourceAlreadyExistsError, self).__init__(code, message)\n\n\nclass ResourceForbiddenError(VerigatorError):\n \"\"\"\n This error raises when you don't have permissions to access the resource\n \"\"\"\n\n def __init__(self, code, message):\n super(ResourceForbiddenError, self).__init__(code, message)\n\n\nclass WrongCredentialsError(VerigatorError):\n \"\"\"\n This error raises when you provided invalid credentials.\n Please see messente dashboard for correct username 
and password\n \"\"\"\n def __init__(self, code, message):\n super(WrongCredentialsError, self).__init__(code, message)\n\n\nclass InternalError(VerigatorError):\n \"\"\"\n This error means that there is a problem on the server side.\n \"\"\"\n\n def __init__(self, code, message):\n super(InternalError, self).__init__(code, message)\n\n\nclass InvalidResponseError(VerigatorError):\n \"\"\"\n This error usually raises when server returned non-json response\n \"\"\"\n def __init__(self, code, message):\n super(InvalidResponseError, self).__init__(code, message)\n" } ]
16
danielrincon-m/AYED
https://github.com/danielrincon-m/AYED
20c3fbbe3c0065644f63313f7281c1f42615a0d6
5877a93086069a8654c91da8a64a42442b3a044f
363259e98744289bd378025d00fcae7c85ffb9be
refs/heads/master
2020-08-05T21:15:19.825674
2019-12-08T03:03:28
2019-12-08T03:03:28
212,713,848
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6442105174064636, "alphanum_fraction": 0.6536841988563538, "avg_line_length": 31.758621215820312, "blob_id": "de96d4c7357227f34987a2a6e3527da4d730f17e", "content_id": "c9bcb20713ff066241da72557af5447a832bc4cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 89, "num_lines": 58, "path": "/Arenas/Arena 1/S0018 - Repisa con Cajas.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "# Algoritmo:\n# 1. Obtener matriz de numeros de la entrada\n# 2. Realizar una lista de longitud (numero de filas de la matriz) en donde cada elemento\n# es la posición máxima en la que se podría mover una caja vacía (columna)\n# 3. Recorrer la matriz desde la posición máxima de columna de cada fila hasta cero\n# 3.1. Si se encuentra una caja moverla a la posición máxima y restarle 1\n# 4. Imprimir la solución\n\n\nfrom sys import stdin\n\n\ndef leerEntrada():\n size = [int(x) for x in stdin.readline().strip().split(\" \")]\n filas, columnas = size[0], size[1]\n matrizEntrada = []\n for i in range(filas):\n matrizEntrada.append([int(x) for x in stdin.readline().strip().split(\" \")])\n return filas, columnas, matrizEntrada\n\n\ndef computarPosiciones(filas, cols, matrizEntrada):\n posicionesMaximas = []\n for i in range(filas):\n posicionesMaximas.append(-1)\n for j in range(cols):\n if matrizEntrada[i][j] == 0:\n posicionesMaximas[i] = j\n elif matrizEntrada[i][j] == 2:\n break\n return posicionesMaximas\n\n\ndef calcularSolucion(filas, matrizEntrada, posicionesMaximas):\n for i in range(filas):\n for j in range(posicionesMaximas[i], -1, -1):\n if matrizEntrada[i][j] == 1:\n matrizEntrada[i][posicionesMaximas[i]] = 1\n posicionesMaximas[i] -= 1\n matrizEntrada[i][j] = 0\n return matrizEntrada\n\n\ndef imprimirRespuesta(filas, cols, matrizSolucion):\n for i in range(filas):\n for j in range(cols):\n print(matrizSolucion[i][j], end=' 
')\n print()\n\n\ndef main():\n filas, columnas, matrizEntrada = leerEntrada()\n posicionesMaximas = computarPosiciones(filas, columnas, matrizEntrada)\n matrizSolucion = calcularSolucion(filas, matrizEntrada, posicionesMaximas)\n imprimirRespuesta(filas, columnas, matrizSolucion)\n\n\nmain()\n" }, { "alpha_fraction": 0.5601173043251038, "alphanum_fraction": 0.567937433719635, "avg_line_length": 25.230770111083984, "blob_id": "3d87897b980a960fd2126f1f351a28f3b6adccb6", "content_id": "24990311810cbcbb3e31b7680e42306d5f97fa98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1023, "license_type": "no_license", "max_line_length": 70, "num_lines": 39, "path": "/Arenas/Arena 3/PrinterQueue.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\nclass Queue:\n def __init__(self, items, checkIndex):\n self.checkIndex = checkIndex\n self.time = 0\n self.priorities = items\n\n def removeElement(self):\n value = self.priorities[0]\n del(self.priorities[0])\n self.checkIndex -= 1\n if all(i <= value for i in self.priorities):\n self.time += 1\n else:\n self.priorities.append(value)\n if self.checkIndex == -1:\n self.checkIndex = len(self.priorities) - 1\n\n def printed(self):\n return True if self.checkIndex < 0 else False\n\n def getTime(self):\n return self.time\n\n\ndef main():\n cases = int(stdin.readline().strip())\n for i in range(cases):\n size, pos = [int(x) for x in stdin.readline().strip().split()]\n values = [int(x) for x in stdin.readline().strip().split()]\n queue = Queue(values, pos)\n while not queue.printed():\n queue.removeElement()\n print(queue.getTime())\n\n\nmain()\n" }, { "alpha_fraction": 0.5132743120193481, "alphanum_fraction": 0.5221238732337952, "avg_line_length": 17.4489803314209, "blob_id": "aa59cec0ac0e03ffce417164040e66ad405284b8", "content_id": "39a56bf2c9dbd7552c829cfa00e874bf283a3a05", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 62, "num_lines": 49, "path": "/Arenas/Arena 2/Digit sum.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport time\n\n\ndef superDigit(n):\n if len(n) == 1:\n return int(n)\n newDigit = 0\n for i in range(len(n)):\n newDigit += int(n[i])\n return superDigit(str(newDigit))\n\n\ndef memoSuperDigit(n, M):\n if n in M:\n return M[n]\n M[n] = superDigitP(n, M)\n return memoSuperDigit(n, M)\n\n\ndef superDigitP(n, M):\n if len(n) == 1:\n return int(n)\n newDigit = 0\n for i in range(len(n)):\n newDigit += int(n[i])\n return memoSuperDigit(str(newDigit), M)\n\n\ndef calcNumber(n, k):\n k = int(k)\n nb = n\n for i in range(0, k - 1):\n n += nb\n return(n)\n\n\ndef main():\n inp = [x for x in stdin.readline().strip().split(\" \")]\n M = {}\n while inp != ['']:\n n, k = inp[0], inp[1]\n n = calcNumber(n, k)\n print(memoSuperDigit(n, M))\n\n inp = [x for x in stdin.readline().strip().split(\" \")]\n\n\nmain()\n" }, { "alpha_fraction": 0.5311795473098755, "alphanum_fraction": 0.5574755668640137, "avg_line_length": 26.163265228271484, "blob_id": "12fdb783c373df6afc389a946e1ccf9f8d988ace", "content_id": "f817ef680e8cbf94985e536d9db523ca98d93ca0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1331, "license_type": "no_license", "max_line_length": 94, "num_lines": 49, "path": "/Arenas/Arena 2/ways.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport sys\nsys.setrecursionlimit(1000000000)\nvalues = [1, 5, 10, 25, 50]\n\n\n# def ways(n, actualIndex):\n# if n < 0 or actualIndex >= len(values):\n# return 0\n# elif n == 0:\n# return 1\n# return ways(n - values[actualIndex], actualIndex) + ways(n, actualIndex + 1)\n\n\ndef memoWays(n, actualIndex, M):\n if n < 0 or actualIndex >= len(values):\n return 0\n if (n, 
actualIndex) in M:\n return M[(n, actualIndex)]\n M[(n, actualIndex)] = waysP(n, actualIndex, M)\n return M[(n, actualIndex)]\n\n\ndef waysP(n, actualIndex, M):\n if n < 0 or actualIndex >= len(values):\n return 0\n elif n == 0:\n return 1\n return memoWays(n - values[actualIndex], actualIndex, M) + memoWays(n, actualIndex + 1, M)\n\n\ndef main():\n M = [[None for x in range(5)] for y in range(6)]\n M = {}\n inp = stdin.readline().strip()\n while inp != '':\n n = int(inp)\n #buenas = ways(n, 0)\n buenas = memoWays(n, 0, M)\n # for key, value in M.items():\n # print(key, value)\n if buenas != 1:\n print('There are', str(buenas), 'ways to produce', str(n), 'cents change.')\n else:\n print('There is only', str(buenas), 'way to produce', str(n), 'cents change.')\n inp = stdin.readline().strip()\n\n\nmain()\n" }, { "alpha_fraction": 0.5651341080665588, "alphanum_fraction": 0.5702426433563232, "avg_line_length": 23.46875, "blob_id": "3be7fbd33694dc1f627abcf6c87b98b8dc94c54e", "content_id": "8ad3fdbb79d2151c068e4e76a8ce0d3642c1d5e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1566, "license_type": "no_license", "max_line_length": 83, "num_lines": 64, "path": "/Arenas/Arena 3/arboles.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\nclass Nodo:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n def setLeft(self, node):\n self.left = node\n\n def setRight(self, node):\n self.right = node\n\n def printPost(self):\n if self.left is not None:\n self.left.printPost()\n if self.right is not None:\n self.right.printPost()\n print(self.value, end='')\n\n\nclass Arbol:\n def __init__(self):\n self.raiz = None\n\n def buildTree(self, preord, inord, inBeg, inEnd, preIndex):\n if inBeg > inEnd:\n return None\n\n value = preord[preIndex]\n actualNode = Nodo(value)\n inIndex = inord.index(value)\n\n if inIndex < inBeg:\n return 
self.buildTree(preord, inord, inBeg, inEnd, preIndex + 1)\n if self.raiz is None:\n self.raiz = actualNode\n\n leftNode = self.buildTree(preord, inord, inBeg, inIndex - 1, preIndex + 1)\n rightNode = self.buildTree(preord, inord, inIndex + 1, inEnd, preIndex + 1)\n actualNode.setLeft(leftNode)\n actualNode.setRight(rightNode)\n\n return actualNode\n\n def printPos(self):\n if self.raiz is not None:\n self.raiz.printPost()\n print()\n\n\ndef main():\n entrada = stdin.readline().strip()\n while entrada != '':\n preord, inord = entrada.split()\n tree = Arbol()\n tree.buildTree(preord, inord, 0, len(inord) - 1, 0)\n tree.printPos()\n entrada = stdin.readline().strip()\n\n\nmain()\n" }, { "alpha_fraction": 0.49580538272857666, "alphanum_fraction": 0.5142617225646973, "avg_line_length": 25.19780158996582, "blob_id": "81d28a4f70251832980c4ef9da66e0892d465f4e", "content_id": "bf7c570ffef7e6e91d6a65f7447638c71763a3aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2385, "license_type": "no_license", "max_line_length": 121, "num_lines": 91, "path": "/Arenas/Arena 4/freckles.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport math\n\n\n# Clase DisjointSet\nclass DisjointSet:\n def __init__(self):\n self.setList = {}\n self.pos = {}\n self.nextKey = 1\n\n def getKey(self):\n key = self.nextKey\n self.nextKey += 1\n return key\n\n def makeSet(self, rep):\n key = self.getKey()\n self.setList[key] = ({rep})\n self.pos[rep] = key\n\n def findSet(self, rep):\n if rep in self.pos:\n return self.setList[self.pos[rep]]\n return set()\n\n def union(self, st1, st2, r1, r2):\n key = self.getKey()\n\n del self.setList[self.pos[r1]]\n del self.setList[self.pos[r2]]\n\n newSet = st1.union(st2)\n self.setList[key] = newSet\n\n for elt in newSet:\n self.pos[elt] = key\n\n def getSetList(self):\n return self.setList\n\n def joinSets(self, r1, r2):\n set1 = self.findSet(r1)\n set2 = 
self.findSet(r2)\n if set1 != set2:\n self.union(set1, set2, r1, r2)\n return True\n return False\n\n\n# Funcion que se encarga de encontrar el grafo de menor costo en donde todos los nodos estén conectados\ndef MST_Kruskal(graph):\n mst = {'V': graph['V'], 'E': []}\n ds = DisjointSet()\n graph['E'] = sorted(graph['E'], key=lambda arc: (arc[2]))\n for vertex in graph['V']:\n ds.makeSet(vertex)\n for edge in graph['E']:\n res = ds.joinSets(edge[0], edge[1])\n if res:\n mst['E'].append(edge)\n return mst\n\n\ndef distsq(x1, y1, x2, y2):\n return (x2 - x1)**2 + (y2 - y1)**2\n\n\ndef main():\n ncases = int(stdin.readline().strip())\n for n in range(ncases):\n if n != 0:\n print()\n blank = stdin.readline()\n graph = {'V': {}, 'E': []}\n npecas = int(stdin.readline().strip())\n for i in range(npecas):\n graph['V'][str(i)] = [float(x) for x in stdin.readline().strip().split()]\n for i in range(npecas):\n for j in range(i + 1, npecas):\n if i != j:\n d = distsq(graph['V'][str(i)][0], graph['V'][str(i)][1], graph['V'][str(j)][0],graph['V'][str(j)][1])\n graph['E'].append((str(i), str(j), d))\n mst = MST_Kruskal(graph)\n distanciasq = 0\n for edge in mst['E']:\n distanciasq += math.sqrt(edge[2])\n print('%.2f'%distanciasq)\n\n\nmain()\n" }, { "alpha_fraction": 0.4618717432022095, "alphanum_fraction": 0.48093587160110474, "avg_line_length": 25.837209701538086, "blob_id": "aac8799fb4a0efa314062c3f11f9acc0e26895b2", "content_id": "b77dce803196cb6bc0522313c02e7929054b6645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 79, "num_lines": 43, "path": "/Arenas/Arena 4/worm.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "#wormholes\n\nfrom sys import stdin\nimport math\n\ndef bellManFord(source, nodes, edges):\n nodes[source] = (0, None)\n for i in range(len(nodes) - 1):\n changed = False\n for edge in edges:\n u = nodes[edge[0]]\n v = 
nodes[edge[1]]\n w = edge[2]\n if v[0] > u[0] + w:\n nodes[edge[1]] = (u[0] + w, edge[0])\n changed = True\n if not changed:\n return False\n for edge in edges:\n u = nodes[edge[0]]\n v = nodes[edge[1]]\n w = edge[2]\n if v[0] > u[0] + w:\n nodes[edge[1]] = (u[0] + w, edge[0])\n return True\n return False\n\n\ndef main():\n cases = int(stdin.readline().strip())\n for i in range(cases):\n N = {}\n E = []\n n, m = stdin.readline().strip().split()\n for star in range(int(n)):\n N[str(star)] = (math.inf, None)\n for wh in range(int(m)):\n wormhole = stdin.readline().strip().split()\n E.append((wormhole[0], wormhole[1], int(wormhole[2])))\n print('possible' if bellManFord('0', N, E) is True else 'not possible')\n\n\nmain()\n" }, { "alpha_fraction": 0.6223827004432678, "alphanum_fraction": 0.6613718271255493, "avg_line_length": 37.47222137451172, "blob_id": "327403abcdf2d44466abccb1aab574a7c68d4f72", "content_id": "6109c930d40efc483fd6f1b182651e93ba546e63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 125, "num_lines": 36, "path": "/_Requerimientos documentación/igualar cadenas.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "#SOLUCION AL PROBLEMA AGTC 3356, Simón Marín\nfrom sys import stdin\n\n#Función que se encarga de minimizar el número de operaciones posibles para transformar cualquier cadena c1 en una cadena c2.\n#param -c1: cadena numero 1\n# -c2: cadena numero 2\n#return -mínimo de operaciones posibles para transformar cadena c1 a cadena c2.\ndef cadenas(c1,c2):\n #Caso base, alguna cadena vacia\n if c1 == '' or c2 == '':\n return 0\n elif c1[0] == c2[0]:\n return cadenas(c1[1:],c2[1:])\n else:\n return min(1+cadenas(c1[1:],c2),1+cadenas(c1,c2[1:]),1+cadenas(c1[1:],c2[1:]))\n#Función Principal.\ndef main():\n while True:\n try:\n long1,cadena1 = [x for x in stdin.readline().strip().split()]\n long2,cadena2 = [x for x in 
stdin.readline().strip().split()]\n print(cadenas(cadena1,cadena2))\n except ValueError:\n break\nmain()\n#Función de pruebas, se realizan las correspondientes pruebas para verificar el funcionamiento del codigo\n#return -Resultado de la prueba.\ndef pruebas():\n cadena1 = \"AGTCTGACGC\"\n cadena2 = \"AGTAAGTAGGC\"\n #Respuesta = 4, para pasar de cadena1 a cadena2 es necesario cambiar 3 letrasy eliminar una letra en la cadena2 \n respuesta = 4\n mi_respuesta = cadenas(cadena1,cadena2)\n if mi_respuesta == respuesta:\n return \"Prueba pasada con exito.\"\n return \"Prueba fallida.\"\n" }, { "alpha_fraction": 0.5189040899276733, "alphanum_fraction": 0.5249314904212952, "avg_line_length": 25.071428298950195, "blob_id": "d3ff72e4d77a76816fc0352b1488dd83826acbca", "content_id": "f55fe7e497fe6828f160ae9cb44ca533d6482ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1825, "license_type": "no_license", "max_line_length": 69, "num_lines": 70, "path": "/Lab S08 - Árboles/TeamQueue.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\nclass Team:\n def __init__(self, teamNumber):\n self.miembros = []\n self.teamNumber = teamNumber\n\n def addMember(self, member):\n self.miembros.append(member)\n\n def removeMember(self):\n member = self.miembros[0]\n del self.miembros[0]\n return member\n\n def isEmpty(self):\n return True if len(self.miembros) == 0 else False\n\n\nclass Queue:\n def __init__(self):\n self.queue = []\n self.teams = []\n\n def addElement(self, x, team):\n for i in range(len(self.teams)):\n if self.teams[i] == team:\n self.queue[i].addMember(x)\n return\n newTeam = Team(team)\n newTeam.addMember(x)\n self.queue.append(newTeam)\n self.teams.append(team)\n\n def removeElement(self):\n member = self.queue[0].removeMember()\n if self.queue[0].isEmpty():\n del(self.queue[0])\n del(self.teams[0])\n return member\n\n\n\ndef main():\n nTeams = 
int(stdin.readline().strip())\n case = 1\n while nTeams != 0:\n print('Scenario #' + str(case))\n case += 1\n teamDict = {}\n queue = Queue()\n for i in range(nTeams):\n team = [int(x) for x in stdin.readline().strip().split()]\n for j in range(1, len(team)):\n teamDict[team[j]] = i\n instruction = stdin.readline().strip()\n while instruction != 'STOP':\n if instruction == 'DEQUEUE':\n print(queue.removeElement())\n else:\n instruction, x = [x for x in instruction.split()]\n x = int(x)\n queue.addElement(x, teamDict[x])\n instruction = stdin.readline().strip()\n nTeams = int(stdin.readline().strip())\n print()\n\n\nmain()\n" }, { "alpha_fraction": 0.5655868053436279, "alphanum_fraction": 0.5836431384086609, "avg_line_length": 28.421875, "blob_id": "078ad9f840f5958bd99a78c49321b20a60880bca", "content_id": "564e39f7e86273d57f677a6b96ce519964e1040e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1886, "license_type": "no_license", "max_line_length": 89, "num_lines": 64, "path": "/Tarea 1/main.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\ndef test_solution():\n inputs = [0, 1, 2]\n expected_outputs = ['-', [[1]], [[2, 2, 2], [2, 1, 2], [2, 2, 2]]]\n number_of_cases = len(inputs)\n for case in range(number_of_cases):\n output = solve(inputs[case] * 2 - 1)\n if output == expected_outputs[case]:\n print('Caso ' + str(case + 1) + ' válido')\n else:\n print('Caso ' + str(case + 1) + ' inválido')\n\n\ndef calculate_matrix_value(x, y, center):\n return max(abs(x - center), abs(y - center)) + 1\n\n\ndef print_solution(solution_matrix, size_of_matrix):\n if size_of_matrix == -1:\n print(solution_matrix)\n return\n for x in range(size_of_matrix):\n for y in range(size_of_matrix):\n if y != size_of_matrix - 1:\n print(solution_matrix[x][y], end='\\t')\n else:\n print(solution_matrix[x][y], end='\\n')\n\n\ndef solve(size_of_matrix):\n # 2. 
Verificar entrada\n if size_of_matrix == -1:\n return '-'\n # 4. Construir matriz de ceros\n solution_matrix = [[0 for x in range(size_of_matrix)] for y in range(size_of_matrix)]\n # 5. Encontrar el centro de la matriz\n center_coord = size_of_matrix // 2\n # 6. Recorrer la matriz\n for x in range(size_of_matrix):\n for y in range(size_of_matrix):\n # 6.Encontrar la distancia al centro, sumarle 1 y escribirla en la matriz\n solution_matrix[x][y] = calculate_matrix_value(x, y, center_coord)\n # 7. Imprimir la respuesta\n return solution_matrix\n\n\ndef get_input():\n # 1. Leer entrada\n n = int(stdin.readline().strip())\n # 3. Calcular el tamaño de la matriz\n size_of_matrix = n * 2 - 1\n return size_of_matrix\n\n\ndef main():\n size_of_matrix = get_input()\n solution_matrix = solve(size_of_matrix)\n print_solution(solution_matrix, size_of_matrix)\n\n\nmain()\n#test_solution()\n" }, { "alpha_fraction": 0.662001371383667, "alphanum_fraction": 0.6759971976280212, "avg_line_length": 31.477272033691406, "blob_id": "ea44f64e11034d0e5e49d28847469fb79657df8b", "content_id": "8405c1befab5c839c59a5b61c25ff813e20eb920", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "no_license", "max_line_length": 87, "num_lines": 44, "path": "/Arenas/Arena 1/HR00008 - Gridland Metro.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "# Algoritmo:\n# 1.Leer la entrada, variables separadas para tamaño y cantidad de lineas de metro\n# 2.Generar una matriz del tamaño adecuado, inicializada en cero\n# 3.Por cada una de las lineas de metro, ubicar un 1 en la fila y columnas indicadas\n# 4.Contar el numero de celdas con 0 en la matriz\n# 5.Divulgar respuesta\n\nfrom sys import stdin\n\n\ndef leerEntrada():\n lineas = []\n datosIniciales = [int(x) for x in stdin.readline().strip().split(\" \")]\n filas, cols, cantDeLineas = datosIniciales[0], datosIniciales[1], datosIniciales[2]\n for i in 
range(cantDeLineas):\n lineas.append([int(x) for x in stdin.readline().strip().split(\" \")])\n return filas, cols, lineas\n\n\ndef crearMatrizRespuesta(filas, cols, lineas):\n matrizRespuesta = [[0 for i in range(cols)] for j in range(filas)]\n for i in range(len(lineas)):\n for j in range(lineas[i][1] - 1, lineas[i][2]):\n matrizRespuesta[lineas[i][0] - 1][j] = 1\n return matrizRespuesta\n\n\ndef calcularPostes(filas, cols, matrizRespuesta):\n numeroDePostes = 0\n for i in range(filas):\n for j in range(cols):\n if matrizRespuesta[i][j] == 0:\n numeroDePostes += 1\n return numeroDePostes\n\n\ndef main():\n filas, cols, lineas = leerEntrada()\n matrizRespuesta = crearMatrizRespuesta(filas, cols, lineas)\n numeroDePostes = calcularPostes(filas, cols, matrizRespuesta)\n print(numeroDePostes)\n\n\nmain()\n" }, { "alpha_fraction": 0.6358381509780884, "alphanum_fraction": 0.6439306139945984, "avg_line_length": 28.827587127685547, "blob_id": "3741df1d5e4b3388d4134057edc43889e6a16886", "content_id": "7c9a971c33899f9ec409d7d7276b1b929200e7b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1735, "license_type": "no_license", "max_line_length": 175, "num_lines": 58, "path": "/Arenas/Arena 1/11220 - Decoding the message.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "# Algoritmo:\n# 1. Leer la entrada, una lista de oraciones en donde cada oración es una lista de palabras.\n# 2. Para cada lista de oraciones, definir una variable para la posición de letra, una lista de palabras de salida y una variable para la palabra de salida, recorrer la lista.\n# 2.1. Verificar si la posición de la letra se encuentra en la palabra.\n# 2.2. Si es así, agregar la letra a la palabra destino, agregar 1 a la posición de letra.\n# 2.3. agregar la palabra final a la lista de salida.\n# 3. 
Imprimir cada elemento de la lista de salida.\n\nfrom sys import stdin\n\n\ndef leerEntrada():\n oraciones = []\n while True:\n oracion = stdin.readline().strip().split(\" \")\n if oracion == [\"\"]:\n break\n else:\n oraciones.append(oracion)\n return oraciones\n\n\ndef obtenerLetra(oracion, indiceDeLetra):\n return oracion[indiceDeLetra]\n\n\ndef construirSalida(oraciones):\n respuestas = []\n for i in range(len(oraciones)):\n indiceDeLetra = 0\n palabraConstruida = \"\"\n for j in range(len(oraciones[i])):\n if indiceDeLetra < len(oraciones[i][j]):\n palabraConstruida += obtenerLetra(oraciones[i][j], indiceDeLetra)\n indiceDeLetra += 1\n respuestas.append(palabraConstruida)\n return respuestas\n\n\ndef imprimirSalida(caso, respuestas):\n if caso != 1:\n print()\n print('Case #' + str(caso) + \":\")\n for respuesta in respuestas:\n print(respuesta)\n\n\ndef main():\n casos = int(stdin.readline().strip())\n stdin.readline()\n\n for i in range(casos):\n oraciones = leerEntrada()\n respuestas = construirSalida(oraciones)\n imprimirSalida(i + 1, respuestas)\n\n\nmain()\n" }, { "alpha_fraction": 0.5088712573051453, "alphanum_fraction": 0.5141562819480896, "avg_line_length": 22.236841201782227, "blob_id": "3d0645d0e6849ff2ee3c0647fe173b0ddf3b0acd", "content_id": "2fb70a03b7c4c7600a4ae4aa4cedc3376d65de54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2649, "license_type": "no_license", "max_line_length": 51, "num_lines": 114, "path": "/Lab S07 - Lineales/3 - invertir.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "# Linked List Node definition\nclass Node:\n def __init__(self, initdata):\n self.data = initdata\n self.next = None\n\n def getData(self):\n return self.data\n\n def getNext(self):\n return self.next\n\n def setData(self, newdata):\n self.data = newdata\n\n def setNext(self, newnext):\n self.next = newnext\n\n\n# Linked List definition\nclass LinkedList:\n def 
__init__(self):\n self.head = None\n\n def isEmpty(self):\n return self.head == None\n\n def add(self, item):\n node = Node(item)\n node.setNext(self.head)\n self.head = node\n\n def size(self):\n current = self.head\n count = 0\n while current != None:\n count = count + 1\n current = current.getNext()\n return count\n\n def printList(self):\n current = self.head\n while current != None:\n if current.getData() is not None:\n print(current.getData(), end=' ')\n\n current = current.getNext()\n\n def search(self, item):\n current = self.head\n found = False\n while current != None and not found:\n if current.getData() == item:\n found = True\n else:\n current = current.getNext()\n\n return found\n\n def getHead(self):\n return self.head\n\n def setHead(self, newHead):\n self.head = newHead\n\n def remove(self, item):\n try:\n current = self.head\n previous = None\n found = False\n while current != None and not found:\n if current.getData() == item:\n found = True\n else:\n previous = current\n current = current.getNext()\n\n if previous == None:\n self.head = current.getNext()\n else:\n previous.setNext(current.getNext())\n except:\n return -1\n\n\ndef invertLinkedList(lili):\n if lili.size() > 1:\n first = lili.getHead()\n second = first.getNext()\n third = second.getNext()\n first.setNext(None)\n while second != None:\n second.setNext(first)\n first = second\n second = third\n if third != None:\n third = third.getNext()\n lili.setHead(first)\n return lili\n\n\ndef main():\n lili = LinkedList()\n elements = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n for elt in elements:\n lili.add(elt)\n print('Lista original')\n lili.printList()\n lili = invertLinkedList(lili)\n print('\\nLista invertida')\n lili.printList()\n\n\nmain()\n" }, { "alpha_fraction": 0.44477173686027527, "alphanum_fraction": 0.4639175236225128, "avg_line_length": 21.633333206176758, "blob_id": "ea359993776d1f81f0d7091f5076af562d32badb", "content_id": "7f8f2816b669d4fa13f1d6346b8f4af29ab8943a", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 65, "num_lines": 30, "path": "/Arenas/Arena 4/hardwood.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\ndef main():\n cases = int(stdin.readline().strip())\n stdin.readline() #first white line\n for i in range(cases):\n if i != 0:\n print()\n\n trees = {}\n tree = stdin.readline().strip()\n total = 0\n\n while tree != '':\n if tree not in trees:\n trees[tree] = 1\n else:\n trees[tree] += 1\n total += 1\n tree = stdin.readline().strip()\n\n ltrees = [(d, trees[d]) for d in trees]\n ltrees = sorted(ltrees, key= lambda d: (d[0]))\n\n for elt in ltrees:\n print(elt[0], \"{0:.4f}\".format(elt[1] * 100 / total))\n\n\nmain()\n" }, { "alpha_fraction": 0.4969325065612793, "alphanum_fraction": 0.5143149495124817, "avg_line_length": 24.710525512695312, "blob_id": "edf56d21593fd19a3f651ecabc3b4b9cec08a9fd", "content_id": "081d7676fb906b7234527d6579b4463cb756a268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 978, "license_type": "no_license", "max_line_length": 87, "num_lines": 38, "path": "/Arenas/Parcial final/Heap.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "class Heap:\n def __init__(self):\n self.heap = []\n self.heap_size = 0\n\n def makeHeap(self, lista):\n self.heap = lista\n self.heap_size = len(lista) - 1\n for i in range(self.heap_size, -1, -1):\n self.heapify(i)\n\n def left(self, index):\n return int(index * 2 + 1)\n\n def right(self, index):\n return int(index * 2 + 2)\n\n def heapify(self, index):\n l = self.left(index)\n r = self.right(index)\n largest = index\n if l <= self.heap_size:\n if self.heap[index] < self.heap[l]:\n largest = l\n if r <= self.heap_size:\n if self.heap[largest] < self.heap[r]:\n largest = r\n if largest != index:\n self.heap[index], self.heap[largest] = 
self.heap[largest], self.heap[index]\n self.heapify(largest)\n\n def printHeap(self):\n print(self.heap)\n\n\na = Heap()\na.makeHeap([1,2,3,4,5,7,81,6])\na.printHeap()\n\n" }, { "alpha_fraction": 0.47777777910232544, "alphanum_fraction": 0.4912280738353729, "avg_line_length": 20.935897827148438, "blob_id": "d862c7f4d41247c86f4238d6aacae439b2591049", "content_id": "10c4607c8498e529c77beda8ea4ca1a588cd3fc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1710, "license_type": "no_license", "max_line_length": 60, "num_lines": 78, "path": "/Lab S14 - Arboles de expansion/Graph.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "class Node:\n def __init__(self, name):\n self.name = name\n\nclass edge:\n def __init__(self, node1, node2, weight):\n self.n1 = node1\n self.n2 = node2\n self.weight = weight\n\nclass Graph:\n def __init__(self):\n self.nodes = {}\n self.edges = {}\n\n def addEdge(self, u, v):\n\n\n def BFS(self, s):\n # Mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n queue = []\n queue.append(s)\n visited[s] = True\n while queue:\n s = queue.pop(0)\n for i in self.graph[s]:\n if visited[i] == False:\n queue.append(i)\n visited[i] = True\n return visited\n\n def DFSUtil(self, v, visited):\n visited[v] = True\n # Recur for all the vertices adjacent to this vertex\n for i in self.graph[v]:\n if visited[i] == False:\n self.DFSUtil(i, visited)\n\n # The function to do DFS traversal. 
It uses\n # recursive DFSUtil()\n def DFS(self, v):\n # Mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n # DFS traversal\n self.DFSUtil(v, visited)\n return visited\n\n def MST_Prim(self):\n\n\ndef main():\n #Defimne\n g = Graph()\n g.addEdge(0, 1)\n g.addEdge(0, 2)\n g.addEdge(1, 2)\n g.addEdge(2, 0)\n g.addEdge(2, 3)\n g.addEdge(3, 3)\n\n\n bfs = g.BFS(2)\n dfs = g.DFS(0)\n\n print('BFS:')\n print(2, ' : ', end='')\n\n for el in range(len(bfs)):\n print(el, ',', end='')\n print()\n print('DFS:')\n print(2, ' : ', end='')\n for el in range(len(dfs)):\n print(el, ',', end='')\n\n\nmain()" }, { "alpha_fraction": 0.4933333396911621, "alphanum_fraction": 0.5066666603088379, "avg_line_length": 14, "blob_id": "c2837dfb9b4da149e8e5bcabdc6603434070436d", "content_id": "7a62955f6ae6f180dbce685b1a6ba77a3340c296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/Lab S03/3-Invertir.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\ndef inv(s):\n if len(s) == 1:\n return s\n return [s[len(s) - 1]] + inv(s[:len(s) - 1])\n\n\ndef main():\n s = [int(x) for x in stdin.readline().strip().split(\" \")]\n print(inv(s))\n\n\nmain()\n" }, { "alpha_fraction": 0.525464653968811, "alphanum_fraction": 0.5382573008537292, "avg_line_length": 20.46632194519043, "blob_id": "4f0f1f7ad26ddbf2d83fb21ba0066856798d6238", "content_id": "8cfe2d221a9e7e662d2905389e4140da4431d208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4143, "license_type": "no_license", "max_line_length": 91, "num_lines": 193, "path": "/Lab S15 - Camino Minimo/BellManFord - Dijkstra.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport math\n\n\nclass Node:\n def __init__(self, name):\n self.name = 
name\n self.d = math.inf\n self.phi = None\n\n def initialize(self):\n self.d = math.inf\n self.phi = None\n\n def getName(self):\n return self.name\n\n def setD(self, value):\n self.d = value\n\n def getD(self):\n return self.d\n\n def setPhi(self, value):\n self.phi = value\n\n def getPhi(self):\n return self.phi\n\n def getInfo(self):\n return (self.name, self.phi, self.d)\n\n\nclass Edge:\n def __init__(self, start, end, weight):\n self.start = start\n self.end = end\n self.weight = weight\n\n def getStart(self):\n return self.start\n\n def getEnd(self):\n return self.end\n\n def getWeight(self):\n return self.weight\n\n def getTripla(self):\n return (self.start, self.end, self.weight)\n\n\nclass Graph:\n #Los nodes son una lista de nombres de nodos\n #Los edges son una lista de triplas (inicio, fin, peso)\n def __init__(self, V, E):\n self.nodes = {}\n self.edges = []\n\n for node in V:\n self.nodes[node] = Node(node)\n for edge in E:\n self.edges.append(Edge(edge[0], edge[1], int(edge[2])))\n\n def addNode(self, name):\n self.nodes[name] = Node(name)\n\n def addEdge(self, edge):\n if edge[0] not in self.nodes:\n self.addNode(edge[0])\n if edge[1] not in self.nodes:\n self.addNode(edge[1])\n self.edges.append(Edge(edge[0], edge[1], int(edge[2])))\n\n def getNodes(self):\n return self.nodes.keys()\n\n def getEdges(self, getSorted = False):\n if not getSorted:\n return self.edges\n else:\n return sorted(self.edges, key=lambda edge: (edge.weight, edge.start, edge.end))\n\n def initializeSingleSource(self, source):\n for node in self.nodes:\n self.nodes[node].initialize()\n self.nodes[source].setD(0)\n\n def relax(self, edge):\n u = self.nodes[edge.getStart()]\n v = self.nodes[edge.getEnd()]\n w = edge.getWeight()\n if v.getD() > u.getD() + w:\n v.setD(u.getD() + w)\n v.setPhi(u.getName())\n return True\n return False\n\n def bellManFord(self, source):\n self.initializeSingleSource(source)\n for i in range(len(self.nodes)):\n for edge in self.edges:\n 
self.relax(edge)\n for edge in self.edges:\n if self.relax(edge):\n return False\n return True\n\n def dijkstra(self, source):\n self.initializeSingleSource(source)\n s = []\n q = list(self.nodes)\n while len(q) > 0:\n u = self.getMin(q)\n q.remove(u)\n s.append(u)\n adj = self.getNeighbors(u)\n for edge in adj:\n self.relax(edge)\n\n def getMin(self, q):\n m = min(q, key=lambda d: (self.nodes[d].getD()))\n return m\n\n def getNeighbors(self, node):\n neighbors = []\n for edge in self.edges:\n if edge.getStart() == self.nodes[node].getName():\n neighbors.append(edge)\n return neighbors\n\n def printGraph(self):\n print()\n print('Nodes = '.join(self.getNodes()))\n print(\"Edges:\")\n for edge in self.getEdges(True):\n print(edge.getTripla())\n print()\n\n def printResult(self):\n print()\n for node in self.nodes:\n print(self.nodes[node].getInfo())\n print()\n\n\ndef main():\n nodes = stdin.readline().strip().split()\n nEdges = int(stdin.readline().strip())\n edges = []\n for i in range(nEdges):\n edge = tuple(stdin.readline().strip().split())\n edges.append((edge[0], edge[1], int(edge[2])))\n\n graph = Graph(nodes, edges)\n #graph.bellManFord('A')\n graph.dijkstra('A')\n graph.printResult()\n\n\n\nmain()\n\n'''\nA B C D E F G\n10\nA B 7\nA C 3\nA E 2\nB D 5\nB C 10\nC E 5\nD E 8\nD F 25\nD G 45\nF G 30\n'''\n'''\nA B C D F G H I J\n12\nA B 15\nA C 2\nA D 20\nC D 3\nC J 13\nD F 25\nD G 5\nD H 2\nF G 35\nG I 100\nH I 1\nI J 55\n'''\n" }, { "alpha_fraction": 0.4797297418117523, "alphanum_fraction": 0.4932432472705841, "avg_line_length": 23.75, "blob_id": "cef687116ce9e1832ae35a77f8867e68c118d47e", "content_id": "f3fea076eca415469b568f39e7e9d1cc94a58f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 57, "num_lines": 12, "path": "/Taller DyQ/Punto2.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "def main():\n n = 
int(input('Numero de elementos: '))\n lista = []\n for i in range(n):\n lista.append(int(input('Escriba un elemento: ')))\n for i in range(n - 1):\n if lista[i + 1] != lista[i] + 1:\n print('missing=' + str(lista[i] + 1))\n break\n\n\nmain()" }, { "alpha_fraction": 0.5659065842628479, "alphanum_fraction": 0.5898220539093018, "avg_line_length": 34.60396194458008, "blob_id": "22e635cf535ba4d46599aff9988277c6b0af31ea", "content_id": "6addc68b17fde44d45e72157b8b56f2c6d2a14db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3608, "license_type": "no_license", "max_line_length": 146, "num_lines": 101, "path": "/Lab S13 - Hash/Estadio.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n# Clase que respresenta una has table en una lista\nclass HashTable:\n\n # Función que inicializa las variables necesarias para la clase\n # Param: -m: Tamaño de la hash table\n def __init__(self, m):\n self.data = [ [] for i in range(m) ]\n self.A = 0.98\n\n # Función que pobla la hash table a partir de una lista de elementos\n # Param: -v: lista de elementos para añadir a la hash table\n def buildHash(self, v):\n for e in v:\n self.insert(e)\n\n # Función que calcula el hash correspondiente al elemento u\n # Param: -u: Elemento al cual se le calculará el hash\n # return: El hash calculado del elemento\n def fHash(self, u):\n c=u\n return int(c[0])%4\n #return hash(u) % len(self.data)\n\n # Función que inserta un elemento en la hash table, previamente calculado el hash\n # Param: -u: Elemento que se desea insertar a la hash table\n def insert(self, u):\n self.data[self.fHash(u)].append(u)\n\n # Función que elimina un elemento en la hash table, previamente calculado el hash\n # Param: -u: Elemento que se desea eliminar de la hash table\n def delete(self, u):\n self.data[self.fHash(u)].remove(u)\n\n # Función que actualiza un elemento en la hash table, previamente calculado el hash\n # 
Param: -u1: Elemento que se desea actualizar en la hash table\n # -u2: Elemento por el cual se desea reemplazar u1\n def update(self, u1, u2):\n self.delete(u1)\n self.insert(u2)\n\n # Función que busca un elemento en la hash table, previamente calculado el hash\n # Param: -u: Elemento que se desea buscar en la hash table\n #return: El elemento buscado si existe en la hash table, o none si este no existe\n def search(self, u):\n slot = self.data[self.fHash(u)]\n for e in slot:\n if e == u:\n return e\n return None\n\n # Función que retorna los datos presentes en la hash table\n # Return: -Una lista con todos los elementos de la hash table\n def getData(self):\n return self.data\n\n# Función principal\ndef main():\n hTable = HashTable(4)\n print(\"*************************************************\")\n print(\"*******************BIENVENIDO********************\")\n print(\"*************************************************\")\n print(\"Digite solo el numero de la tribuna que desea comprar: \")\n print(\"1. Occidental\")\n print(\"2. Oriental\")\n print(\"3. Norte\")\n print(\"4. 
Sur\")\n #dato=input()\n print(\"Digite los datos de la siguiente forma y separados por un espacio: Tribuna Cedula Nombre Cantidad y presione la tecla Enter 2 veces.\")\n datos=stdin.readline().strip().split()\n while datos!=[]:\n if datos[0]==\"1\":\n total=int(datos[3])*50000\n print(\"Total: $\",total)\n if datos[0]==\"2\":\n total=int(datos[3])*100000\n print(\"Total: $\",total)\n if datos[0]==\"3\":\n total=int(datos[3])*30000\n print(\"Total: $\",total)\n if datos[0]==\"4\":\n total=int(datos[3])*30000\n print(\"Total: $\",total)\n hTable.buildHash([datos])\n datos=stdin.readline().strip().split()\n data = hTable.getData()\n print('Sur:', data[0])\n print('Occidental:', data[1])\n print('Oriental:', data[2])\n print('Norte:', data[3])\n\n# Funcion de pruebas\ndef pruebas():\n hTable = HashTable(4)\n datos = [[1, 123456, 'Daniel', 5], [2, 548479458, 'Alejandro', 8], [3, 841321654, 'Jonathan', 9]]\n hTable.buildHash(datos)\n print(hTable.getData())\n\nmain()\n#pruebas()\n" }, { "alpha_fraction": 0.5611293911933899, "alphanum_fraction": 0.5641447305679321, "avg_line_length": 30.860261917114258, "blob_id": "08193110053ed889fb6fc1d7d16a474bda1af608", "content_id": "e19e3bfd2fbcaa88a3c282c64b1ea4d018bbbfbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7307, "license_type": "no_license", "max_line_length": 142, "num_lines": 229, "path": "/Lab S07 - Lineales/referenciaestructuraslineales.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "import queue\nimport collections\n\n\n# Author: Sebastian Martinez Reyes\n# El siguiente es un ejemplo de las funciones, usos y definiciones disponibles para Colas(LIFO y FIFO), Listas circulares y listas Encadenadas\n# El proposito de este material es de consulta, a continuación las definiciones disponibles:\n# Colas sincronizadas (FIFO y LIFO): https://docs.python.org/3/library/queue.html?highlight=queue#module-queue\n# Listas circulares: 
https://docs.python.org/3/library/collections.html?highlight=deque#collections.deque\n\n# Linked List Node definition\nclass Node:\n def __init__(self, initdata):\n self.data = initdata\n self.next = None\n\n def getData(self):\n return self.data\n\n def getNext(self):\n return self.next\n\n def setData(self, newdata):\n self.data = newdata\n\n def setNext(self, newnext):\n self.next = newnext\n\n\n# Linked List definition\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def isEmpty(self):\n return self.head == None\n\n def add(self, item):\n node = Node(item)\n node.setNext(self.head)\n self.head = node\n\n def size(self):\n current = self.head\n count = 0\n while current != None:\n count = count + 1\n current = current.getNext()\n return count\n\n def printList(self):\n current = self.head\n while current != None:\n if current.getData() is not None:\n print(current.getData())\n\n current = current.getNext()\n\n def search(self, item):\n current = self.head\n found = False\n while current != None and not found:\n if current.getData() == item:\n found = True\n else:\n current = current.getNext()\n\n return found\n\n def head(self):\n return self.head()\n\n def remove(self, item):\n try:\n current = self.head\n previous = None\n found = False\n while current != None and not found:\n if current.getData() == item:\n found = True\n else:\n previous = current\n current = current.getNext()\n\n if previous == None:\n self.head = current.getNext()\n else:\n previous.setNext(current.getNext())\n except:\n return -1\n\n\n# El siguiente es un ejemplo de las funciones, usos y definiciones disponibles para Colas(LIFO y FIFO), Listas circulares\n# El proposito de este material es de consulta, a continuación las definiciones disponibles:\n# Colas sincronizadas (FIFO y LIFO): https://docs.python.org/3/library/queue.html?highlight=queue#module-queue\n# Listas circulares: https://docs.python.org/3/library/collections.html?highlight=deque#collections.deque\n\n# 
FIFO(First in First out) Queue\ndef queueSample(orden):\n print('========================================= EJEMPLO DE COLAS FIFO ========================================= ')\n fifoQueue = queue.Queue()\n for i in orden:\n fifoQueue.put(i)\n print('\\nEl tamaño de la Cola FIFO es: ', fifoQueue.qsize())\n print('\\nEl contenido de la cola FIFO es: ')\n while not fifoQueue.empty():\n print(fifoQueue.get())\n print(\n '========================================= FIN EJEMPLO DE COLAS FIFO ========================================= ')\n\n\n# LIFO(Last in Last out) Queue\ndef LIFOQueueSample(orden):\n print('========================================= EJEMPLO DE COLAS LIFO ========================================= ')\n lifoQueue = queue.LifoQueue()\n for i in orden:\n lifoQueue.put(i)\n print('\\nEl tamaño de la Cola LIFO es: ', lifoQueue.qsize())\n print('\\nEl contenido de la cola LIFO es: ')\n while not lifoQueue.empty():\n print(lifoQueue.get())\n print(\n '========================================= FIN EJEMPLO DE COLAS LIFO ========================================= ')\n\n\ndef dequeSample(orden):\n print(\n '========================================= EJEMPLO DE LISTAS CIRCULARES =========================================')\n deque = collections.deque()\n for i in orden:\n deque.append(i) # añadir elementos a la derecha\n\n print('\\nEl tamaño de la lista circular es: ', len(deque))\n print('\\nEl contenido de la lista circular es: ')\n print(deque)\n # rotar la lista 2 posiciones a la derecha\n deque.rotate(2)\n print(deque)\n # rotar la lista 5 posiciones a la izquierda (Desde la ultima instruccion)\n deque.rotate(-5)\n print(deque)\n\n deque.appendleft('kalash kalash') # añadir elementos a la izquierda\n print(deque)\n\n # Extender la lista\n deque.extend(['ex1', 'ex2', 'ex3'])\n print(deque)\n\n # Reversar la lista\n reverdesDeque = collections.deque(reversed(deque))\n print(reverdesDeque)\n\n print('\\nLos elementos de la lista son:')\n for elem in deque: # recorrer los 
elementos de la lista\n print(elem)\n\n print('\\nElementos indexados :')\n # Elementos indexados por derecha\n print(deque[3])\n\n # Elementos indexados por izquierda\n print(deque[-6])\n\n print('\\nLos elementos de la lista son:')\n # Otra manera de recorrer eliminando\n while True:\n try:\n print(deque.pop())\n except IndexError:\n break\n\n print('\\nLos elementos de la lista son:')\n # Otra manera de recorrer eliminando por izquierda\n while True:\n try:\n print(reverdesDeque.pop())\n except IndexError:\n break\n print(\n '========================================= FIN EJEMPLO DE LISTAS CIRCULARES ========================================= ')\n\n\ndef linkedListSample(orden):\n print(\n '========================================= EJEMPLO DE LISTAS ENLAZADAS ========================================= ')\n\n list = LinkedList()\n print('La lista es Vacia ? :', list.isEmpty())\n for i in orden:\n list.add(i)\n print('\\nEl tamaño de la lista enlazada es: ', list.size())\n print('La lista es Vacia ? 
:', list.isEmpty())\n print('Buscando un elemento en la Lista :', list.search('Spaguetti'))\n print('Buscando un elemento en la Lista que no existe :', list.search('Spaguetti232323'))\n print('\\nLos elementos de la lista son:')\n list.printList()\n print('\\nEliminación correcta:')\n print(list.remove('Pizza')) # Eliminando elementos de la lista\n print('\\nEliminación errada:')\n print(list.remove('pizza')) # Eliminando elementos de la lista que no esta\n print('\\nLos elementos de la lista son:')\n list.printList()\n print(\n '========================================= FIN EJEMPLO DE LISTAS ENLAZADAS ========================================= ')\n\n\ndef main():\n pedido = []\n\n pedido.append('Pizza')\n pedido.append('Spaguetti')\n pedido.append('Carne en salsa')\n pedido.append('Hamburguesa')\n pedido.append('Pollo a la Naranja')\n pedido.append('Huevos fritos con tocineta.')\n pedido.append('Shawarma')\n pedido.append('Carpaccio de lomito')\n pedido.append('Sopa de pollo')\n pedido.append('Causa limeña.')\n pedido.append('Lomo saltado')\n\n queueSample(pedido[:])\n LIFOQueueSample(pedido[:])\n dequeSample(pedido[:])\n linkedListSample(pedido[:])\n\n\nmain()\n" }, { "alpha_fraction": 0.6067307591438293, "alphanum_fraction": 0.6105769276618958, "avg_line_length": 31.5, "blob_id": "8f5c975838dc27f47ca1e9facb42a1f778170b63", "content_id": "03b17297252a6c43afd0bb5c65820a90c25c14eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 105, "num_lines": 32, "path": "/Arenas/Arena 2/Botin.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport math\n\n\ndef max_value(valores, valorActual, pesos, pesoActual, pesoMax, indiceActual):\n pesoActual += pesos[indiceActual]\n valorActual += valores[indiceActual]\n if pesoActual > pesoMax:\n return 0\n maxValor = -math.inf\n for i in range(indiceActual + 1, len(pesos)):\n 
valorResultado = max(valorActual, max_value(valores, valorActual, pesos, pesoActual, pesoMax, i))\n if valorResultado > maxValor:\n maxValor = valorResultado\n return maxValor\n\n\ndef main():\n casos = int(stdin.readline().strip())\n for i in range(casos):\n n, w = [int(x) for x in stdin.readline().strip().split(\" \")]\n valores = [int(x) for x in stdin.readline().strip().split(\" \")]\n pesos = [int(x) for x in stdin.readline().strip().split(\" \")]\n max = -math.inf\n for j in range(len(pesos)):\n valorResultado = max_value(valores, 0, pesos, 0, w, j)\n if valorResultado > max:\n max = valorResultado\n print(max)\n\n\nmain()\n" }, { "alpha_fraction": 0.5086705088615417, "alphanum_fraction": 0.5202311873435974, "avg_line_length": 10.533333778381348, "blob_id": "3f627f89dec278afc13a20dae86126927c2ec536", "content_id": "460d580a9f9f71dc325ca438e7ae3e8ac17f395e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 37, "num_lines": 15, "path": "/Lab S03/1-Sumatoria.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\ndef sum(n):\n if n == 0:\n return n\n return n + sum(n - 1)\n\n\ndef main():\n n = int(stdin.readline().strip())\n print(sum(n))\n\n\nmain()\n" }, { "alpha_fraction": 0.4842931926250458, "alphanum_fraction": 0.49738219380378723, "avg_line_length": 18, "blob_id": "f36af90802fd1b156eb586b2944063c0696ff703", "content_id": "2a56837086814ae7b8c18ef50f3afaa3e1459598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 40, "num_lines": 20, "path": "/Taller DyQ/Punto5.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "\n\ndef suma(num):\n acum = 0\n for i in range(len(num)):\n acum += int(num[i])\n return acum\n\n\ndef main():\n x = int(input('Numero X: '))\n num_actual = 1\n 
num_bits_acum = 0\n while True:\n bits = suma(bin(num_actual)[2:])\n num_bits_acum += bits\n if num_bits_acum >= x:\n break\n num_actual += 1\n print(num_actual)\n\nmain()\n" }, { "alpha_fraction": 0.3801916837692261, "alphanum_fraction": 0.42705005407333374, "avg_line_length": 28.34375, "blob_id": "2c530fe9bf51830185c83238089d9f3a828f473e", "content_id": "1dd304ecb39f21ee035d5ce4b6e1dcb5362ee191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 939, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/Arenas/Arena 3/vecinos.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\ndef main():\n cases = int(stdin.readline().strip())\n for case in range(cases):\n nHouses = int(stdin.readline().strip())\n m1 = [0 for x in range(nHouses - 1)]\n m2 = [0 for x in range(nHouses - 1)]\n values = [int(x) for x in stdin.readline().strip().split()]\n\n for i in range(len(values) - 1):\n if i == 0:\n m1[0] = values[0]\n elif i == 1:\n m1[1] = max(m1[0], values[1])\n else:\n m1[i] = max(values[i] + m1[i - 2], m1[i - 1])\n\n for i in range(1, len(values)):\n if i == 1:\n m2[0] = values[1]\n elif i == 2:\n m2[1] = max(m2[0], values[2])\n else:\n m2[i - 1] = max(values[i] + m2[i - 3], m2[i - 2])\n answ = max(m1[nHouses - 2], m2[nHouses - 2])\n\n print('Caso #' + str(case + 1) + ': ' + str(answ))\n\n\nmain()\n" }, { "alpha_fraction": 0.44993969798088074, "alphanum_fraction": 0.4909529685974121, "avg_line_length": 18.279069900512695, "blob_id": "e883f1bdd31376e65a88749d0942dd32d588a28b", "content_id": "fc6f2677091753887ce1657eb09f22779cf26b12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "no_license", "max_line_length": 66, "num_lines": 43, "path": "/Arenas/Arena 2/3n+1.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport 
math\n\ndef tn1(n):\n if n == 1:\n return 1\n elif n%2 == 0:\n return 1 + tn1(n//2)\n return 1 + tn1(3 * n + 1)\n\n\ndef memotn1(n, M):\n if n in M:\n return M[n]\n M[n] = tn1p(n, M)\n return M[n]\n\n\ndef tn1p(n, M):\n if n == 1:\n return 1\n elif n % 2 == 0:\n return 1 + memotn1(n//2, M)\n return 1 + memotn1(3 * n + 1, M)\n\n\ndef r3n1(a, b, M):\n maxLocal = -math.inf\n for i in range(a, b + 1):\n maxLocal = max(maxLocal, memotn1(i, M))\n return maxLocal\n\n\ndef main():\n entrada = [x for x in stdin.readline().strip().split(\" \")]\n M = {}\n while entrada != [\"\"]:\n a, b = int(entrada[0]), int(entrada[1])\n print(a, b, r3n1(min(a, b), max(a, b), M))\n entrada = [x for x in stdin.readline().strip().split(\" \")]\n\n\nmain()\n" }, { "alpha_fraction": 0.5462962985038757, "alphanum_fraction": 0.5648148059844971, "avg_line_length": 13.399999618530273, "blob_id": "bbc27d3f1b9e95e34eb0f25bd9eaded1391680f2", "content_id": "8c98c2e1c0f87aab57e659e6f6c4f96997226141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 46, "num_lines": 15, "path": "/Lab S03/2-Fibonacci.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\ndef fibonacci(n):\n if n == 0 or n == 1:\n return n\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n\ndef main():\n n = int(stdin.readline().strip())\n print(fibonacci(n))\n\n\nmain()\n" }, { "alpha_fraction": 0.46010637283325195, "alphanum_fraction": 0.48404255509376526, "avg_line_length": 18.02531623840332, "blob_id": "907dbc582489e36231488b4ca78259791b9348ee", "content_id": "01e8c07eee0296e97575e1aa53c0541d852b1b11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 46, "num_lines": 79, "path": "/Arenas/Arena 4/network.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", 
"text": "from sys import stdin\n\nsets = {}\nadj = {}\nconsec = 0\n\ndef makeSet(node):\n global sets, adj, consec\n\n sets[consec] = {node}\n adj[node] = consec\n consec += 1\n\ndef findset(node):\n global sets, adj, consec\n\n return sets[adj[node]]\n\ndef joinSets(s1, s2):\n global sets, adj, consec\n\n newSet = s1.union(s2)\n sets[consec] = newSet\n for elt in s1:\n adj[elt] = consec\n for elt in s2:\n adj[elt] = consec\n consec += 1\n\ndef join(el1, el2):\n global sets, adj, consec\n\n s1 = findset(el1)\n s2 = findset(el2)\n if s1 != s2:\n joinSets(s1, s2)\n\ndef sameSet(el1, el2):\n global sets, adj, consec\n\n if sets[adj[el1]] == sets[adj[el2]]:\n return True\n return False\n\n\ndef main():\n global sets, adj, consec\n\n ncases = int(stdin.readline().strip())\n void = stdin.readline()\n for c in range(ncases):\n sets = {}\n adj = {}\n consec = 0\n\n aff = 0\n neg = 0\n nNodes = int(stdin.readline().strip())\n\n for i in range(nNodes):\n makeSet(str(i + 1))\n\n inp = stdin.readline().strip()\n while inp != '':\n comm, n1, n2 = inp.split()\n if comm == 'c':\n join(n1, n2)\n else:\n if sameSet(n1, n2):\n aff += 1\n else:\n neg += 1\n inp = stdin.readline().strip()\n print(str(aff) + ',' + str(neg))\n if c != ncases - 1:\n print()\n\n\nmain()\n\n" }, { "alpha_fraction": 0.5623835921287537, "alphanum_fraction": 0.5679702162742615, "avg_line_length": 23.43181800842285, "blob_id": "593a29912808636c973801b3898c9f87481bbd61", "content_id": "70a974dce516e6af75ca9fb8e1bcaf100f885e6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 70, "num_lines": 44, "path": "/Arenas/Arena 3/queue2stacks.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\n#clase de una cola realizada con una lista\nclass Queue:\n #Inicialización de la clase\n def __init__(self):\n self.queue = []\n\n #Función que se encarga de agregar un 
elemento a la cola\n #Param: -value: un elemento para agregar a la cola\n def put(self, value):\n self.queue.append(value)\n\n # Función que se encarga de quitarle un elemento a la cola\n def pop(self):\n del(self.queue[0])\n\n # Función que se encarga de retornar el primer elemento de la cola\n # Return: El primer elemento de la cola\n def peek(self):\n return self.queue[0]\n\n\n# Función principal\ndef main():\n cola = Queue()\n queries = int(stdin.readline().strip())\n tipo = None\n x = None\n for i in range(queries):\n query = stdin.readline().strip()\n if query[0] == '1':\n tipo, x = [int(x) for x in query.split()]\n cola.put(x)\n else:\n tipo = int(query[0])\n if tipo == 2:\n cola.pop()\n else:\n print(cola.peek())\n\n\nmain()" }, { "alpha_fraction": 0.4897502660751343, "alphanum_fraction": 0.5128587484359741, "avg_line_length": 25.574256896972656, "blob_id": "4844edddaf632152ff08da9d7f32eaa177411306", "content_id": "042b463165d0f493de20d4e7bb022a97f64608b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2683, "license_type": "no_license", "max_line_length": 63, "num_lines": 101, "path": "/Arenas/Arena 4/sending.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "#sending email\n\nfrom sys import stdin\nimport math\nimport heapq\n\n\nclass Graph:\n def __init__(self):\n self.nodes = {} # {name: (d, phi)}\n self.edges = [] # [(start, end, weight)]\n self.neighbors = {} # {startName: [edgesIndex]}\n self.visited = {}\n\n def createGraph(self, V, E):\n for node in V:\n self.nodes[node] = (math.inf, None)\n for edge in E:\n self.edges.append((edge[0], edge[1], int(edge[2])))\n if edge[0] not in self.neighbors:\n self.neighbors[edge[0]] = []\n self.neighbors[edge[0]].append(len(self.edges) - 1)\n\n def clearGraph(self):\n self.nodes.clear()\n self.edges.clear()\n self.neighbors.clear()\n\n def getNodeD(self, name):\n return self.nodes[name][0]\n\n def initializeSingleSource(self, 
source):\n for node in self.nodes:\n self.nodes[node] = (math.inf, None)\n self.visited[node] = False\n self.nodes[source] = (0, None)\n\n def relax(self, edge):\n u = self.nodes[edge[0]]\n v = self.nodes[edge[1]]\n w = edge[2]\n if v[0] > u[0] + w:\n self.nodes[edge[1]] = (u[0] + w, edge[0])\n return True\n return False\n\n def dijkstra(self, source):\n self.initializeSingleSource(source)\n q = []\n heapq.heappush(q, (0, source))\n while q:\n u = heapq.heappop(q)\n self.visited[u[1]] = True\n adj = self.getNeighbors(u[1])\n for edge in adj:\n result = self.relax(edge)\n if result:#not self.visited[edge[1]]:\n heapq.heappush(q, (edge[2], edge[1]))\n\n def getNeighbors(self, node):\n neigh = []\n if node in self.neighbors:\n for edge in self.neighbors[node]:\n neigh.append(self.edges[edge])\n return neigh\n\nimport time\ndef main():\n cases = int(stdin.readline().strip())\n ti = time.time()\n g = Graph()\n for c in range(cases):\n g.clearGraph()\n n, m, s, t = stdin.readline().strip().split()\n nodes = [str(i) for i in range(int(n))]\n edges = []\n for conn in range(int(m)):\n inp = stdin.readline().strip().split()\n edges.append((inp[0], inp[1], int(inp[2])))\n edges.append((inp[1], inp[0], int(inp[2])))\n\n g.createGraph(nodes, edges)\n g.dijkstra(s)\n ans = g.getNodeD(t)\n pans = str(ans) if ans != math.inf else \"unreachable\"\n print(\"Case #\" + str(c + 1) + \": \" + pans)\n print(\"total time: \", time.time() - ti)\n\n\nmain()\n\n'''\n3\n2 1 0 1\n0 1 100\n3 3 2 0\n0 1 100\n0 2 200\n1 2 50\n2 0 0 1\n'''" }, { "alpha_fraction": 0.46031278371810913, "alphanum_fraction": 0.4688698649406433, "avg_line_length": 27.016529083251953, "blob_id": "4550c925f1034e647675693a36360dca7317f1ea", "content_id": "1a90a84384bfb194053490d4b04d4028cb3a75c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3389, "license_type": "no_license", "max_line_length": 140, "num_lines": 121, "path": "/Lab S09 - Grafos/bfs.py", 
"repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "import math\nimport queue\nfrom sys import stdin\n\nclass Grafo:\n def __init__(self, v, e):\n self.v = []\n self.e = {}\n #Mapear relaciones desde los arcos\n for arco in e:\n self.addEdge(arco)\n\n def initializeEdge(self, value):\n return {'value': value, 'color': None, 'dist': None, 'phi': None }\n\n \n def addEdge(self, e):\n nodo1,nodo2 = e[0],e[1]\n if nodo1 in self.e.keys():\n self.e[nodo1].append(nodo2)\n else:\n self.e[nodo1] = [nodo2]\n\n existsN1 = False\n existsN2 = False\n\n for vertex in self.v:\n existsN1 = existsN1 or (vertex['value'] == nodo1)\n existsN2 = existsN2 or (vertex['value'] == nodo2)\n\n if not existsN1:\n self.v.append(self.initializeEdge(nodo1))\n \n if not existsN2:\n self.v.append(self.initializeEdge(nodo2))\n \n\n def getNeighbors(self, value):\n neighbors = self.e[value] if value in self.e.keys() else []\n retArray = []\n for e in neighbors:\n #Buscar los atributos de ese vertice\n for u in self.v:\n if u['value'] == e:\n retArray.append(u)\n\n return retArray\n\n def BFS(self, s):\n processQueue = queue.Queue()\n for u in self.v :\n value = u['value']\n if value != s:\n u['color'] = 'WHITE'\n u['dist'] = math.inf\n u['phi'] = None\n else:\n u['color'] = 'GRAY'\n u['dist'] = 0\n u['phi'] = None\n processQueue.put(u)\n\n while not processQueue.empty():\n u = processQueue.get()\n neigh = self.getNeighbors(u['value'])\n\n for vec in neigh:\n if vec['color'] == 'WHITE':\n vec['color'] = 'GRAY'\n vec['dist'] = u['dist'] + 1\n vec['phi'] = u\n processQueue.put(vec)\n u['color']='Black'\n return self.v\n\n def DFS(self):\n for u in self.v:\n u['color'] = 'WHITE'\n u['phi'] = None\n time = 0\n for u in self.v:\n if u['color'] == 'WHITE':\n self.DFS_visit(u, time)\n return self.v\n\n def DFS_visit(self, u, time):\n u['dist'] = time\n time += 1\n u['color'] = 'GRAY'\n neigh = self.getNeighbors(u['value'])\n\n for vec in neigh:\n if vec['color'] == 'WHITE':\n vec['phi'] = 
u\n self.DFS_visit(vec, time)\n u['color'] = 'BLACK'\n\ndef main():\n vertex = stdin.readline().strip().split()\n arcsSize = int(stdin.readline().strip())\n arcs = []\n for i in range(arcsSize):\n inp = stdin.readline().strip().split()\n arcs.append((inp[0], inp[1]))\n\n # grafo = Grafo(vertex, arcs)\n # s = stdin.readline().strip()\n # vertex = grafo.BFS(s)\n\n grafo = Grafo(vertex, arcs)\n vertex = grafo.DFS()\n\n for u in vertex :\n antecesor = u['phi']\n if antecesor is None:\n print('Nodo %s tiene distancia %s desde %s ' % (u['value'], u['dist'], vertex[0]['value']) )\n else:\n print('Nodo %s tiene distancia %s desde %s con antecesor %s' % (u['value'], u['dist'], vertex[0]['value'], u['phi']['value'] ) )\n\n\nmain()" }, { "alpha_fraction": 0.514132559299469, "alphanum_fraction": 0.5146198868751526, "avg_line_length": 22.31818199157715, "blob_id": "57f9cf5997fa862fbbdfc4ed769c8221eac97155", "content_id": "b3d0460b2f7841d7b9c3bb59d09b2ea016878247", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2052, "license_type": "no_license", "max_line_length": 69, "num_lines": 88, "path": "/Arenas/Parcial 2/bicolor.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\nclass Node:\n def __init__(self, name):\n self.name = name\n self.color = None\n\n def setColor(self, color):\n self.color = color\n\n def getColor(self):\n return self.color\n\n def colored(self):\n if self.color is None:\n return False\n return True\n\n\nclass Graph:\n def __init__(self):\n self.nodes = {}\n self.edges = {}\n self.isBicolor = True\n\n def addNode(self, name):\n self.nodes[name] = Node(name)\n self.edges[name] = []\n\n def addEdge(self, a, b):\n self.edges[a].append(b)\n\n def getGraph(self):\n return self.edges\n\n def getOtherColor(self, color):\n if color == 'red':\n return 'blue'\n else:\n return 'red'\n\n def bicolor(self):\n self.DFS()\n return self.isBicolor\n\n def DFS(self):\n for 
elt in self.nodes.keys():\n if not self.nodes[elt].colored():\n self.DFSVisit(elt, 'red')\n\n def DFSVisit(self, elt, color):\n if not self.nodes[elt].colored():\n self.nodes[elt].setColor(color)\n else:\n color = self.nodes[elt].getColor()\n\n for hijo in self.edges[elt]:\n if self.nodes[hijo].colored():\n if self.nodes[hijo].getColor() == color:\n self.isBicolor = False\n else:\n self.DFSVisit(hijo, self.getOtherColor(color))\n\n\ndef main():\n nodes = int(stdin.readline().strip())\n while nodes != 0:\n graph = Graph()\n for i in range(nodes):\n graph.addNode(i)\n\n edges = int(stdin.readline().strip())\n for i in range(edges):\n a, b = [int(x) for x in stdin.readline().strip().split()]\n graph.addEdge(a, b)\n graph.addEdge(b, a)\n\n ans = graph.bicolor()\n #print(graph.getGraph())\n if ans == True:\n print('BICOLORABLE.')\n else:\n print('NOT BICOLORABLE.')\n\n nodes = int(stdin.readline().strip())\n\n\nmain()\n" }, { "alpha_fraction": 0.49214091897010803, "alphanum_fraction": 0.5252032279968262, "avg_line_length": 19.065217971801758, "blob_id": "db182d520398196c83886c5fd89fcd23b9617252", "content_id": "d500120eacd5b86ca8f8842a04bc07e30a1f6547", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1845, "license_type": "no_license", "max_line_length": 65, "num_lines": 92, "path": "/Arenas/Arena 4/amigos.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\n# Clase DisjointSet\nclass DisjointSet:\n def __init__(self):\n self.setList = {}\n self.pos = {}\n self.nextKey = 1\n\n def getKey(self):\n key = self.nextKey\n self.nextKey += 1\n return key\n\n def makeSet(self, rep):\n key = self.getKey()\n self.setList[key] = ({rep})\n self.pos[rep] = key\n\n def findSet(self, rep):\n if rep in self.pos:\n return self.setList[self.pos[rep]]\n return set()\n\n def union(self, st1, st2, r1, r2):\n key = self.getKey()\n\n del self.setList[self.pos[r1]]\n del 
self.setList[self.pos[r2]]\n\n newSet = st1.union(st2)\n self.setList[key] = newSet\n\n for elt in newSet:\n self.pos[elt] = key\n\n def getSetList(self):\n return self.setList\n\n def joinSets(self, r1, r2):\n set1 = self.findSet(r1)\n set2 = self.findSet(r2)\n if set1 != set2:\n self.union(set1, set2, r1, r2)\n\n def findConnectedRegions(self, graph):\n #Make Sets\n for v in graph['V']:\n self.makeSet(v)\n #For each relationship, Join Sets\n for arc in graph['E']:\n self.joinSets(arc[0], arc[1])\n return self.getSetList()\n\n\ndef main():\n cases = int(stdin.readline().strip())\n for c in range(cases):\n graph = {'V': [], 'E': []}\n n, m = map(int, stdin.readline().strip().split())\n graph['V'] = [str(i + 1) for i in range(n)]\n ds = DisjointSet()\n for couple in range(m):\n a, b = stdin.readline().strip().split()\n graph['E'].append((a, b))\n finalSets = ds.findConnectedRegions(graph)\n print(max([len(finalSets[i]) for i in finalSets.keys()]))\n\n\nmain()\n\n'''\n2\n3 2\n1 2\n2 3\n10 12\n1 2\n3 1\n3 4\n5 4\n3 5\n4 6\n5 2\n2 1\n7 1\n1 2\n9 10\n8 9\n\n'''" }, { "alpha_fraction": 0.5254988670349121, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 17.79166603088379, "blob_id": "673fda178d1fc1c25174aa894eba65aa5d744c06", "content_id": "1b2d2f0afa60578996440e417a009f703b091c09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 53, "num_lines": 24, "path": "/Arenas/Arena 2/uqs.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport sys\n\n\ndef uqs(seq):\n if len(seq) <= 1:\n return 0\n min_pos = seq.index(min(seq))\n del(seq[min_pos])\n return min_pos + uqs(seq)\n\n\ndef main():\n sys.setrecursionlimit(100000)\n n = int(stdin.readline().strip())\n while n != 0:\n seq = []\n for i in range(n):\n seq.append(int(stdin.readline().strip()))\n print(uqs(seq))\n n = 
int(stdin.readline().strip())\n\n\nmain()\n" }, { "alpha_fraction": 0.5466764569282532, "alphanum_fraction": 0.5554741024971008, "avg_line_length": 26.648649215698242, "blob_id": "c6857c76c3d916d1a17981236707ddc4362cd84a", "content_id": "751e3caef49ae7cf5d842d5e5574fa185972fcd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4092, "license_type": "no_license", "max_line_length": 100, "num_lines": 148, "path": "/Arenas/Arena 4/cookies.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\nclass PriorityQueue(object):\n\n def __init__(self):\n\n # List of items, flattened binary heap. The first element is not used.\n # Each node is a tuple of (value, priority, insert_counter)\n self.nodes = [None] # first element is not used\n\n # Current state of the insert counter\n self.insert_counter = 0 # tie breaker, keeps the insertion order\n\n # Comparison function between two nodes\n # Higher priority wins\n # On equal priority: Lower insert counter wins\n def _is_higher_than(self, a, b):\n return b[1] > a[1] or (a[1] == b[1] and a[2] > b[2])\n\n # Move a node up until the parent is bigger\n def _heapify(self, new_node_index):\n while 1 < new_node_index:\n new_node = self.nodes[new_node_index]\n parent_index = new_node_index // 2\n parent_node = self.nodes[parent_index]\n\n # Parent too big?\n if self._is_higher_than(parent_node, new_node):\n break\n\n # Swap with parent\n tmp_node = parent_node\n self.nodes[parent_index] = new_node\n self.nodes[new_node_index] = tmp_node\n\n # Continue further up\n new_node_index = parent_index\n\n # Add a new node with a given priority\n def add(self, value, priority):\n new_node_index = len(self.nodes)\n self.insert_counter += 1\n self.nodes.append((value, priority, self.insert_counter))\n\n # Move the new node up in the hierarchy\n self._heapify(new_node_index)\n\n # Return the top element\n def peek(self):\n if len(self.nodes) == 
1:\n return None\n else:\n return self.nodes[1][0]\n\n # Remove the top element and return it\n def pop(self):\n\n if len(self.nodes) == 1:\n raise LookupError(\"Heap is empty\")\n\n result = self.nodes[1][0]\n\n # Move empty space down\n empty_space_index = 1\n while empty_space_index * 2 < len(self.nodes):\n\n left_child_index = empty_space_index * 2\n right_child_index = empty_space_index * 2 + 1\n\n # Left child wins\n if (\n len(self.nodes) <= right_child_index\n or self._is_higher_than(self.nodes[left_child_index], self.nodes[right_child_index])\n ):\n self.nodes[empty_space_index] = self.nodes[left_child_index]\n empty_space_index = left_child_index\n\n # Right child wins\n else:\n self.nodes[empty_space_index] = self.nodes[right_child_index]\n empty_space_index = right_child_index\n\n # Swap empty space with the last element and heapify\n last_node_index = len(self.nodes) - 1\n self.nodes[empty_space_index] = self.nodes[last_node_index]\n self._heapify(empty_space_index)\n\n # Throw out the last element\n self.nodes.pop()\n\n return result\n\n def getSize(self):\n return len(self.nodes) - 1\n\n\n\ndef sweetness(a, k):\n heap = PriorityQueue()\n for i in a:\n heap.add(i, i)\n steps = 0\n smallerCookie = heap.pop()\n while smallerCookie < k:\n if heap.getSize() == 0:\n return -1\n\n secondSmaller = heap.pop()\n newCookie = smallerCookie + (2 * secondSmaller)\n heap.add(newCookie, newCookie)\n steps += 1\n\n smallerCookie = heap.pop()\n\n return steps\n\n# def sweetness(a, k):\n# steps = 0\n# a = sorted(a)\n# smallerCookie = a[0]\n# while smallerCookie < k:\n# if len(a) == 0:\n# return -1\n#\n# secondSmaller = a[0]\n# newCookie = smallerCookie + (2 * secondSmaller)\n# a.append(newCookie)\n# steps += 1\n#\n# a = sorted(a)\n# smallerCookie = a[0]\n#\n# return steps\n\n\n\ndef main():\n inp = stdin.readline().strip()\n while inp != '':\n n, k = [int(x) for x in inp.split()]\n a = [int(x) for x in stdin.readline().strip().split()]\n\n print(sweetness(a, k))\n\n 
inp = stdin.readline().strip()\n\n\nmain()\n" }, { "alpha_fraction": 0.6321998834609985, "alphanum_fraction": 0.6412213444709778, "avg_line_length": 29.02083396911621, "blob_id": "655e7f0c2e2268694941c3270d3801035ec4395a", "content_id": "2274c590e4fd19c447ea516f96ef79d9250d4dfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1441, "license_type": "no_license", "max_line_length": 122, "num_lines": 48, "path": "/Arenas/Arena 2/powerSum.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nimport math\n\nOBJETIVO = math.inf\nPOTENCIA_OBJVO = math.inf\nRAIZ_OBJVO = -1\n\n\n# def powerSum(suma, valorASumar):\n# if suma == OBJETIVO:\n# return 1\n# elif suma > OBJETIVO or valorASumar > RAIZ_OBJVO:\n# return 0\n# return powerSum(suma + valorASumar ** POTENCIA_OBJVO, valorASumar + 1) + powerSum(suma, valorASumar + 1)\n\n\ndef memoPowerSum(suma, valorASumar, M):\n if (suma, valorASumar, OBJETIVO, POTENCIA_OBJVO) in M:\n return M[(suma, valorASumar, OBJETIVO, POTENCIA_OBJVO)]\n M[(suma, valorASumar, OBJETIVO, POTENCIA_OBJVO)] = powerSumP(suma, valorASumar, M)\n return M[(suma, valorASumar, OBJETIVO, POTENCIA_OBJVO)]\n\n\ndef powerSumP(suma, valorASumar, M):\n if suma == OBJETIVO:\n return 1\n elif suma > OBJETIVO or valorASumar > RAIZ_OBJVO:\n return 0\n return memoPowerSum(suma + valorASumar ** POTENCIA_OBJVO, valorASumar + 1, M) + memoPowerSum(suma, valorASumar + 1, M)\n\n\ndef main():\n global OBJETIVO, POTENCIA_OBJVO, RAIZ_OBJVO\n M = {}\n OBJETIVO = stdin.readline().strip()\n while OBJETIVO != '':\n OBJETIVO = int(OBJETIVO)\n POTENCIA_OBJVO = int(stdin.readline().strip())\n RAIZ_OBJVO = int(OBJETIVO**(1 / POTENCIA_OBJVO)) + 1\n casosExitosos = memoPowerSum(0, 1, M)\n # for key, value in M.items():\n # print(key, value)\n print(casosExitosos)\n\n OBJETIVO = stdin.readline().strip()\n\n\nmain()\n" }, { "alpha_fraction": 0.48978644609451294, "alphanum_fraction": 
0.5571030378341675, "avg_line_length": 18.94444465637207, "blob_id": "9cd1371e6b4866683176929ab3aace0fba18b640", "content_id": "b0cc075e39db34a4d7f24c8744846753fded9a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2154, "license_type": "no_license", "max_line_length": 104, "num_lines": 108, "path": "/Taller InsertionSort/InsertionSort.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\n# Test Cases definition\n\n# Test framework with assertions\n\ndef test():\n # Test cases\n cases = [([], True),\n\n ([1, 2, 3, 4, 5], False),\n\n ([5, 4, 3, 2, 1], True),\n\n ([1, 2, 3, 10, 9, 8, 7, 20, 21, 22, 100, 99, 77, 65, 32], True),\n\n ]\n\n # Expected solutions\n solutions = [[],\n\n [5, 4, 3, 2, 1],\n\n [1, 2, 3, 4, 5],\n\n [1, 2, 3, 7, 8, 9, 10, 20, 21, 22, 32, 65, 77, 99, 100]\n\n ]\n\n # Test executions\n for case in range(len(cases)):\n assert insertionSort(cases[case][0], cases[case][1]) == solutions[case]\n\n return True\n\n\n# Insertion sort algorithm implementation\ndef insertionSort(sequence, ordDirection):\n result = sequence\n\n # If the sequence is ordered, then result is the sequence itself\n for i in range(1, len(result)):\n # Order the sub-sequence A[0..i-1] inserting A[i]\n j, change = i - 1, i\n while j >= 0 and (result[change] < result[j] if (ordDirection) else result[change] > result[j]):\n result[change], result[j] = result[j], result[change]\n change -= 1\n j -= 1\n return result\n\n\ndef input():\n # Read the sequence separated by spaces\n sequence = [int(x) for x in stdin.readline().strip().split(' ')]\n\n if (sequence == ['']):\n return None\n\n # Read the direction of ordering\n direction = stdin.readline().strip()\n direction = True if (direction == 'ASC') else False\n\n # Return the tuple (A[i..N], Direction)\n return sequence, direction\n\n\n# Format the output from a sequence as a space separated string\ndef output(sequence):\n 
return ' '.join([str(x) for x in sequence])\n\n\n# main Entry\ndef main():\n userInput = input()\n\n while userInput != None:\n solution = insertionSort(userInput[0], userInput[1])\n\n userOutput = output(solution)\n\n print(userOutput)\n\n userInput = input()\n\n\nif (test()):\n main()\n\n\"\"\"Input examples\n\n1 2 3 4 5 6 10 2 3 4 5 6 100 99 98 97 96 95 94 93\n\nASC\n\n1 2 3 4 5\n\nASC\n\n4 3 2 1\n\nDESC\n\n1 2 3 10 9 8 7 20 21 22 100 99 77 65 32\n\nASC\n\n\"\"\"\n" }, { "alpha_fraction": 0.4751359820365906, "alphanum_fraction": 0.4848484992980957, "avg_line_length": 28.94186019897461, "blob_id": "5cb7dfb760f2a1018e430b2d63f65b10e71e0470", "content_id": "71562a812b8c0b32a572829e6775e998726b5a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2574, "license_type": "no_license", "max_line_length": 77, "num_lines": 86, "path": "/Arenas/Arena 4/dicc.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\nclass HashNode:\n def __init__(self, key, value):\n self.next = None\n self.key = key\n self.value = value\n\n\nclass HashTable:\n def __init__(self):\n self.table = [None for _ in range(101)]\n\n def hash(self, key):\n # Generate hash from key.\n # Time O(N), Space O(1), where N is the length of key.\n hashed = 0\n for i in range(len(key)):\n hashed = (256 * hashed + ord(key[i])) % 101\n return hashed\n\n def add(self, key, value):\n # Add key, value.\n # Time O(1), Space O(1), where N is the num of elements in hashtable.\n bucket = self.hash(key)\n self.table[bucket] = HashNode(key, value)\n\n def find(self, key):\n # Find value from key.\n # Time O(1), Space O(1), where N is the num of elements in hashtable.\n bucket = self.hash(key)\n if not self.table[bucket]:\n return False\n else:\n temp = self.table[bucket]\n while temp:\n if temp.key == key:\n return temp.value\n temp = temp.next\n return False\n\n def delete(self, key):\n # Delete key, value.\n # Time 
O(1), Space O(1), where N is the num of elements in hashtable.\n bucket = self.hash(key)\n if not self.table[bucket]:\n return False\n else:\n if self.table[bucket].key == key:\n self.table[bucket] = None\n else:\n temp = self.table[bucket]\n while temp:\n if temp.next.key == key:\n temp.next = temp.next.next\n return\n temp = temp.next\n return False\n\n# import time\ndef main():\n # f = open('in.txt', 'r')\n instructions = int(stdin.readline().strip())\n # instructions = int(f.readline().strip())\n # it = time.time()\n ht = HashTable()\n # print('create table:', time.time() - it)\n # it = time.time()\n for i in range(instructions):\n instr = stdin.readline().strip().split()\n # instr = f.readline().strip().split()\n if instr[0] == 'ponga':\n ht.add(instr[1], instr[2])\n elif instr[0] == 'busque':\n res = ht.find(instr[1])\n if res == False:\n # pass\n print(instr[1] + '?')\n else:\n # pass\n print(instr[1] + ' vale ' + res)\n else:\n ht.delete(instr[1])\n # print('procedure:', time.time() - it)\n\n\nmain()" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.4281249940395355, "avg_line_length": 16.77777862548828, "blob_id": "395e99c35851752002c9786dcf067417409c4c50", "content_id": "b2c37f2afdfc4271c0afde7729407dcc4b5ba3ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/Taller DyQ/Punto3.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "\ndef exp(a, n):\n if n == 1:\n return a\n elif n == 2:\n return a*a\n\n if n % 2 == 0:\n return exp(a, n//2) * exp(a, n//2)\n else:\n return exp(a, n // 2) * exp(a, n // 2 + 1)\n\n\ndef main():\n a = int(input('Numero base: '))\n n = int(input('Exponente: '))\n print(exp(a, n))\n\nmain()" }, { "alpha_fraction": 0.471059650182724, "alphanum_fraction": 0.4906500577926636, "avg_line_length": 27.794872283935547, "blob_id": 
"4f86b12bc367dce7f5609bf93f0f3186e99c83d8", "content_id": "f89c58579e236d30c67c806baf01747f7898861b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1123, "license_type": "no_license", "max_line_length": 115, "num_lines": 39, "path": "/Arenas/Parcial final/Kruskal.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "class Kruskal:\n def __init__(self, nodes, edges):\n self.nodes = nodes\n self.sets = []\n for node in self.nodes:\n self.makeSet(node)\n self.edges = edges\n\n def makeSet(self, node):\n self.sets.append({node})\n\n def findset(self, node):\n for set in self.sets:\n if node in set:\n return set\n\n def joinSets(self, s1, s2):\n print(self.sets)\n newSet = s1.union(s2)\n self.sets.remove(s1)\n self.sets.remove(s2)\n self.sets.append(newSet)\n print(self.sets)\n\n def kruskal(self):\n tempEdges = sorted(self.edges, key=lambda d: d[2])\n print(tempEdges)\n minTree = []\n while len(tempEdges) > 0:\n minEdge = tempEdges.pop(0)\n s1 = self.findset(minEdge[0])\n s2 = self.findset(minEdge[1])\n if s1 != s2:\n minTree.append(minEdge)\n self.joinSets(s1, s2)\n return minTree\n\na = Kruskal(['a', 'b', 'c', 'd', 'e'], [('a', 'b', 3), ('a', 'd', 2), ('d', 'c', 5), ('b', 'c', 1), ('c', 'e', 3)])\nprint(a.kruskal())\n" }, { "alpha_fraction": 0.6015727519989014, "alphanum_fraction": 0.6046308279037476, "avg_line_length": 31.46808433532715, "blob_id": "e24e92dc5bffa0a047410bffa84d11e3a6923217", "content_id": "ef2dd9c2a06ab79a6cadfa495cee040e66bed0ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4605, "license_type": "no_license", "max_line_length": 112, "num_lines": 141, "path": "/Lab S12 - Heap/Control de lectura/heap.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n#Clase que representa un heap en una lsita\nclass Heap:\n\n #Función que inicializa las variables necesarias para 
la clase\n #Param: -policy: #True: max-heap; False: min-heap\n def __init__(self, policy):\n self.heap = []\n self.heap_size = None\n self.policy = policy\n\n # Función que calcula la posición del padre del elemento i\n # param: i: posición en la lista del elemento\n # Return : posición en la lista del padre del elemento i\n def parent(self, i):\n return i // 2\n\n # Función que calcula la posición del hijo izquierdo del elemento i\n # param: i: posición en la lista del elemento\n # Return : posición en la lista del hijo izquierdo del elemento i\n def left(self, i):\n return 2 * i\n\n # Función que calcula la posición del hijo derecho del elemento i\n # param: i: posición en la lista del elemento\n # Return : posición en la lista del hijo derecho del elemento i\n def right(self, i):\n return 2 * i + 1\n\n # Función que, según la política llama a max_heapify o min_heapify\n # param: i: Elemento para enviar a la función llamada\n def heapify (self, i):\n if self.policy is True:\n self.max_heapify(i)\n else:\n self.min_heapify(i)\n\n # Función que verifica si el elememto i es mayor a sus hijos, y si es menor lo intercambia con el hijo mayor\n # param: i: el elemento a comparar con sus dos hijos\n def max_heapify(self, i):\n l = self.left(i)\n r = self.right(i)\n if l <= self.heap_size and self.heap[l] > self.heap[i]:\n largest = l\n else:\n largest = i\n if r <= self.heap_size and self.heap[r] > self.heap[largest]:\n largest = r\n if largest != i:\n self.heap[i], self.heap[largest] = self.heap[largest], self.heap[i]\n self.heapify(largest)\n\n # Función que verifica si el elememto i es menor a sus hijos, y si es mayor lo intercambia con el hijo menor\n # param: i: el elemento a comparar con sus dos hijos\n def min_heapify(self, i):\n l = self.left(i)\n r = self.right(i)\n if l <= self.heap_size and self.heap[l] < self.heap[i]:\n largest = l\n else:\n largest = i\n if r <= self.heap_size and self.heap[r] < self.heap[largest]:\n largest = r\n if largest != i:\n 
self.heap[i], self.heap[largest] = self.heap[largest], self.heap[i]\n self.heapify(largest)\n\n # Función que se encarga de aplicar heapify para cada elemento del heap\n # param: A: la lista que se desea convertir en heap\n def build_heap(self, A):\n self.heap = A\n self.heap_size = len(A) - 1\n for i in range(self.heap_size, -1, -1):\n self.heapify(i)\n\n def maximum(self):\n return max(self.heap)\n\n def minimum(self):\n return min(self.heap)\n\n def extract_max(self):\n if self.policy == True:\n if self.heap_size < 1:\n return None\n max = self.heap[0]\n self.delete(0)\n return max\n\n def extract_min(self):\n if self.policy == False:\n if self.heap_size < 1:\n return None\n min = self.heap[0]\n self.delete(0)\n return min\n\n # función que retorna la lista que representa al heap\n def getHeap(self):\n return self.heap\n\n # función que inserta el elemento 'el' a la lista y reconstruye el heap\n # param : el: elemento a insertar\n def insert(self, el):\n self.heap.append(el)\n self.build_heap(self.heap)\n\n # función que borra el elemento ubicado en el indice 'index' y reconstruye el heap\n # param : index: indice del elemento a eliminar\n def delete(self, index):\n self.build_heap(self.heap[:index]+self.heap[index+1:])\n\n # Función que actualiza el valor del elemento en la posición 'index' y reconstruye el heap\n # param: index: indice del elemento a actualizar\n # newVal: valor nuevo del elemento en el indice 'index'\n def update(self, index, newVal):\n self.heap[index] = newVal\n self.build_heap(self.heap)\n\n\n# función encargada de generar una lista a partir de una entrada\n# return: lista de elementos\ndef entrada():\n return [int(x) for x in stdin.readline().strip().split()]\n\n\n# función principal\ndef main():\n lista = entrada()\n\n heap = Heap(True)\n heap.build_heap(lista)\n print(heap.getHeap())\n\n heap = Heap(False)\n heap.build_heap(lista)\n print(heap.getHeap())\n\n\nmain()\n" }, { "alpha_fraction": 0.5304384827613831, "alphanum_fraction": 
0.5342699289321899, "avg_line_length": 25.69318199157715, "blob_id": "31caff64755906bb1a77d7d0f026b28a8c6128c5", "content_id": "b9516186e823bdca7e61ad31fb425d7e2db4a4f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2349, "license_type": "no_license", "max_line_length": 86, "num_lines": 88, "path": "/Arenas/Parcial 2/deque1.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\nclass Node:\n def __init__(self, value, prev):\n self.prev = prev\n self.value = value\n\n def setPrev(self, node):\n self.prev = node\n\n def getPrev(self):\n return self.prev\n\n def getValue(self):\n return self.value\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def addNodeBottom(self, value):\n if self.tail == None:\n node = Node(value, None)\n self.head = node\n self.tail = node\n else:\n node = Node(value, None)\n self.head.setPrev(node)\n self.head = node\n\n def removeNodeTop(self):\n if self.tail == None:\n return None\n val = self.tail.getValue()\n newTail = self.tail.getPrev()\n if newTail is not None:\n self.tail = newTail\n else:\n self.tail = None\n self.head = None\n return val\n\n def isEmpty(self):\n return self.head == None\n\n def oneElement(self):\n return self.head == self.tail\n\n\n# import time\ndef main():\n # startTime = time.time()\n # input = open('input.txt', 'r')\n # output = open('output.txt', 'w')\n n = int(stdin.readline().strip())\n # n = int(input.readline().strip())\n while n != 0:\n deck = LinkedList()\n discarded = []\n for i in range(1, n + 1):\n deck.addNodeBottom(i)\n while not deck.oneElement():\n discarded.append(deck.removeNodeTop())\n savedCard = deck.removeNodeTop()\n deck.addNodeBottom(savedCard)\n discarded.append(deck.removeNodeTop())\n\n print('Discarded cards: ', end='')\n for i in range(len(discarded) - 2):\n print(str(discarded[i]) + ', ', end='')\n print( str(discarded[len(discarded) - 2]))\n 
print('Remaining card: ' + str(discarded[len(discarded) - 1]))\n\n # output.write('Discarded cards: ')\n # for i in range(len(discarded) - 2):\n # output.write(str(discarded[i]) + ', ')\n # output.write(str(discarded[len(discarded) - 2]) + '\\n')\n # output.write('Remaining card: ' + str(discarded[len(discarded) - 1]) + '\\n')\n\n n = int(stdin.readline().strip())\n # n = int(input.readline().strip())\n # print(time.time() - startTime)\n\n\nmain()\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.543987512588501, "avg_line_length": 24.972972869873047, "blob_id": "25770ad2a34b167888f56f6b0c73020c5f9d1380", "content_id": "5eefdfd47eb4f369960115cbb472e31d8457e785", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1923, "license_type": "no_license", "max_line_length": 103, "num_lines": 74, "path": "/Lab S14 - Arboles de expansion/DisjointSests.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "# Daniel Felipe Rincón Muñoz\nfrom sys import stdin\n\n# Clase DisjointSet\nclass DisjointSet:\n def __init__(self):\n self.setList = []\n\n def makeSet(self, rep):\n self.setList.append({rep})\n\n def findSet(self, rep):\n for st in self.setList:\n if rep in st:\n return st\n return set()\n\n def union(self, st1, st2):\n self.setList.remove(st1)\n if( st1 != st2 ):\n self.setList.remove(st2)\n self.setList.append(st1.union(st2))\n\n def getSetList(self):\n return self.setList\n\n def joinSets(self, r1, r2):\n self.union(self.findSet(r1), self.findSet(r2))\n\n def findConnectedRegions(self, graph):\n #Make Sets\n for v in graph['V']:\n self.makeSet(v)\n #For each relationship, Join Sets\n for arc in graph['E']:\n print('Before Join', arc, self.getSetList())\n self.joinSets(arc[0], arc[1])\n print('After Join', arc, self.getSetList())\n\n print(self.getSetList())\n\n# Funcion que se encarga de encontrar el grafo de menor costo en donde todos los nodos estén conectados\ndef 
MST_Kruskal(graph):\n mst = {'V': graph['V'], 'E': []}\n ds = DisjointSet()\n graph['E'] = sorted(graph['E'], key=lambda arc: (arc[2], arc[0], arc[1]))\n for vertex in graph['V']:\n ds.makeSet(vertex)\n for edge in graph['E']:\n s1, s2 = ds.findSet(edge[0]), ds.findSet(edge[1])\n if s1 != s2:\n mst['E'].append(edge)\n ds.union(s1,s2)\n return mst\n\n#Función principal\ndef main():\n graph = {'V': [], 'E': []}\n\n v = stdin.readline().strip().split()\n graph['V'] = v\n\n n = int(stdin.readline().strip())\n for i in range(n):\n e = stdin.readline().strip().split()\n edge = (e[0], e[1], int(e[2]))\n graph['E'].append(edge)\n mst = MST_Kruskal(graph)\n\n print(mst['V'])\n for e in mst['E']:\n print(e)\n\nmain()" }, { "alpha_fraction": 0.5050535202026367, "alphanum_fraction": 0.512485146522522, "avg_line_length": 22.858156204223633, "blob_id": "eaa0bf22537a17f77692530be83545946d1deaaf", "content_id": "9dcf922be4051fc24c131647cc97cf2f60b00551", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3364, "license_type": "no_license", "max_line_length": 70, "num_lines": 141, "path": "/Lab S07 - Lineales/4 - Double linkedList.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "# Linked List Node definition\nclass Node:\n def __init__(self, initdata):\n self.data = initdata\n self.prev = None\n self.next = None\n\n def getData(self):\n return self.data\n\n def getPrev(self):\n return self.prev\n\n def getNext(self):\n return self.next\n\n def setData(self, newdata):\n self.data = newdata\n\n def setPrev(self, newprev):\n self.prev = newprev\n\n def setNext(self, newnext):\n self.next = newnext\n\n\n# Linked List definition\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def isEmpty(self):\n return self.head == None\n\n def add(self, item):\n node = Node(item)\n node.setNext(self.head)\n if self.head != None:\n self.head.setPrev(node)\n self.head = node\n\n def size(self):\n current = 
self.head\n count = 0\n while current != None:\n count = count + 1\n current = current.getNext()\n return count\n\n def printList(self):\n current = self.head\n while current != None:\n if current.getPrev() != None:\n print('prev =', current.getPrev().getData(), end=', ')\n else:\n print('prev = ', end=', ')\n if current.getData() is not None:\n print('current =', current.getData(), end=', ')\n if current.getNext() != None:\n print('next =', current.getNext().getData())\n else:\n print('next = ')\n\n current = current.getNext()\n\n def search(self, item):\n current = self.head\n found = False\n while current != None and not found:\n if current.getData() == item:\n found = True\n else:\n current = current.getNext()\n\n return found\n\n def getHead(self):\n return self.head\n\n def getTail(self):\n current = self.head\n next = current.getNext()\n while next != None:\n current = next\n next = current.getNext()\n return current\n\n def remove(self, item):\n try:\n current = self.head\n previous = None\n next = None\n found = False\n while current != None and not found:\n if current.getData() == item:\n found = True\n else:\n previous = current\n current = current.getNext()\n\n if previous == None:\n self.head = current.getNext()\n else:\n previous.setNext(current.getNext())\n current.getNext().setPrev(previous)\n except:\n return -1\n\n\ndef join(lili1 : LinkedList, lili2 : LinkedList):\n tail = lili1.getTail()\n head = lili2.getHead()\n tail.setNext(head)\n head.setPrev(tail)\n return lili1\n\n\ndef main():\n lili = LinkedList()\n lili2 = LinkedList()\n\n #Insertar un nuevo elemento\n elements = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n for i in range(len(elements) - 1, -1, -1):\n lili.add(elements[i])\n lili.printList()\n print()\n\n #Eliminar un elemento\n lili.remove(5)\n lili.printList()\n print()\n\n #Unir dos listas\n for elt in elements:\n lili2.add(elt)\n joinedlili = join(lili, lili2)\n joinedlili.printList()\n\n\nmain()\n" }, { "alpha_fraction": 0.6387434601783752, 
"alphanum_fraction": 0.6387434601783752, "avg_line_length": 28.384614944458008, "blob_id": "7192b3f768adcafacae9e28d08561db33d7b7a8a", "content_id": "b4b3810ca79580de7d891846c1c5d87b87663283", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 105, "num_lines": 52, "path": "/Arenas/Arena 3/pilaocola.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "#Solución del problema pila o cola - Daniel Rincón\nfrom sys import stdin\nimport queue\n\n\n#Función que se encarga de iterar sobre la salida y verificar que los valores de pila y cola correspondan\n#Param: -pila: la pila construida con la entrada\n# -cola: la cola construida con la entrada\n# -salida: la salida que entrega el problema\n#Return: 'pila' si la salida corresponde a una pila, 'cola' si corresponde a una cola, y 'ambas'\ndef analize(pila, cola, salida):\n esPila = True\n esCola = True\n for item in salida:\n if pila.get() != item:\n esPila = False\n if cola.get() != item:\n esCola = False\n if esPila and esCola:\n return 'ambas'\n elif esPila:\n return 'pila'\n elif esCola:\n return 'cola'\n else:\n return 'ninguna'\n\n\n#Función que se encarga de construir una pila y una cola con la entrada\n#Param: -entrada: una lista de enteros que corresponden a la entrada\n#Return: la pila y la cola construidas\ndef buildDataStructures(entrada):\n pila = queue.LifoQueue()\n cola = queue.Queue()\n for item in entrada:\n pila.put(item)\n cola.put(item)\n return pila, cola\n\n\n#Función principal\ndef main():\n n = stdin.readline().strip()\n while n != '':\n entrada = [int(x) for x in stdin.readline().strip().split()]\n salida = [int(x) for x in stdin.readline().strip().split()]\n pila, cola = buildDataStructures(entrada)\n print(analize(pila, cola, salida))\n n = stdin.readline().strip()\n\n\nmain()\n" }, { "alpha_fraction": 0.5200246572494507, "alphanum_fraction": 0.5200246572494507, 
"avg_line_length": 22.521739959716797, "blob_id": "c112d422496ca86c9bb87ed5c8f06d22ea692dd3", "content_id": "07a5e0c2ec8ced9c7bbe6a5f8d00783d3dd3d3e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1623, "license_type": "no_license", "max_line_length": 52, "num_lines": 69, "path": "/Arenas/Parcial 2/banco.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "from sys import stdin\n\n\nclass Node:\n def __init__(self, value, prev, next):\n self.prev = prev\n self.next = next\n self.value = value\n\n def setNext(self, node):\n self.next = node\n\n def getNext(self):\n return self.next\n\n def setPrev(self, node):\n self.prev = node\n\n def getPrev(self):\n return self.prev\n\n def getValue(self):\n return self.value\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def addNodeBack(self, value):\n if self.head == None:\n node = Node(value, None, None)\n self.head = node\n self.tail = node\n else:\n node = Node(value, self.tail, None)\n self.tail.setNext(node)\n self.tail = node\n\n def removeNodeFront(self):\n if self.head == None:\n return None\n val = self.head.getValue()\n newHead = self.head.getNext()\n if newHead is not None:\n newHead.setPrev(None)\n self.head = newHead\n return val\n\n\ndef main():\n cases = int(stdin.readline().strip())\n for case in range(cases):\n bankQueue = LinkedList()\n instructions = int(stdin.readline().strip())\n for i in range(instructions):\n instruc = stdin.readline().strip()\n if instruc == 'Siguiente':\n next = bankQueue.removeNodeFront()\n if next == None:\n print('No hay fila')\n else:\n print(next)\n else:\n bankQueue.addNodeBack(instruc)\n\n\nmain()\n" }, { "alpha_fraction": 0.5849359035491943, "alphanum_fraction": 0.5865384340286255, "avg_line_length": 29.20967674255371, "blob_id": "e2e7bc8a4f7b41a64d89e04fb7f9385834bacd9f", "content_id": "367580091f3c3283c50aadd85240f90d9172a244", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1882, "license_type": "no_license", "max_line_length": 153, "num_lines": 62, "path": "/Lab S07 - Lineales/2 - parentesis.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "#SOLUCION AL PROBLEMA DE EMPAREJAMIENTO DE PARÉNTESIS - Daniel Rincón\nfrom sys import stdin\nimport queue\n\n\n#Función que se encarga de intentar colocar todos los parentesis de apertura en una pila, e intentar sacarlos cuando su correspondiente de cierre aparece\n#Param: -secuencia: cadena de parentesis\n# -pila:pila que se utilizará para operar\n# -parejas: diccionario que contiene las parejas válidas\n#Return: True si la cadena es válida, False si la cadena está incompleta, el índice en donde falla si la cadena es incorrecta\ndef analizar(secuencia, pila, parejas):\n for i in range(len(secuencia)):\n llave = secuencia[i]\n if llave in parejas:\n if pila.empty():\n return i\n top = pila.get()\n if top != parejas[llave]:\n return i\n else:\n pila.put(llave)\n if pila.empty():\n return True\n return False\n\n\n#Función principal\ndef main():\n secuencia = stdin.readline().strip()\n pila = queue.LifoQueue()\n parejas = {\")\":\"(\",\n \"}\":\"{\",\n \"]\":\"[\"}\n result = analizar(secuencia, pila, parejas)\n if result == True:\n print(True)\n elif result == False:\n print('Incomplete parenthesis (no se cerraron todos los abiertos)')\n else:\n print('Failed at:', result)\n\n\nmain()\n\n\n#función para validar los posibles casos\ndef pruebas():\n #Se usan los 3 casos de prueba de los que se pueden obtener las 3 posibles respuestas de la función\n inputs = ['{}[](){{}}', '{{{{]}))', '{{[[]]}']\n results = [True, 4, False]\n pila = queue.LifoQueue()\n parejas = {\")\": \"(\",\n \"}\": \"{\",\n \"]\": \"[\"}\n for i in range(len(inputs)):\n if analizar(inputs[i], pila, parejas) == results[i]:\n print('Caso correcto')\n else:\n print('Caso erroneo')\n\n\n#pruebas()" }, { "alpha_fraction": 
0.4849267899990082, "alphanum_fraction": 0.49784669280052185, "avg_line_length": 26.66666603088379, "blob_id": "2c02af85fea54a59909c76729f30ec11fcea0364", "content_id": "d51a25eba6d11d5b6c7e0e003cc1208c1facf8b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1161, "license_type": "no_license", "max_line_length": 61, "num_lines": 42, "path": "/Arenas/Parcial final/Dijkstra.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "import math\n\nclass Dijkstra:\n def __init__(self, nodes, edges):\n self.nodes = {}\n for node in nodes:\n self.nodes[node] = [math.inf, None]\n self.edges = edges\n\n def relax(self, edge):\n if edge[1].d > edge[0].d + edge[2]:\n edge[1].d = edge[0].d + edge[2]\n edge[1].pi = edge[0]\n return True\n return False\n\n def init(self, source):\n for node in self.nodes:\n node = [math.inf, None]\n self.nodes[source] = [0, None]\n\n def neigbours(self, node):\n neigh = []\n for edge in self.edges:\n if edge[0] == node:\n neigh.append(edge)\n\n return neigh\n\n def dijkstra(self, source):\n self.init(source)\n tempNodes = []\n tempNodes.append((source, self.nodes[source][0]))\n while tempNodes:\n tempNodes = sorted(tempNodes, key=lambda d: d[1])\n minNode = tempNodes.pop(0)[0]\n neigh = self.neigbours(minNode)\n for edge in neigh:\n ans =self.relax(edge)\n if ans:\n tempNodes.append(edge[1])\n return" }, { "alpha_fraction": 0.5248041749000549, "alphanum_fraction": 0.5456919074058533, "avg_line_length": 20.22222137451172, "blob_id": "c21cb7abf3f7175b3460daeea77ba5663eb48e23", "content_id": "77cc85968a6a0822132e91b8006140cc5801a7b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/Taller DyQ/Punto1.py", "repo_name": "danielrincon-m/AYED", "src_encoding": "UTF-8", "text": "\ndef minimo(lista):\n n = len(lista)\n if n == 1:\n return 
lista[0]\n elif n == 2:\n return min(lista[0], lista[1])\n return min(minimo(lista[0:n//2]), minimo(lista[n//2:n]))\n\n\ndef main():\n n = int(input('Numero de elementos: '))\n lista = []\n for i in range(n):\n lista.append(int(input('Escriba un elemento: ')))\n print(minimo(lista))\n\n\nmain()\n" } ]
49
TomGurimu/pythontest
https://github.com/TomGurimu/pythontest
433b7a15ce969702c8d36b7e1a8340ea66783619
63dfaa4124716d17e502c7d761bef898da54d790
0080bf47a4bf12ecb2bb545d5d2c4b9c86604696
refs/heads/master
2023-03-03T18:48:50.471205
2020-06-21T04:10:57
2020-06-21T04:10:57
273,818,831
0
0
null
2020-06-21T02:17:44
2020-06-21T04:11:06
2021-02-17T15:38:57
Python
[ { "alpha_fraction": 0.4590163826942444, "alphanum_fraction": 0.49180328845977783, "avg_line_length": 14, "blob_id": "9b6ec66fb157483824b431aaeba6eab37b6197e2", "content_id": "6b91396e1cfc0b4abfab7928cdd215ee93c89a39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 17, "num_lines": 4, "path": "/test.py", "repo_name": "TomGurimu/pythontest", "src_encoding": "UTF-8", "text": "print(\"test\");\ntt = \"tt\";\nif(tt == \"tt\"):\n\tprint(\"tuka12\");\n\n" } ]
1
EthanZhu90/TALL_Copy
https://github.com/EthanZhu90/TALL_Copy
f92c708d4b1002032f5b0f7ea08186030f06f9e1
cee3a08271abdba05e453c94f7f8f7615f8d6f62
1e586f007eaf0d0af64479734a13d279f24ad5d9
refs/heads/master
2021-03-22T04:28:38.939904
2018-05-23T01:16:18
2018-05-23T01:16:18
123,799,655
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6029725670814514, "alphanum_fraction": 0.6169819235801697, "avg_line_length": 48.33333206176758, "blob_id": "c7dd78ee86d3c795b64c63bb206c6595f0291588", "content_id": "23e5d0d1d5c7622ac821861ff2aadf25b1dd6084", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14062, "license_type": "permissive", "max_line_length": 141, "num_lines": 285, "path": "/ctrl_model_noContext.py", "repo_name": "EthanZhu90/TALL_Copy", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\n# from tensorflow.python.framework import dtypes\nimport tensorflow.contrib.rnn as rnn\n\nfrom util.cnn import fc_layer as fc\nimport vs_multilayer \nfrom dataset_noContext import TestingDataSet\nfrom dataset_noContext import TrainingDataSet\n\nimport pickle\n\nclass CTRL_Model(object):\n def __init__(self, batch_size, train_csv_path, test_csv_path, test_visual_feature_dir, train_visual_feature_dir,\n word_vector_dir, useLSTM=True):\n \n self.batch_size = batch_size\n self.test_batch_size = 56\n self.vs_lr = 0.005\n self.lambda_regression = 0.01\n self.alpha = 1.0/batch_size\n self.semantic_size = 1024 # the size of visual and semantic comparison size\n self.sentence_embedding_size = 4800\n self.visual_feature_dim = 4096\n\n self.useLSTM = useLSTM\n self.max_words_q = 15 # check later.\n self.rnn_layer = 2\n self.lstm_input_size = 300\n self.lstm_hidden_size = 512\n self.drop_out_rate = 0.2\n\n\n # LSTM model structure\n # encoder: RNN body\n # input_size: Deprecated and unused.\n self.lstm_1 = rnn.LSTMCell(num_units=self.lstm_hidden_size, state_is_tuple=False)\n self.lstm_dropout_1 = rnn.DropoutWrapper(self.lstm_1, output_keep_prob=1 - self.drop_out_rate)\n self.lstm_2 = rnn.LSTMCell(num_units=self.lstm_hidden_size, state_is_tuple=False)\n self.lstm_dropout_2 = rnn.DropoutWrapper(self.lstm_2, output_keep_prob=1 - self.drop_out_rate)\n self.stacked_lstm = rnn.MultiRNNCell([self.lstm_dropout_1, 
self.lstm_dropout_2], state_is_tuple=False)\n\n # word embedding vector\n self.word2idx, self.idx2word, self.embed_ques_W = self.build_vocabulary(word_vector_dir)\n\n # # state-embedding\n # self.embed_state_W = tf.Variable(\n # tf.random_uniform([2 * self.lstm_hidden_size * self.rnn_layer, self.dim_hidden], -0.08, 0.08),\n # name='embed_state_W')\n # self.embed_state_b = tf.Variable(tf.random_uniform([self.dim_hidden], -0.08, 0.08), name='embed_state_b')\n\n self.train_set = TrainingDataSet(train_visual_feature_dir, train_csv_path, self.batch_size, self.word2idx, useLSTM)\n self.test_set = TestingDataSet(test_visual_feature_dir, test_csv_path, self.test_batch_size, self.word2idx, useLSTM)\n\n '''\n given the word vector dict, return the vocabulary\n '''\n def build_vocabulary(self, word_vector_dir):\n word_vector_dict = pickle.load(open(word_vector_dir, 'rb'))\n idx2word = list()\n word2idx = dict()\n embed = list()\n # the first word 'unk'\n word2idx['unk'] = 0\n idx2word.append('unk')\n embed.append(np.zeros(self.lstm_input_size))\n cnt = 1\n for term in word_vector_dict:\n idx2word.append(term)\n word2idx[term] = cnt\n embed.append(word_vector_dict[term])\n cnt += 1\n embed_tensor = np.vstack(embed).astype(np.float32)\n return word2idx, idx2word, embed_tensor\n\n \n '''\n used in training alignment model, CTRL(aln)\n '''\t\n def fill_feed_dict_train(self):\n image_batch,sentence_batch,offset_batch = self.train_set.next_batch()\n input_feed = {\n self.visual_featmap_ph_train: image_batch,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch\n }\n\n return input_feed\n \n '''\n used in training alignment+regression model, CTRL(reg)\n '''\n def fill_feed_dict_train_reg(self):\n image_batch, sentence_batch, offset_batch, sent_len_batch = self.train_set.next_batch_iou()\n if self.useLSTM:\n input_feed = {\n self.visual_featmap_ph_train: image_batch,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch,\n 
self.sentence_ph_train_len: sent_len_batch\n }\n else:\n input_feed = {\n self.visual_featmap_ph_train: image_batch,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch\n }\n return input_feed\n\n \n '''\n cross modal processing module\n '''\n def cross_modal_comb(self, visual_feat, sentence_embed, batch_size):\n vv_feature = tf.reshape(tf.tile(visual_feat, [batch_size, 1]),\n [batch_size, batch_size, self.semantic_size])\n ss_feature = tf.reshape(tf.tile(sentence_embed,[1, batch_size]),[batch_size, batch_size, self.semantic_size])\n concat_feature = tf.reshape(tf.concat([vv_feature, ss_feature], 2),[batch_size, batch_size, self.semantic_size+self.semantic_size])\n print(concat_feature.get_shape().as_list())\n mul_feature = tf.multiply(vv_feature, ss_feature)\n add_feature = tf.add(vv_feature, ss_feature)\n \n comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature],2),[1, batch_size, batch_size, self.semantic_size*4])\n return comb_feature\n \n '''\n visual semantic inference, including visual semantic alignment and clip location regression\n '''\n def visual_semantic_infer(self, visual_feature_train, sentence_embed_train, visual_feature_test, sentence_embed_test,\n sentence_ph_train_len, sentence_ph_test_len):\n\n name=\"CTRL_Model\"\n with tf.variable_scope(name):\n print(\"Building training network...............................\\n\")\n transformed_clip_train = fc('v2s_lt', visual_feature_train, output_dim=self.semantic_size) \n transformed_clip_train_norm = tf.nn.l2_normalize(transformed_clip_train, dim=1)\n\n if self.useLSTM:\n sentence_embed_train = self.lstm_embed(sentence_embed_train, sentence_ph_train_len)\n\n transformed_sentence_train = fc('s2s_lt', sentence_embed_train, output_dim=self.semantic_size)\n transformed_sentence_train_norm = tf.nn.l2_normalize(transformed_sentence_train, dim=1) \n cross_modal_vec_train = self.cross_modal_comb(transformed_clip_train_norm, transformed_sentence_train_norm, 
self.batch_size)\n sim_score_mat_train = vs_multilayer.vs_multilayer(cross_modal_vec_train, \"vs_multilayer_lt\", middle_layer_dim=1000)\n sim_score_mat_train = tf.reshape(sim_score_mat_train,[self.batch_size, self.batch_size, 3])\n\n tf.get_variable_scope().reuse_variables()\n print(\"Building test network...............................\\n\")\n transformed_clip_test = fc('v2s_lt', visual_feature_test, output_dim=self.semantic_size)\n transformed_clip_test_norm = tf.nn.l2_normalize(transformed_clip_test, dim=1)\n\n if self.useLSTM:\n sentence_embed_test = self.lstm_embed(sentence_embed_test, sentence_ph_test_len)\n transformed_sentence_test = fc('s2s_lt', sentence_embed_test, output_dim=self.semantic_size)\n transformed_sentence_test_norm = tf.nn.l2_normalize(transformed_sentence_test, dim=1)\n\n cross_modal_vec_test = self.cross_modal_comb(transformed_clip_test_norm, transformed_sentence_test_norm, self.test_batch_size)\n sim_score_mat_test = vs_multilayer.vs_multilayer(cross_modal_vec_test, \"vs_multilayer_lt\", reuse=True, middle_layer_dim=1000)\n sim_score_mat_test = tf.reshape(sim_score_mat_test, [self.test_batch_size, self.test_batch_size, 3])\n\n cross_modal_vec_test_1 = self.cross_modal_comb(tf.reshape(transformed_clip_test_norm[1], shape=(1,1024)),\n tf.reshape(transformed_sentence_test_norm[1], shape=(1,1024)), 1)\n sim_score_mat_test_1 = vs_multilayer.vs_multilayer(cross_modal_vec_test_1, \"vs_multilayer_lt\", reuse=True, middle_layer_dim=1000)\n sim_score_mat_test_1 = tf.reshape(sim_score_mat_test_1, [3])\n return sim_score_mat_train, sim_score_mat_test, sim_score_mat_test_1\n\n def lstm_embed(self, sentences, sentence_ph_train_len):\n\n # state = [tf.zeros([self.batch_size, x]) for x in [self.lstm_hidden_size, self.lstm_hidden_size]]\n sent_1dim = tf.reshape(sentences, [-1, 1])\n sent_vector_2dim = tf.gather_nd(self.embed_ques_W, sent_1dim)\n sent_vector = tf.reshape(sent_vector_2dim, [int(sentences.shape[0]), int(sentences.shape[1]), -1])\n # 
embedding_lookup must contain a variable.\n # sent_vector = tf.nn.embedding_lookup(self.embed_ques_W, [int(sentences.shape[0]), int(sentences.shape[1]), -1])\n state = self.stacked_lstm.zero_state(sentences.shape[0], tf.float32)\n # inputs:[batch_size, max_time, size] if time_major = Flase.\n output, state = tf.nn.dynamic_rnn(self.stacked_lstm, inputs=sent_vector, sequence_length=sentence_ph_train_len,\n initial_state=state, dtype=tf.float32, time_major=False)\n\n state_drop = tf.nn.dropout(state, 1 - self.drop_out_rate)\n # state_linear = tf.nn.xw_plus_b(state_drop, self.embed_state_W, self.embed_state_b)\n # state_emb = tf.tanh(state_linear)\n\n return state_drop\n\n\n '''\n compute alignment and regression loss\n '''\n def compute_loss_reg(self, sim_reg_mat, offset_label):\n\n sim_score_mat, p_reg_mat, l_reg_mat = tf.split(sim_reg_mat, num_or_size_splits=3, axis=2)\n sim_score_mat = tf.reshape(sim_score_mat, [self.batch_size, self.batch_size])\n l_reg_mat = tf.reshape(l_reg_mat, [self.batch_size, self.batch_size])\n p_reg_mat = tf.reshape(p_reg_mat, [self.batch_size, self.batch_size])\n # unit matrix with -2\n I_2 = tf.diag(tf.constant(-2.0, shape=[self.batch_size]))\n all1 = tf.constant(1.0, shape=[self.batch_size, self.batch_size])\n # | -1 1 1... |\n\n # mask_mat = | 1 -1 -1... |\n\n # | 1 1 -1 ... 
|\n mask_mat = tf.add(I_2, all1)\n # loss cls, not considering iou\n I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))\n batch_para_mat = tf.constant(self.alpha, shape=[self.batch_size, self.batch_size])\n\n para_mat = tf.add(I,batch_para_mat)\n loss_mat = tf.log(tf.add(all1, tf.exp(tf.multiply(mask_mat, sim_score_mat))))\n loss_mat = tf.multiply(loss_mat, para_mat)\n loss_align = tf.reduce_mean(loss_mat)\n # regression loss\n\n l_reg_diag = tf.matmul(tf.multiply(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))\n p_reg_diag = tf.matmul(tf.multiply(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))\n offset_pred = tf.concat((p_reg_diag, l_reg_diag), 1)\n loss_reg = tf.reduce_mean(tf.abs(tf.subtract(offset_pred, offset_label)))\n\n loss=tf.add(tf.multiply(self.lambda_regression, loss_reg), loss_align)\n return loss, offset_pred, loss_reg\n\n\n def init_placeholder(self):\n visual_featmap_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))\n if self.useLSTM:\n # using LSTM, input is the idx of word\n sentence_ph_train = tf.placeholder(tf.int32, shape=(self.batch_size, self.max_words_q))\n sentence_ph_train_len = tf.placeholder(tf.int32, shape=(self.batch_size))\n else:\n sentence_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.sentence_embedding_size))\n sentence_ph_train_len = -1\n\n offset_ph = tf.placeholder(tf.float32, shape=(self.batch_size,2))\n visual_featmap_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.visual_feature_dim))\n\n if self.useLSTM:\n # using LSTM, input is the idx of word\n sentence_ph_test = tf.placeholder(tf.int32, shape=(self.test_batch_size, self.max_words_q))\n sentence_ph_test_len = tf.placeholder(tf.int32, shape=(self.test_batch_size))\n else:\n sentence_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.sentence_embedding_size))\n sentence_ph_test_len = -1\n return 
visual_featmap_ph_train,sentence_ph_train,offset_ph,visual_featmap_ph_test, sentence_ph_test, \\\n sentence_ph_train_len, sentence_ph_test_len\n \n\n def get_variables_by_name(self,name_list):\n v_list = tf.trainable_variables()\n v_dict = {}\n for name in name_list:\n v_dict[name] = []\n for v in v_list:\n for name in name_list:\n if name in v.name: v_dict[name].append(v)\n\n for name in name_list:\n print(\"Variables of <\"+name+\">\")\n for v in v_dict[name]:\n print(\" \"+v.name)\n return v_dict\n\n def training(self, loss):\n \n v_dict = self.get_variables_by_name([\"lt\"])\n vs_optimizer = tf.train.AdamOptimizer(self.vs_lr, name='vs_adam')\n vs_train_op = vs_optimizer.minimize(loss, var_list=v_dict[\"lt\"])\n return vs_train_op\n\n\n def construct_model(self):\n\n\n # initialize the placeholder\n self.visual_featmap_ph_train, self.sentence_ph_train, self.offset_ph, self.visual_featmap_ph_test, self.sentence_ph_test, \\\n self.sentence_ph_train_len, self.sentence_ph_test_len =self.init_placeholder()\n # build inference network\n sim_reg_mat, sim_reg_mat_test, sim_reg_mat_test_1 = self.visual_semantic_infer(self.visual_featmap_ph_train, self.sentence_ph_train,\n self.visual_featmap_ph_test, self.sentence_ph_test,\n self.sentence_ph_train_len, self.sentence_ph_test_len)\n # compute loss\n self.loss_align_reg, offset_pred, loss_reg = self.compute_loss_reg(sim_reg_mat, self.offset_ph)\n # optimize\n self.vs_train_op = self.training(self.loss_align_reg)\n return self.loss_align_reg, self.vs_train_op, sim_reg_mat_test, sim_reg_mat_test_1, offset_pred, loss_reg\n\n\n" }, { "alpha_fraction": 0.6235941648483276, "alphanum_fraction": 0.6343720555305481, "avg_line_length": 48.3294792175293, "blob_id": "de18b0e403d7e51add28180e2374a1ead13a060d", "content_id": "2a29fe05a0b291a9916ac7e9155856c567becf08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8536, "license_type": "permissive", "max_line_length": 172, 
"num_lines": 173, "path": "/ctrl_model.py", "repo_name": "EthanZhu90/TALL_Copy", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import dtypes\n\nfrom util.cnn import fc_layer as fc\nimport vs_multilayer \nfrom dataset import TestingDataSet\nfrom dataset import TrainingDataSet\n\n\nclass CTRL_Model(object):\n def __init__(self, batch_size, train_csv_path, test_csv_path, test_visual_feature_dir, train_visual_feature_dir):\n \n self.batch_size = batch_size\n self.test_batch_size = 1\n self.vs_lr = 0.005\n self.lambda_regression = 0.01\n self.alpha = 1.0/batch_size\n self.semantic_size = 1024 # the size of visual and semantic comparison size\n self.sentence_embedding_size = 4800\n self.visual_feature_dim = 4096*3\n self.train_set=TrainingDataSet(train_visual_feature_dir, train_csv_path, self.batch_size)\n self.test_set=TestingDataSet(test_visual_feature_dir, test_csv_path, self.test_batch_size)\n \n '''\n used in training alignment model, CTRL(aln)\n '''\t\n def fill_feed_dict_train(self):\n image_batch,sentence_batch,offset_batch = self.train_set.next_batch()\n input_feed = {\n self.visual_featmap_ph_train: image_batch,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch\n }\n\n return input_feed\n \n '''\n used in training alignment+regression model, CTRL(reg)\n '''\n def fill_feed_dict_train_reg(self):\n image_batch, sentence_batch, offset_batch = self.train_set.next_batch_iou()\n input_feed = {\n self.visual_featmap_ph_train: image_batch,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch\n }\n\n return input_feed\n\n \n '''\n cross modal processing module\n '''\n def cross_modal_comb(self, visual_feat, sentence_embed, batch_size):\n vv_feature = tf.reshape(tf.tile(visual_feat, [batch_size, 1]),\n [batch_size, batch_size, self.semantic_size])\n ss_feature = tf.reshape(tf.tile(sentence_embed,[1, batch_size]),[batch_size, batch_size, self.semantic_size])\n 
concat_feature = tf.reshape(tf.concat([vv_feature, ss_feature], 2),[batch_size, batch_size, self.semantic_size+self.semantic_size])\n print(concat_feature.get_shape().as_list())\n mul_feature = tf.multiply(vv_feature, ss_feature)\n add_feature = tf.add(vv_feature, ss_feature)\n \n comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature],2),[1, batch_size, batch_size, self.semantic_size*4])\n return comb_feature\n \n '''\n visual semantic inference, including visual semantic alignment and clip location regression\n '''\n def visual_semantic_infer(self, visual_feature_train, sentence_embed_train, visual_feature_test, sentence_embed_test):\n name=\"CTRL_Model\"\n with tf.variable_scope(name):\n print(\"Building training network...............................\\n\")\n transformed_clip_train = fc('v2s_lt', visual_feature_train, output_dim=self.semantic_size) \n transformed_clip_train_norm = tf.nn.l2_normalize(transformed_clip_train, dim=1)\n transformed_sentence_train = fc('s2s_lt', sentence_embed_train, output_dim=self.semantic_size)\n transformed_sentence_train_norm = tf.nn.l2_normalize(transformed_sentence_train, dim=1) \n cross_modal_vec_train = self.cross_modal_comb(transformed_clip_train_norm, transformed_sentence_train_norm, self.batch_size)\n sim_score_mat_train = vs_multilayer.vs_multilayer(cross_modal_vec_train, \"vs_multilayer_lt\", middle_layer_dim=1000)\n sim_score_mat_train = tf.reshape(sim_score_mat_train,[self.batch_size, self.batch_size, 3])\n\n tf.get_variable_scope().reuse_variables()\n print(\"Building test network...............................\\n\")\n transformed_clip_test = fc('v2s_lt', visual_feature_test, output_dim=self.semantic_size)\n transformed_clip_test_norm = tf.nn.l2_normalize(transformed_clip_test, dim=1)\n transformed_sentence_test = fc('s2s_lt', sentence_embed_test, output_dim=self.semantic_size)\n transformed_sentence_test_norm = tf.nn.l2_normalize(transformed_sentence_test, dim=1)\n cross_modal_vec_test = 
self.cross_modal_comb(transformed_clip_test_norm, transformed_sentence_test_norm, self.test_batch_size)\n sim_score_mat_test = vs_multilayer.vs_multilayer(cross_modal_vec_test, \"vs_multilayer_lt\", reuse=True, middle_layer_dim=1000)\n sim_score_mat_test = tf.reshape(sim_score_mat_test, [3])\n\n return sim_score_mat_train, sim_score_mat_test\n\n '''\n compute alignment and regression loss\n '''\n def compute_loss_reg(self, sim_reg_mat, offset_label):\n\n sim_score_mat, p_reg_mat, l_reg_mat = tf.split(sim_reg_mat, num_or_size_splits=3, axis=2)\n sim_score_mat = tf.reshape(sim_score_mat, [self.batch_size, self.batch_size])\n l_reg_mat = tf.reshape(l_reg_mat, [self.batch_size, self.batch_size])\n p_reg_mat = tf.reshape(p_reg_mat, [self.batch_size, self.batch_size])\n # unit matrix with -2\n I_2 = tf.diag(tf.constant(-2.0, shape=[self.batch_size]))\n all1 = tf.constant(1.0, shape=[self.batch_size, self.batch_size])\n # | -1 1 1... |\n\n # mask_mat = | 1 -1 -1... |\n\n # | 1 1 -1 ... |\n mask_mat = tf.add(I_2, all1)\n # loss cls, not considering iou\n I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))\n batch_para_mat = tf.constant(self.alpha, shape=[self.batch_size, self.batch_size])\n\n para_mat = tf.add(I,batch_para_mat)\n loss_mat = tf.log(tf.add(all1, tf.exp(tf.multiply(mask_mat, sim_score_mat))))\n loss_mat = tf.multiply(loss_mat, para_mat)\n loss_align = tf.reduce_mean(loss_mat)\n # regression loss\n\n l_reg_diag = tf.matmul(tf.multiply(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))\n p_reg_diag = tf.matmul(tf.multiply(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))\n offset_pred = tf.concat((p_reg_diag, l_reg_diag), 1)\n loss_reg = tf.reduce_mean(tf.abs(tf.subtract(offset_pred, offset_label)))\n\n loss=tf.add(tf.multiply(self.lambda_regression, loss_reg), loss_align)\n return loss, offset_pred, loss_reg\n\n\n def init_placeholder(self):\n visual_featmap_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, 
self.visual_feature_dim))\n sentence_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.sentence_embedding_size))\n offset_ph = tf.placeholder(tf.float32, shape=(self.batch_size,2))\n visual_featmap_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.visual_feature_dim))\n sentence_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.sentence_embedding_size))\n\n return visual_featmap_ph_train,sentence_ph_train,offset_ph,visual_featmap_ph_test,sentence_ph_test\n \n\n def get_variables_by_name(self,name_list):\n v_list = tf.trainable_variables()\n v_dict = {}\n for name in name_list:\n v_dict[name] = []\n for v in v_list:\n for name in name_list:\n if name in v.name: v_dict[name].append(v)\n\n for name in name_list:\n print(\"Variables of <\"+name+\">\")\n for v in v_dict[name]:\n print(\" \"+v.name)\n return v_dict\n\n def training(self, loss):\n \n v_dict = self.get_variables_by_name([\"lt\"])\n vs_optimizer = tf.train.AdamOptimizer(self.vs_lr, name='vs_adam')\n vs_train_op = vs_optimizer.minimize(loss, var_list=v_dict[\"lt\"])\n return vs_train_op\n\n\n def construct_model(self):\n # initialize the placeholder\n self.visual_featmap_ph_train, self.sentence_ph_train, self.offset_ph, self.visual_featmap_ph_test, self.sentence_ph_test=self.init_placeholder()\n # build inference network\n sim_reg_mat, sim_reg_mat_test = self.visual_semantic_infer(self.visual_featmap_ph_train, self.sentence_ph_train, self.visual_featmap_ph_test, self.sentence_ph_test)\n # compute loss\n self.loss_align_reg, offset_pred, loss_reg = self.compute_loss_reg(sim_reg_mat, self.offset_ph)\n # optimize\n self.vs_train_op = self.training(self.loss_align_reg)\n return self.loss_align_reg, self.vs_train_op, sim_reg_mat_test, offset_pred, loss_reg\n\n\n" }, { "alpha_fraction": 0.7906976938247681, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 20.5, "blob_id": "712085b7f193f767efd9a43595c21fdc138faada", "content_id": 
"3f89d6d836ace6d8d2acb52a4ed4260e2959120f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "permissive", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "EthanZhu90/TALL_Copy", "src_encoding": "UTF-8", "text": "# TALL_Copy\na modification version of TALL\n" }, { "alpha_fraction": 0.6092320084571838, "alphanum_fraction": 0.621774435043335, "avg_line_length": 51.28494644165039, "blob_id": "ba34e42019b57c088bcf92649eaf28810c1746b8", "content_id": "0c7fa9ce04394ff2727c3f1b89c3bb8d6f05a00c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9727, "license_type": "permissive", "max_line_length": 152, "num_lines": 186, "path": "/ctrl_model_noContext_dtfv_covideo_clip.py", "repo_name": "EthanZhu90/TALL_Copy", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import dtypes\n\nfrom util.cnn import fc_layer as fc\nimport vs_multilayer \nfrom dataset_noContext_dtfv_covideo_clip import TestingDataSet\nfrom dataset_noContext_dtfv_covideo_clip import TrainingDataSet\n\n\nclass CTRL_Model(object):\n def __init__(self, batch_size, train_csv_path, test_csv_path, test_visual_feature_dir, train_visual_feature_dir):\n \n self.batch_size = batch_size\n self.test_batch_size = 1\n self.vs_lr = 0.005\n self.lambda_regression = 0.01\n self.alpha = 1.0/batch_size\n self.semantic_size = 2048 # 3072 # 2048 # 2048 # the size of visual and semantic comparison size\n self.sentence_embedding_size = 4800\n self.visual_feature_dim = 21800 # 43600\n self.train_set=TrainingDataSet(train_visual_feature_dir, train_csv_path, self.batch_size)\n self.test_set=TestingDataSet(test_visual_feature_dir, test_csv_path, self.test_batch_size)\n \n '''\n used in training alignment model, CTRL(aln)\n '''\t\n def fill_feed_dict_train(self):\n image_batch,sentence_batch,offset_batch 
= self.train_set.next_batch()\n input_feed = {\n self.visual_featmap_ph_train: image_batch,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch\n }\n\n return input_feed\n \n '''\n used in training alignment+regression model, CTRL(reg)\n '''\n def fill_feed_dict_train_reg(self):\n image_batch_pos, image_batch_neg, sentence_batch, offset_batch = self.train_set.next_batch_iou()\n input_feed = {\n self.visual_featmap_ph_train_pos: image_batch_pos,\n self.visual_featmap_ph_train_neg: image_batch_neg,\n self.sentence_ph_train: sentence_batch,\n self.offset_ph: offset_batch\n }\n\n return input_feed\n\n \n '''\n cross modal processing module\n '''\n def cross_modal_comb_toremve(self, visual_feat, sentence_embed, batch_size):\n vv_feature = tf.reshape(tf.tile(visual_feat, [batch_size, 1]),\n [batch_size, batch_size, self.semantic_size])\n ss_feature = tf.reshape(tf.tile(sentence_embed,[1, batch_size]),[batch_size, batch_size, self.semantic_size])\n concat_feature = tf.reshape(tf.concat([vv_feature, ss_feature], 2),[batch_size, batch_size, self.semantic_size+self.semantic_size])\n print(concat_feature.get_shape().as_list())\n mul_feature = tf.multiply(vv_feature, ss_feature)\n add_feature = tf.add(vv_feature, ss_feature)\n \n comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature],2),[1, batch_size, batch_size, self.semantic_size*4])\n return comb_feature\n\n\n\n def cross_modal_comb(self, visual_feat, sentence_embed, batch_size):\n concat_feature = tf.concat([visual_feat, sentence_embed], 1)\n # print(concat_feature.get_shape().as_list())\n mul_feature = tf.multiply(visual_feat, sentence_embed)\n add_feature = tf.add(visual_feat, sentence_embed)\n comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature], 1),\n [1, 1, visual_feat.get_shape().as_list()[0], self.semantic_size*4])\n\n return comb_feature\n '''\n visual semantic inference, including visual semantic alignment and clip location regression\n '''\n 
def visual_semantic_infer(self, visual_feature_train_pos, visual_feature_train_neg, sentence_embed_train, visual_feature_test, sentence_embed_test):\n name=\"CTRL_Model\"\n with tf.variable_scope(name):\n print(\"Building training network...............................\\n\")\n transformed_clip_train_mix = fc('v2s_lt', tf.concat([visual_feature_train_pos, visual_feature_train_neg], 0), output_dim=self.semantic_size)\n transformed_clip_train_norm_mix = tf.nn.l2_normalize(transformed_clip_train_mix, dim=1)\n\n transformed_sentence_train = fc('s2s_lt', sentence_embed_train, output_dim=self.semantic_size)\n transformed_sentence_train_norm = tf.nn.l2_normalize(transformed_sentence_train, dim=1)\n cross_modal_vec_train_mix = self.cross_modal_comb(transformed_clip_train_norm_mix,\n tf.tile(transformed_sentence_train_norm, [2,1]),\n self.batch_size)\n\n sim_score_mat_train_mix = vs_multilayer.vs_multilayer(cross_modal_vec_train_mix, \"vs_multilayer_lt\", middle_layer_dim=1000)\n sim_score_mat_train_mix = tf.reshape(sim_score_mat_train_mix, [self.batch_size*2, 3])\n\n tf.get_variable_scope().reuse_variables()\n print(\"Building test network...............................\\n\")\n transformed_clip_test = fc('v2s_lt', visual_feature_test, output_dim=self.semantic_size)\n transformed_clip_test_norm = tf.nn.l2_normalize(transformed_clip_test, dim=1)\n transformed_sentence_test = fc('s2s_lt', sentence_embed_test, output_dim=self.semantic_size)\n transformed_sentence_test_norm = tf.nn.l2_normalize(transformed_sentence_test, dim=1)\n cross_modal_vec_test = self.cross_modal_comb(transformed_clip_test_norm, transformed_sentence_test_norm, self.test_batch_size)\n sim_score_mat_test = vs_multilayer.vs_multilayer(cross_modal_vec_test, \"vs_multilayer_lt\", reuse=True, middle_layer_dim=1000)\n sim_score_mat_test = tf.reshape(sim_score_mat_test, [3])\n\n return sim_score_mat_train_mix, sim_score_mat_test\n\n '''\n compute alignment and regression loss\n '''\n def compute_loss_reg(self, 
sim_reg_mat_mix, offset_label):\n # sim_reg_mat_pos = sim_reg_mat_mix[:sim_reg_mat_mix.get_shape().as_list()[0]/2]\n # sim_reg_mat_neg = sim_reg_mat_mix[sim_reg_mat_mix.get_shape().as_list()[0]/2:]\n sim_score_mat, _, _ = tf.split(sim_reg_mat_mix, num_or_size_splits=3, axis=1)\n\n mask_mat = tf.concat((tf.constant(-1.0, shape=[self.batch_size]), tf.constant(1.0, shape=[self.batch_size])), 0)\n all1 = tf.constant(1.0, shape=[self.batch_size*2])\n loss_mat = tf.log(tf.add(all1, tf.exp(tf.multiply(mask_mat, tf.squeeze(sim_score_mat)))))\n loss_align = tf.reduce_mean(loss_mat)\n\n\n # regression loss\n _, p_reg_mat, l_reg_mat = tf.split(sim_reg_mat_mix[:self.batch_size], num_or_size_splits=3, axis=1)\n #I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))\n #l_reg_diag = tf.matmul(tf.multiply(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))\n #p_reg_diag = tf.matmul(tf.multiply(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))\n offset_pred = tf.concat((p_reg_mat, l_reg_mat), 1)\n loss_reg = tf.reduce_mean(tf.abs(tf.subtract(offset_pred, offset_label)))\n\n loss_1 = tf.multiply(self.lambda_regression, loss_reg)\n loss=tf.add(loss_1, loss_align)\n return loss, loss_mat, loss_align, offset_pred, loss_reg\n\n\n def init_placeholder(self):\n visual_featmap_ph_train_pos = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))\n visual_featmap_ph_train_neg = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))\n sentence_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.sentence_embedding_size))\n offset_ph = tf.placeholder(tf.float32, shape=(self.batch_size,2))\n visual_featmap_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.visual_feature_dim))\n sentence_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.sentence_embedding_size))\n\n return visual_featmap_ph_train_pos, visual_featmap_ph_train_neg, \\\n sentence_ph_train,offset_ph, 
visual_featmap_ph_test, sentence_ph_test\n \n\n def get_variables_by_name(self,name_list):\n v_list = tf.trainable_variables()\n v_dict = {}\n for name in name_list:\n v_dict[name] = []\n for v in v_list:\n for name in name_list:\n if name in v.name: v_dict[name].append(v)\n\n for name in name_list:\n print(\"Variables of <\"+name+\">\")\n for v in v_dict[name]:\n print(\" \"+v.name)\n return v_dict\n\n def training(self, loss):\n \n v_dict = self.get_variables_by_name([\"lt\"])\n vs_optimizer = tf.train.AdamOptimizer(self.vs_lr, name='vs_adam')\n vs_train_op = vs_optimizer.minimize(loss, var_list=v_dict[\"lt\"])\n return vs_train_op\n\n\n def construct_model(self):\n # initialize the placeholder\n self.visual_featmap_ph_train_pos, self.visual_featmap_ph_train_neg, self.sentence_ph_train, self.offset_ph, \\\n self.visual_featmap_ph_test, self.sentence_ph_test=self.init_placeholder()\n\n # build inference network\n sim_reg_mat_mix, sim_reg_mat_test = self.visual_semantic_infer(self.visual_featmap_ph_train_pos,\n self.visual_featmap_ph_train_neg,\n self.sentence_ph_train,\n self.visual_featmap_ph_test, self.sentence_ph_test)\n # compute loss\n self.loss_align_reg, loss_1, loss_align, offset_pred, loss_reg = self.compute_loss_reg(sim_reg_mat_mix, self.offset_ph)\n # optimize\n self.vs_train_op = self.training(self.loss_align_reg)\n return self.loss_align_reg, loss_1, loss_align, self.vs_train_op, sim_reg_mat_test, offset_pred, loss_reg\n\n\n" }, { "alpha_fraction": 0.5489454865455627, "alphanum_fraction": 0.5591490864753723, "avg_line_length": 51.01658630371094, "blob_id": "dabdf8245daf1b4f5d63e0067a67c4d38a360d2f", "content_id": "7d5a2315e78d23335e1460be165f6306930f7427", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21953, "license_type": "permissive", "max_line_length": 151, "num_lines": 422, "path": "/dataset_noContext.py", "repo_name": "EthanZhu90/TALL_Copy", "src_encoding": "UTF-8", "text": 
"import numpy as np\nfrom math import sqrt\nimport os\nimport random\nimport pickle\n\nfrom sklearn.preprocessing import normalize\n\n\n'''\ncalculate temporal intersection over union\n'''\ndef calculate_IoU(i0, i1):\n union = (min(i0[0], i1[0]), max(i0[1], i1[1]))\n inter = (max(i0[0], i1[0]), min(i0[1], i1[1]))\n iou = 1.0*(inter[1]-inter[0])/(union[1]-union[0])\n return iou\n\n'''\ncalculate the non Intersection part over Length ratia, make sure the input IoU is larger than 0\n'''\ndef calculate_nIoL(base, sliding_clip):\n inter = (max(base[0], sliding_clip[0]), min(base[1], sliding_clip[1]))\n inter_l = inter[1]-inter[0]\n length = sliding_clip[1]-sliding_clip[0]\n nIoL = 1.0*(length-inter_l)/length\n return nIoL\n\nclass TrainingDataSet(object):\n def __init__(self, sliding_dir, it_path, batch_size, word2idx, useLSTM=True):\n\n self.useLSTM = useLSTM\n self.counter = 0\n self.batch_size = batch_size\n self.context_num = 1\n self.context_size = 128\n\n print(\"Reading training data list from \"+it_path)\n # cs = pickle.load(open(it_path, 'rb'), encoding='bytes')\n cs = pickle.load(open(it_path, 'rb'))\n movie_length_info = pickle.load(open(\"./video_allframes_info.pkl\", 'rb'))\n self.clip_sentence_pairs = []\n for l in cs:\n clip_name = l[0]\n sent_vecs = l[1]\n for sent_vec in sent_vecs:\n self.clip_sentence_pairs.append((clip_name, sent_vec))\n\n movie_names_set = set()\n self.movie_clip_names = {}\n # read groundtruth sentence-clip pairs\n for k in range(len(self.clip_sentence_pairs)):\n clip_name = self.clip_sentence_pairs[k][0]\n movie_name = clip_name.split(\"_\")[0]\n if not movie_name in movie_names_set:\n movie_names_set.add(movie_name)\n self.movie_clip_names[movie_name] = []\n self.movie_clip_names[movie_name].append(k)\n self.movie_names = list(movie_names_set)\n\n self.word2idx = word2idx\n self.max_words_q = 15\n self.visual_feature_dim = 4096\n self.sent_vec_dim = 4800\n self.num_samples = len(self.clip_sentence_pairs)\n self.sliding_clip_path = 
sliding_dir\n print(str(len(self.clip_sentence_pairs))+\" clip-sentence pairs are readed\")\n\n if not useLSTM:\n # read sliding windows, and match them with the groundtruths to make training samples\n sliding_clips_tmp = os.listdir(self.sliding_clip_path)\n if os.path.exists('clip_sentence_pairs_iou.pkl'):\n print(\"Loading data from {}\".format('clip_sentence_pairs_iou.pkl'))\n with open('clip_sentence_pairs_iou.pkl', 'rb') as input:\n self.clip_sentence_pairs_iou = pickle.load(input)\n self.num_samples_iou = len(self.clip_sentence_pairs_iou)\n print(str(len(self.clip_sentence_pairs_iou)) + \" iou clip-sentence pairs are readed\")\n return\n\n self.clip_sentence_pairs_iou = []\n for clip_name in sliding_clips_tmp:\n if clip_name.split(\".\")[2]==\"npy\":\n movie_name = clip_name.split(\"_\")[0]\n for clip_sentence in self.clip_sentence_pairs:\n original_clip_name = clip_sentence[0]\n original_movie_name = original_clip_name.split(\"_\")[0]\n if original_movie_name==movie_name:\n start = int(clip_name.split(\"_\")[1])\n end = int(clip_name.split(\"_\")[2].split(\".\")[0])\n o_start = int(original_clip_name.split(\"_\")[1])\n o_end = int(original_clip_name.split(\"_\")[2].split(\".\")[0])\n iou = calculate_IoU((start, end), (o_start, o_end))\n if iou>0.5:\n nIoL=calculate_nIoL((o_start, o_end), (start, end))\n if nIoL<0.15:\n movie_length = movie_length_info[movie_name.split(\".\")[0]]\n start_offset =o_start-start\n end_offset = o_end-end\n self.clip_sentence_pairs_iou.append((clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))\n self.num_samples_iou = len(self.clip_sentence_pairs_iou)\n print(str(len(self.clip_sentence_pairs_iou))+\" iou clip-sentence pairs are readed\")\n with open('clip_sentence_pairs_iou.pkl', 'wb') as output:\n print(\"Saving clip_sentence_pairs_iou\")\n pickle.dump(self.clip_sentence_pairs_iou, output)\n else:\n\n # read sliding windows, and match them with the groundtruths to make training samples\n sliding_clips_tmp = 
os.listdir(self.sliding_clip_path)\n if os.path.exists('clip_sentence_pairs_iou_LSTM.pkl'):\n print(\"Loading data from {}\".format('clip_sentence_pairs_iou_LSTM.pkl'))\n with open('clip_sentence_pairs_iou_LSTM.pkl', 'rb') as input:\n self.clip_sentence_pairs_iou = pickle.load(input)\n self.num_samples_iou = len(self.clip_sentence_pairs_iou)\n print(str(len(self.clip_sentence_pairs_iou)) + \" iou clip-sentence pairs are readed\")\n return\n print('Preparing clip_sentence_pairs_iou_LSTM.pkl')\n self.clip_sentence_pairs_iou = []\n for idx, clip_name in enumerate(sliding_clips_tmp):\n if idx%1000 == 0 and idx:\n print(\"processing [{}/{}]\".format(idx, len(sliding_clips_tmp)))\n if clip_name.split(\".\")[2] == \"npy\":\n movie_name = clip_name.split(\"_\")[0]\n for clip_sentence in self.clip_sentence_pairs:\n original_clip_name = clip_sentence[0]\n original_movie_name = original_clip_name.split(\"_\")[0]\n if original_movie_name == movie_name:\n start = int(clip_name.split(\"_\")[1])\n end = int(clip_name.split(\"_\")[2].split(\".\")[0])\n o_start = int(original_clip_name.split(\"_\")[1])\n o_end = int(original_clip_name.split(\"_\")[2].split(\".\")[0])\n iou = calculate_IoU((start, end), (o_start, o_end))\n if iou > 0.5:\n nIoL = calculate_nIoL((o_start, o_end), (start, end))\n if nIoL < 0.15:\n movie_length = movie_length_info[movie_name.split(\".\")[0]]\n start_offset = o_start - start\n end_offset = o_end - end\n self.clip_sentence_pairs_iou.append(\n (clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))\n self.num_samples_iou = len(self.clip_sentence_pairs_iou)\n print(str(len(self.clip_sentence_pairs_iou)) + \" iou clip-sentence pairs are readed\")\n with open('clip_sentence_pairs_iou_LSTM.pkl', 'wb') as output:\n print(\"Saving clip_sentence_pairs_iou\")\n pickle.dump(self.clip_sentence_pairs_iou, output)\n # exit()\n a = 1\n '''\n compute left (pre) and right (post) context features\n '''\n def get_context_window(self, clip_name, 
win_length):\n movie_name = clip_name.split(\"_\")[0]\n start = int(clip_name.split(\"_\")[1])\n end = int(clip_name.split(\"_\")[2].split(\".\")[0])\n clip_length = self.context_size\n left_context_feats = np.zeros([win_length, 4096], dtype=np.float32)\n right_context_feats = np.zeros([win_length, 4096], dtype=np.float32)\n last_left_feat = np.load(self.sliding_clip_path+clip_name)\n last_right_feat = np.load(self.sliding_clip_path+clip_name)\n for k in range(win_length):\n left_context_start = start-clip_length*(k+1)\n left_context_end = start-clip_length*k\n right_context_start = end+clip_length*k\n right_context_end = end+clip_length*(k+1)\n left_context_name = movie_name+\"_\"+str(left_context_start)+\"_\"+str(left_context_end)+\".npy\"\n right_context_name = movie_name+\"_\"+str(right_context_start)+\"_\"+str(right_context_end)+\".npy\"\n if os.path.exists(self.sliding_clip_path+left_context_name):\n left_context_feat = np.load(self.sliding_clip_path+left_context_name)\n last_left_feat = left_context_feat\n else:\n left_context_feat = last_left_feat\n if os.path.exists(self.sliding_clip_path+right_context_name):\n right_context_feat = np.load(self.sliding_clip_path+right_context_name)\n last_right_feat = right_context_feat\n else:\n right_context_feat = last_right_feat\n left_context_feats[k] = left_context_feat\n right_context_feats[k] = right_context_feat\n return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)\n \n '''\n read next batch of training data, this function is used for training CTRL-aln\n '''\n def next_batch(self):\n \n random_batch_index = random.sample(range(self.num_samples), self.batch_size)\n image_batch = np.zeros([self.batch_size, self.visual_feature_dim])\n sentence_batch = np.zeros([self.batch_size, self.sent_vec_dim])\n offset_batch = np.zeros([self.batch_size, 2], dtype=np.float32) # this one is actually useless\n index = 0\n clip_set=set()\n while index < self.batch_size:\n k = random_batch_index[index]\n 
clip_name = self.clip_sentence_pairs[k][0]\n if not clip_name in clip_set: \n clip_set.add(clip_name)\n feat_path = self.image_dir+self.clip_sentence_pairs[k][0]+\".npy\"\n featmap = np.load(feat_path)\n image_batch[index,:] = featmap\n sentence_batch[index,:] = self.clip_sentence_pairs[k][1][:self.sent_vec_dim]\n\n index+=1\n else:\n r = random.choice(range(self.num_samples))\n random_batch_index[index] = r\n continue \n \n return image_batch, sentence_batch, offset_batch\n\n '''\n read next batch of training data, this function is used for training CTRL-reg\n '''\n\n\n def next_batch_iou(self):\n\n random_batch_index = random.sample(range(self.num_samples_iou), self.batch_size)\n image_batch = np.zeros([self.batch_size, self.visual_feature_dim])\n if self.useLSTM:\n # input is word index\n sentence_len_batch= np.zeros(self.batch_size, dtype=np.int32)\n sentence_batch = np.zeros([self.batch_size, self.max_words_q],dtype=np.int32)\n offset_batch = np.zeros([self.batch_size, 2], dtype=np.float32)\n index = 0\n clip_set = set()\n while index < self.batch_size:\n k = random_batch_index[index]\n clip_name = self.clip_sentence_pairs_iou[k][0]\n if not clip_name in clip_set:\n clip_set.add(clip_name)\n feat_path = self.sliding_clip_path + self.clip_sentence_pairs_iou[k][2]\n featmap = np.load(feat_path)\n # read context features\n # left_context_feat, right_context_feat = self.get_context_window(self.clip_sentence_pairs_iou[k][2], self.context_num)\n image_batch[index, :] = featmap # .hstack((left_context_feat, featmap, right_context_feat))\n sent_idx_vector = [self.word2idx[_i] for _i in self.clip_sentence_pairs_iou[k][1].split()[:self.max_words_q]]\n sentence_len_batch[index] = len(sent_idx_vector)\n # padding with 0 to max length(15)\n sent_idx_vector += [0]*(self.max_words_q - len(sent_idx_vector))\n sentence_batch[index, :] = np.asarray(sent_idx_vector)\n\n p_offset = self.clip_sentence_pairs_iou[k][3]\n l_offset = self.clip_sentence_pairs_iou[k][4]\n 
offset_batch[index, 0] = p_offset\n offset_batch[index, 1] = l_offset\n index += 1\n else:\n r = random.choice(range(self.num_samples_iou))\n random_batch_index[index] = r\n continue\n\n return image_batch, sentence_batch, offset_batch, sentence_len_batch\n else:\n # input is the sentence vector from skip-thought\n sentence_batch = np.zeros([self.batch_size, self.sent_vec_dim])\n offset_batch = np.zeros([self.batch_size, 2], dtype=np.float32)\n index = 0\n clip_set = set()\n while index < self.batch_size:\n k = random_batch_index[index]\n clip_name = self.clip_sentence_pairs_iou[k][0]\n if not clip_name in clip_set:\n clip_set.add(clip_name)\n feat_path = self.sliding_clip_path+self.clip_sentence_pairs_iou[k][2]\n featmap = np.load(feat_path)\n # read context features\n # left_context_feat, right_context_feat = self.get_context_window(self.clip_sentence_pairs_iou[k][2], self.context_num)\n image_batch[index,:] = featmap # .hstack((left_context_feat, featmap, right_context_feat))\n sentence_batch[index,:] = self.clip_sentence_pairs_iou[k][1][:self.sent_vec_dim]\n p_offset = self.clip_sentence_pairs_iou[k][3]\n l_offset = self.clip_sentence_pairs_iou[k][4]\n offset_batch[index,0] = p_offset\n offset_batch[index,1] = l_offset\n index+=1\n else:\n r = random.choice(range(self.num_samples_iou))\n random_batch_index[index] = r\n continue\n\n\n return image_batch, sentence_batch, offset_batch, -1\n\n\nclass TestingDataSet(object):\n def __init__(self, img_dir, csv_path, batch_size, word2idx, useLSTM=True):\n #il_path: image_label_file path\n #self.index_in_epoch = 0\n #self.epochs_completed = 0\n self.batch_size = batch_size\n self.image_dir = img_dir\n print(\"Reading testing data list from \"+csv_path)\n self.semantic_size = 4800\n csv = pickle.load(open(csv_path))\n self.clip_sentence_pairs = []\n for l in csv:\n clip_name = l[0]\n sent_vecs = l[1]\n for sent_vec in sent_vecs:\n self.clip_sentence_pairs.append((clip_name, sent_vec))\n 
print(str(len(self.clip_sentence_pairs))+\" pairs are readed\")\n movie_names_set = set()\n self.movie_clip_names = {}\n for k in range(len(self.clip_sentence_pairs)):\n clip_name = self.clip_sentence_pairs[k][0]\n movie_name = clip_name.split(\"_\")[0]\n if not movie_name in movie_names_set:\n movie_names_set.add(movie_name)\n self.movie_clip_names[movie_name] = []\n self.movie_clip_names[movie_name].append(k)\n self.movie_names = list(movie_names_set)\n \n self.clip_num_per_movie_max = 0\n for movie_name in self.movie_clip_names:\n if len(self.movie_clip_names[movie_name])>self.clip_num_per_movie_max: self.clip_num_per_movie_max = len(self.movie_clip_names[movie_name])\n print(\"Max number of clips in a movie is \"+str(self.clip_num_per_movie_max))\n \n self.sliding_clip_path = img_dir\n sliding_clips_tmp = os.listdir(self.sliding_clip_path)\n self.sliding_clip_names = []\n for clip_name in sliding_clips_tmp:\n if clip_name.split(\".\")[2]==\"npy\":\n movie_name = clip_name.split(\"_\")[0]\n if movie_name in self.movie_clip_names:\n self.sliding_clip_names.append(clip_name.split(\".\")[0]+\".\"+clip_name.split(\".\")[1])\n self.num_samples = len(self.clip_sentence_pairs)\n print(\"sliding clips number: \"+str(len(self.sliding_clip_names)))\n assert self.batch_size <= self.num_samples\n \n\n def get_clip_sample(self, sample_num, movie_name, clip_name):\n length=len(os.listdir(self.image_dir+movie_name+\"/\"+clip_name))\n sample_step=1.0*length/sample_num\n sample_pos=np.floor(sample_step*np.array(range(sample_num)))\n sample_pos_str=[]\n img_names=os.listdir(self.image_dir+movie_name+\"/\"+clip_name)\n # sort is very important! 
to get a correct sequence order\n img_names.sort()\n # print img_names\n for pos in sample_pos:\n sample_pos_str.append(self.image_dir+movie_name+\"/\"+clip_name+\"/\"+img_names[int(pos)])\n return sample_pos_str\n \n def get_context_window(self, clip_name, win_length):\n movie_name = clip_name.split(\"_\")[0]\n start = int(clip_name.split(\"_\")[1])\n end = int(clip_name.split(\"_\")[2].split(\".\")[0])\n clip_length = 128#end-start\n left_context_feats = np.zeros([win_length,4096], dtype=np.float32)\n right_context_feats = np.zeros([win_length,4096], dtype=np.float32)\n last_left_feat = np.load(self.sliding_clip_path+clip_name)\n last_right_feat = np.load(self.sliding_clip_path+clip_name)\n for k in range(win_length):\n left_context_start = start-clip_length*(k+1)\n left_context_end = start-clip_length*k\n right_context_start = end+clip_length*k\n right_context_end = end+clip_length*(k+1)\n left_context_name = movie_name+\"_\"+str(left_context_start)+\"_\"+str(left_context_end)+\".npy\"\n right_context_name = movie_name+\"_\"+str(right_context_start)+\"_\"+str(right_context_end)+\".npy\"\n if os.path.exists(self.sliding_clip_path+left_context_name):\n left_context_feat = np.load(self.sliding_clip_path+left_context_name)\n last_left_feat = left_context_feat\n else:\n left_context_feat = last_left_feat\n if os.path.exists(self.sliding_clip_path+right_context_name):\n right_context_feat = np.load(self.sliding_clip_path+right_context_name)\n last_right_feat = right_context_feat\n else:\n right_context_feat = last_right_feat\n left_context_feats[k] = left_context_feat\n right_context_feats[k] = right_context_feat\n\n return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)\n\n\n def load_movie(self, movie_name):\n movie_clip_sentences=[]\n for k in range(len(self.clip_names)):\n if movie_name in self.clip_names[k]:\n movie_clip_sentences.append((self.clip_names[k], self.sent_vecs[k][:2400], self.sentences[k]))\n\n movie_clip_imgs=[]\n for k in 
range(len(self.movie_frames[movie_name])):\n # print str(k)+\"/\"+str(len(self.movie_frames[movie_name])) \n if os.path.isfile(self.movie_frames[movie_name][k][1]) and os.path.getsize(self.movie_frames[movie_name][k][1])!=0:\n img=load_image(self.movie_frames[movie_name][k][1])\n movie_clip_imgs.append((self.movie_frames[movie_name][k][0],img))\n \n return movie_clip_imgs, movie_clip_sentences\n\n def load_movie_byclip(self,movie_name,sample_num):\n movie_clip_sentences=[]\n movie_clip_featmap=[]\n clip_set=set()\n for k in range(len(self.clip_sentence_pairs)):\n if movie_name in self.clip_sentence_pairs[k][0]:\n movie_clip_sentences.append((self.clip_sentence_pairs[k][0],self.clip_sentence_pairs[k][1][:self.semantic_size]))\n\n if not self.clip_sentence_pairs[k][0] in clip_set:\n clip_set.add(self.clip_sentence_pairs[k][0])\n # print str(k)+\"/\"+str(len(self.movie_clip_names[movie_name]))\n visual_feature_path=self.image_dir+self.clip_sentence_pairs[k][0]+\".npy\"\n feature_data=np.load(visual_feature_path)\n movie_clip_featmap.append((self.clip_sentence_pairs[k][0],feature_data))\n return movie_clip_featmap, movie_clip_sentences\n \n def load_movie_slidingclip(self, movie_name, sample_num):\n movie_clip_sentences = []\n movie_clip_featmap = []\n clip_set = set()\n for k in range(len(self.clip_sentence_pairs)):\n if movie_name in self.clip_sentence_pairs[k][0]:\n movie_clip_sentences.append((self.clip_sentence_pairs[k][0], self.clip_sentence_pairs[k][1][:self.semantic_size]))\n for k in range(len(self.sliding_clip_names)):\n if movie_name in self.sliding_clip_names[k]:\n # print str(k)+\"/\"+str(len(self.movie_clip_names[movie_name]))\n visual_feature_path = self.sliding_clip_path+self.sliding_clip_names[k]+\".npy\"\n #context_feat=self.get_context(self.sliding_clip_names[k]+\".npy\")\n # left_context_feat,right_context_feat = self.get_context_window(self.sliding_clip_names[k]+\".npy\",1)\n feature_data = np.load(visual_feature_path)\n 
#comb_feat=np.hstack((context_feat,feature_data))\n comb_feat = feature_data # np.hstack((left_context_feat,feature_data,right_context_feat))\n movie_clip_featmap.append((self.sliding_clip_names[k], comb_feat))\n return movie_clip_featmap, movie_clip_sentences\n\n\n" } ]
5
LIANGQINGYUAN/TextMining
https://github.com/LIANGQINGYUAN/TextMining
bba984ba461433341a10e68312196b0290134916
b93bb90dbf4a5ecca4cddf311a62a0102e67a021
2259c52dfa5573fa6e71376ed9167c16ec0a7841
refs/heads/master
2021-03-30T23:14:21.892822
2018-03-11T01:30:38
2018-03-11T01:30:38
124,713,019
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6209507584571838, "alphanum_fraction": 0.6461926698684692, "avg_line_length": 32.02777862548828, "blob_id": "91920e5848cedc155b1ab7cde9f49fd2666688d1", "content_id": "0ab3cd8cb84e4d7cdd8b89d57fa344b714136ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2709, "license_type": "no_license", "max_line_length": 94, "num_lines": 72, "path": "/textClassfication__svmOptimize.py", "repo_name": "LIANGQINGYUAN/TextMining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 9 08:52:18 2018\n\n@author: liang\n\"\"\"\n#训练\nfrom sklearn.svm import SVC\nimport numpy as np\nimport pandas as pd\n\nmodel = SVC(C=2, kernel='rbf', decision_function_shape='ovr')\ny=np.array(range(1,7))\n\n#读入数据\ndf_tfidf=pd.read_excel('df_tfidf_2.xlsx')\ndf_test=pd.read_excel('df_test_2.xlsx')\ndf_tfidf_extracted=pd.DataFrame()\n\n#选择提取出来的特征\nwith open(\"features.txt\", \"r\") as f:\n lines = f.readlines()\n print('最终的特征数量:%d',len(lines))\n for line in lines:\n word_extracted = line.split(' ')[0]\n #删除不在抽取出来的特征里面的无关特征\n if word_extracted in df_tfidf.columns.values:\n df_tfidf_extracted[word_extracted]=df_tfidf[word_extracted]\n f.close()\n\ndf_tfidf_extracted.to_excel('df_tfidf_extracted_2.xlsx')\n\n#训练\nmodel.fit(df_tfidf_extracted[:6], y)#前6行进行训练\npredicted=model.predict(df_tfidf_extracted[6:])#6行以后的进行预测\nprint('训练准确率-优化后:%s'%model.score(df_tfidf_extracted[:6],y))#测试准确率\nprint('测试准确率-优化后:%s'%model.score(df_tfidf_extracted[6:],df_test['types']))#测试准确率\n\n\nX_train=df_tfidf_extracted[:6]\ny_train=y\nX_test=df_tfidf_extracted[6:]\ny_test=df_test['types']\n#贝叶斯训练\nfrom sklearn.naive_bayes import MultinomialNB # 导入多项式贝叶斯算法\n# 训练分类器:输入词袋向量和分类标签,alpha:0.001 alpha越小,迭代次数越多,精度越高 \nclf = MultinomialNB(alpha=0.01).fit(X_train, y_train) \n\n#预测分类结果 \npredicted = clf.predict(X_test) \n# 计算分类精度: \nfrom sklearn import metrics \ndef metrics_result(actual, predict): \n print 
('精度:{0:.3f}'.format(metrics.precision_score(actual, predict,average='weighted')) ) \n print ('召回:{0:0.3f}'.format(metrics.recall_score(actual, predict,average='weighted')) )\n print ('f1-score:{0:.3f}'.format(metrics.f1_score(actual, predict,average='weighted')) )\n\nmetrics_result(y_test,predicted)\n\n#决策树训练\nfrom sklearn.ensemble import ExtraTreesClassifier \nextraTrees=ExtraTreesClassifier(n_estimators=57,\n criterion='entropy',\n min_impurity_decrease=93*0.00001,\n max_depth=33,\n min_samples_split=27,\n min_samples_leaf=3,max_leaf_nodes=47,\n class_weight='balanced') \nextraTrees.fit(X_train, y_train)\n\nprint('extraTrees训练准确率:%s'%(extraTrees.score(X_train,y_train)))\nprint('***********extraTrees测试准确率***********:%s'%(extraTrees.score(X_test,y_test)))" }, { "alpha_fraction": 0.6220245957374573, "alphanum_fraction": 0.6421658396720886, "avg_line_length": 27.751880645751953, "blob_id": "0cbef35e4e64ac2b20edf2ac6d7ce4a2314d9c2b", "content_id": "d2a81ad2948c9d48a43285f72e3bbc004d626e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4351, "license_type": "no_license", "max_line_length": 109, "num_lines": 133, "path": "/textClassfication_ETs_Bayes.py", "repo_name": "LIANGQINGYUAN/TextMining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 8 17:24:30 2018\n\n@author: liang\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport jieba\nimport pandas as pd\nimport re\n\n'''\n读取数据\n'''\ndf = pd.read_excel('原始整理数据(改).xlsx',encoding = \"gb18030\")\n\n'''\n分词\n'''\nmycut=lambda s:' '.join(jieba.cut(s))\ndocuments=df[\"自述\"].apply(mycut)\n#去停用词\nimport codecs\nwith codecs.open(\"stopwords.txt\", \"r\", encoding=\"utf-8\") as f:\n text = f.read()\nstoplists=text.splitlines()\ntexts = [[word for word in document.split()if word not in stoplists] for document in documents]\n\nlist_str_texts=[]\n\nfor i in range(len(texts)):\n s=str(texts[i])\n s=re.sub(\"[',]\",'',s)\n 
list_str_texts.append(s)\n\nstr_texts=pd.DataFrame(list_str_texts)\nstr_texts['types']=df['类别']\nstr_texts.to_excel('分词结果.xlsx')\n\n'''\n计算词频 TF-IDF\n'''\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nvectorizer=CountVectorizer() #该类会将文本中的词语转换为词频矩阵,矩阵元素a[i][j] 表示j词在i类文本下的词频 \ntransformer=TfidfTransformer() #该类会统计每个词语的tf-idf权值 \n\ntfidf=transformer.fit_transform(vectorizer.fit_transform(list_str_texts))\n\n\n#第一个fit_transform是计算tf-idf,第二个fit_transform是将文本转为词频矩阵 \nword=vectorizer.get_feature_names()#获取词袋模型中的所有词语 \nweight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 \n\ndf_tfidf=pd.DataFrame(weight)\ndf_tfidf.cloumns=word\ndf_tfidf.to_excel('df_tfidf.xlsx')\ndf_word=pd.DataFrame(word)\ndf_word.to_excel('df_word.xlsx')\n\ndf_data = pd.read_excel('df_tfidf.xlsx')\ndf_data['types']=df['类别']\nx=df_data.iloc[:,:-1]\ny=df_data['types']\n\n'''\n抽取重要特征\n'''\nfrom sklearn.ensemble import ExtraTreesClassifier \nmodel=ExtraTreesClassifier(n_estimators=57,\n criterion='entropy',\n min_impurity_decrease=93*0.00001,\n max_depth=33,\n min_samples_split=27,\n min_samples_leaf=3,max_leaf_nodes=47,\n class_weight='balanced') \nmodel.fit(x,y)\nimportances=model.feature_importances_\n\ndata_extracted=pd.DataFrame()\nfor i in range(len(importances)):\n if importances[i]!=0:\n data_extracted[i]=df_data[i]\n\ndata_extracted['types']=df_data['types']\n\n\n'''\n训练\n'''\n#分训练集和测试集\nfrom sklearn.model_selection import train_test_split\n\nX, y = data_extracted.iloc[:, :-1], data_extracted['types']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)#随机选择30%作为测试集,剩余作为训练集\n\n#SVM训练\nfrom sklearn.svm import SVC\nsvc = SVC()\nsvc.fit(X_train, y_train)\nprint('svc训练准确率:%s'%svc.score(X_train,y_train))#测试准确率\n\n#决策树训练\nextraTrees=ExtraTreesClassifier(n_estimators=57,\n criterion='entropy',\n min_impurity_decrease=93*0.00001,\n max_depth=33,\n min_samples_split=27,\n 
min_samples_leaf=3,max_leaf_nodes=47,\n class_weight='balanced') \nextraTrees.fit(X_train, y_train)\n\nprint('extraTrees训练准确率:%s'%(extraTrees.score(X_train,y_train)))\nprint('***********extraTrees测试准确率***********:%s'%(extraTrees.score(X_test,y_test)))\n\n#贝叶斯训练\nfrom sklearn.naive_bayes import MultinomialNB # 导入多项式贝叶斯算法\n# 训练分类器:输入词袋向量和分类标签,alpha:0.001 alpha越小,迭代次数越多,精度越高 \nclf = MultinomialNB(alpha=0.01).fit(X_train, y_train) \n\n#预测分类结果 \npredicted = clf.predict(X_test) \n# 计算分类精度: \nfrom sklearn import metrics \ndef metrics_result(actual, predict): \n print('贝叶斯结果:')\n print ('精度:{0:.3f}'.format(metrics.precision_score(actual, predict,average='weighted')) ) \n print ('召回:{0:0.3f}'.format(metrics.recall_score(actual, predict,average='weighted')) )\n print ('f1-score:{0:.3f}'.format(metrics.f1_score(actual, predict,average='weighted')) )\n\nmetrics_result(y_test,predicted)" }, { "alpha_fraction": 0.6079724431037903, "alphanum_fraction": 0.6303412914276123, "avg_line_length": 26.25, "blob_id": "0042cb07cea9b300ffd1a42de796f8b6c06c83cf", "content_id": "31748c622f42632ffe09eb28a44572a776f722d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3979, "license_type": "no_license", "max_line_length": 111, "num_lines": 128, "path": "/textClassfication__SVM.py", "repo_name": "LIANGQINGYUAN/TextMining", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 8 17:50:04 2018\n\n@author: liang\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndf=pd.read_excel('分词结果.xlsx')\n\nX, y = df.iloc[:, 1:], df.iloc[:, 0]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, 
random_state=0)#随机选择30%作为测试集,剩余作为训练集\n\n\n#对训练集进行合并,进行TF-IDF\ndf_train=pd.DataFrame()\ndf_train['types']=y_train\ndf_train['content']=X_train\n\n#对测试集进行合并\ndf_test=pd.DataFrame()\ndf_test['types']=y_test\ndf_test['content']=X_test\n\n\n#训练集类别分开\ncatagory01=df_train.loc[df_train['types']==1]['content'].tolist()\ncatagory02=df_train.loc[df_train['types']==2]['content'].tolist()\ncatagory03=df_train.loc[df_train['types']==3]['content'].tolist()\ncatagory04=df_train.loc[df_train['types']==4]['content'].tolist()\ncatagory05=df_train.loc[df_train['types']==5]['content'].tolist()\ncatagory06=df_train.loc[df_train['types']==6]['content'].tolist()\n\ncorpus=[]\ncorpus.append(catagory01)\ncorpus.append(catagory02)\ncorpus.append(catagory03)\ncorpus.append(catagory04)\ncorpus.append(catagory05)\ncorpus.append(catagory06)\n\n#将测试数据加入已分类的行后\nfor i in df_test['content']:\n corpus.append(i)\n\n#转化成【‘ ’,‘ ’,‘ ’】\nlist_str_texts=[]\nfor i in range(len(corpus)):\n s=str(corpus[i])\n list_str_texts.append(s)\n \n#TF-IDF\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nvectorizer=CountVectorizer() #该类会将文本中的词语转换为词频矩阵,矩阵元素a[i][j] 表示j词在i类文本下的词频 \ntransformer=TfidfTransformer() #该类会统计每个词语的tf-idf权值 \n\ntfidf=transformer.fit_transform(vectorizer.fit_transform(list_str_texts))\n\n\n#第一个fit_transform是计算tf-idf,第二个fit_transform是将文本转为词频矩阵 \nword=vectorizer.get_feature_names()#获取词袋模型中的所有词语 \nweight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 \n\n#将每类文档的词频写入文件\nfor index in range(0, 6):\n with open(\"tfidf_%d\" % index, \"w\") as f:\n \n for i in np.argsort(-tfidf.toarray()[index]):\n if tfidf.toarray()[index][i] > 0:\n f.write(\"%f %s\\n\" % (tfidf.toarray()[index][i], word[i]))\n f.close()\n\n\n#关键词提取\ndef feature_extraction():\n d = {}\n for index in range(0, 6):\n with open(\"tfidf_%d\" % index, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n word = line.split(' ')[1][:-1]\n tfidf = 
line.split(' ')[0]\n if word in d:\n d[word] = np.append(d[word], tfidf)\n else:\n d[word] = np.array(tfidf)\n f.close();\n with open(\"features.txt\", \"w\") as f: \n for word in d:\n if d[word].size >= 2 :\n index = np.argsort(d[word])\n if float(d[word][index[d[word].size-0-1]]) - float(d[word][index[d[word].size-1-1]]) > 0.00005:\n f.write(\"%s %s\\n\" % (word, d[word]))\n f.close()\n return d\n \nd=feature_extraction() \n\nwith open(\"features.txt\", \"r\") as f:\n lines = f.readlines()\n print('最终的特征数量:%d',len(lines))\n f.close()\n\n\n#训练\nfrom sklearn.svm import SVC\n\n#根据提取的关键词删除掉无用的关键词\nmodel = SVC()\ny=np.array(range(1,7))\nmodel.fit(tfidf[:6], y)#前6行进行训练\npredicted=model.predict(tfidf[6:])#6行以后的进行预测\nprint('测试准确率:%s'%model.score(tfidf[6:],df_test['types']))#测试准确率\n\n'''\nsvm优化-抽取特征\n'''\ndf_tfidf=pd.DataFrame(weight)\ndf_tfidf.cloumns=word\ndf_tfidf.to_excel('df_tfidf_2.xlsx')\ndf_word=pd.DataFrame(word)\ndf_word.to_excel('df_word_2.xlsx')\ndf_test.to_excel('df_test_2.xlsx')" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 20, "blob_id": "1371c8991e3a0e8fb0047708098fb6a7b9103291", "content_id": "f407de64cdfbdc4faa60ea071e0c0400a38650d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "LIANGQINGYUAN/TextMining", "src_encoding": "UTF-8", "text": "# TextMining\nNotes of TextMining learning\n" } ]
4
gazeti/aleph
https://github.com/gazeti/aleph
0305db3fac56fd914e2e09972d479b859ab403ec
f6714c4be038471cfdc6408bfe88dc9e2ed28452
34ff697bd7ef10e5586875c0f7eadaaaa46c7350
refs/heads/master
2021-06-27T11:10:40.798378
2017-12-26T19:27:40
2017-12-26T19:27:40
113,964,932
0
0
MIT
2017-12-12T08:39:08
2019-03-31T10:37:39
2019-07-04T04:00:20
Python
[ { "alpha_fraction": 0.6603139042854309, "alphanum_fraction": 0.6603139042854309, "avg_line_length": 34.68000030517578, "blob_id": "6101edb5618b7be62beebe6941968bd4ec6fc86a", "content_id": "3731378486a92a5c9e41a3f3ae28bc4985d8295d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 892, "license_type": "permissive", "max_line_length": 75, "num_lines": 25, "path": "/aleph/logic/permissions.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom flask import render_template\n\nfrom aleph.core import db, app_url, app_title\nfrom aleph.notify import notify_role\nfrom aleph.model import Permission\n\nlog = logging.getLogger(__name__)\n\n\ndef update_permission(role, collection, read, write):\n \"\"\"Update a roles permission to access a given collection.\"\"\"\n pre = Permission.by_collection_role(collection.id, role)\n post = Permission.grant_collection(collection.id, role, read, write)\n db.session.commit()\n\n try:\n url = '%scollections/%s' % (app_url, collection.id)\n html = render_template('email/permission.html', role=role, url=url,\n collection=collection, pre=pre, post=post,\n app_url=app_url, app_title=app_title)\n notify_role(role, collection.label, html)\n except Exception as ex:\n log.exception(ex)\n return post\n" }, { "alpha_fraction": 0.6926187872886658, "alphanum_fraction": 0.6966633200645447, "avg_line_length": 31.96666717529297, "blob_id": "1b51e6344a72c8e00a91f049d7afb005858494a3", "content_id": "c5b9fada80d87fd49de25aa75e0d4feaca8190a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "permissive", "max_line_length": 75, "num_lines": 30, "path": "/aleph/tests/test_models.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.core import db\nfrom aleph.model import Collection, Entity\nfrom aleph.logic import delete_pending\nfrom 
aleph.tests.factories.models import EntityFactory, CollectionFactory\nfrom aleph.tests.util import TestCase\n\n\nclass EntityModelTest(TestCase):\n\n def setUp(self):\n super(EntityModelTest, self).setUp()\n\n self.pending_col = CollectionFactory.create()\n self.col = CollectionFactory.create()\n db.session.flush()\n\n self.pending_ent = EntityFactory.create(state=Entity.STATE_PENDING)\n self.pending_ent.collections = [self.pending_col]\n self.ent = EntityFactory.create(state=Entity.STATE_ACTIVE)\n self.ent = [self.col]\n db.session.flush()\n\n def test_delete_pending_entities(self):\n self.assertEqual(Entity.query.count(), 2)\n self.assertEqual(Collection.query.count(), 2)\n\n delete_pending()\n\n self.assertEqual(Entity.query.count(), 1)\n self.assertEqual(Collection.query.count(), 2)\n" }, { "alpha_fraction": 0.7232142686843872, "alphanum_fraction": 0.7232142686843872, "avg_line_length": 21.399999618530273, "blob_id": "252411f64bf9c1917dc90b7f0c71f226ec228d89", "content_id": "9c0dd9828f9c3807664d04e46dff32212e1e256b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 112, "license_type": "permissive", "max_line_length": 50, "num_lines": 5, "path": "/aleph/static/js/loaders/loadDatasets.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "var loadDatasets = ['Dataset', function(Dataset) {\n return Dataset.search();\n}];\n\nexport default loadDatasets;\n" }, { "alpha_fraction": 0.5909376740455627, "alphanum_fraction": 0.5972309708595276, "avg_line_length": 31.428571701049805, "blob_id": "af51bb5c458dfcc5525d36ba324b60c27eeba570", "content_id": "294a6c646a14f85177d0933cd9c1ca0f6c44a12f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1589, "license_type": "permissive", "max_line_length": 73, "num_lines": 49, "path": "/aleph/analyze/polyglot_entity.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from 
__future__ import absolute_import\n\nimport logging\nfrom polyglot.text import Text\n\nfrom aleph.analyze.analyzer import Analyzer\nfrom aleph.model import DocumentTag, DocumentTagCollector\n\nlog = logging.getLogger(__name__)\n\n\nclass PolyglotEntityAnalyzer(Analyzer):\n ORIGIN = 'polyglot'\n MIN_LENGTH = 100\n TYPES = {\n 'I-PER': DocumentTag.TYPE_PERSON,\n 'I-ORG': DocumentTag.TYPE_ORGANIZATION,\n 'I-LOC': DocumentTag.TYPE_LOCATION\n }\n\n def prepare(self):\n self.disabled = self.document.type != self.document.TYPE_TEXT\n self.collector = DocumentTagCollector(self.document, self.ORIGIN)\n\n def on_text(self, text):\n if text is None or len(text) <= self.MIN_LENGTH:\n return\n try:\n hint_language_code = None\n if len(self.document.languages) == 1:\n hint_language_code = self.document.languages[0]\n text = Text(text, hint_language_code=hint_language_code)\n for entity in text.entities:\n if entity.tag == 'I-LOC' or len(entity) == 1:\n continue\n\n label = ' '.join(entity)\n if len(label) < 4 or len(label) > 200:\n continue\n self.collector.emit(label, self.TYPES.get(entity.tag))\n\n except ValueError as ve:\n log.info('NER value error: %r', ve)\n except Exception as ex:\n log.warning('NER failed: %r', ex)\n\n def finalize(self):\n log.info('Polyglot extracted %s entities.', len(self.collector))\n self.collector.save()\n" }, { "alpha_fraction": 0.5929203629493713, "alphanum_fraction": 0.5943176746368408, "avg_line_length": 35.38983154296875, "blob_id": "372e724f0bb908028f72ccbb33801a85f0f15b86", "content_id": "662dc2c6b5f2277abb26161233f1d5054d3d390a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2147, "license_type": "permissive", "max_line_length": 79, "num_lines": 59, "path": "/aleph/search/leads.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\n\nfrom aleph.core import es, es_index\nfrom aleph.index import TYPE_LEAD, TYPE_ENTITY\nfrom aleph.search.util 
import execute_basic\nfrom aleph.search.fragments import filter_query, authz_filter, aggregate\nfrom aleph.search.entities import facet_collections\nfrom aleph.search.facet import parse_facet_result\n\nlog = logging.getLogger(__name__)\n\n\ndef leads_query(collection_id, state):\n q = {'term': {'entity_collection_id': collection_id}}\n q = authz_filter(q, state.authz, roles=True)\n aggs = {'scoped': {'global': {}, 'aggs': {}}}\n facets = list(state.facet_names)\n if 'collections' in facets:\n aggs = facet_collections(state, q, aggs)\n facets.remove('collections')\n aggs = aggregate(state, q, aggs, facets)\n\n q = {\n 'sort': [{'judgement': 'asc'}, {'score': 'desc'}, {'match_id': 'asc'}],\n 'query': filter_query(q, state.filters),\n 'aggregations': aggs,\n 'size': state.limit,\n 'from': state.offset\n }\n result, hits, output = execute_basic(TYPE_LEAD, q)\n output['facets'] = parse_facet_result(state, result)\n entities = set([])\n for doc in hits.get('hits', []):\n link = doc.get('_source')\n link['id'] = doc.get('_id')\n entities.add(link.get('entity_id'))\n entities.add(link.get('match_id'))\n output['results'].append(link)\n\n q = {'terms': {'_id': list(entities)}}\n q = {'query': q, 'size': len(entities) + 2}\n _, hits, _ = execute_basic(TYPE_ENTITY, q)\n for doc in hits.get('hits', []):\n entity = doc.get('_source')\n entity['id'] = doc.get('_id')\n for result in output['results']:\n if result.get('match_id') == entity['id']:\n result['match'] = entity\n if result.get('entity_id') == entity['id']:\n result['entity'] = entity\n return output\n\n\ndef lead_count(collection_id):\n \"\"\"Inaccurate, as it does not reflect auth.\"\"\"\n q = {'term': {'entity_collection_id': collection_id}}\n q = {'size': 0, 'query': q}\n result = es.search(index=es_index, doc_type=TYPE_LEAD, body=q)\n return result.get('hits', {}).get('total', 0)\n" }, { "alpha_fraction": 0.7345767617225647, "alphanum_fraction": 0.7360115051269531, "avg_line_length": 28.04166603088379, "blob_id": 
"471405124d42bd5bf5d1989d79c12cbc0e0b3021", "content_id": "61618be53cbac8398c0cdb66326e42783ce2d64e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "permissive", "max_line_length": 62, "num_lines": 24, "path": "/aleph/views/search_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request\nfrom apikit import jsonify\n\nfrom aleph.core import url_for\nfrom aleph.views.cache import enable_cache\nfrom aleph.events import log_event\nfrom aleph.search import QueryState\nfrom aleph.search import documents_query\nfrom aleph.search.util import next_params\n\n\nblueprint = Blueprint('search_api', __name__)\n\n\[email protected]('/api/1/query')\ndef query():\n enable_cache(vary_user=True)\n state = QueryState(request.args, request.authz)\n result = documents_query(state)\n params = next_params(request.args, result)\n log_event(request)\n if params is not None:\n result['next'] = url_for('search_api.query', **params)\n return jsonify(result)\n" }, { "alpha_fraction": 0.706677258014679, "alphanum_fraction": 0.708267092704773, "avg_line_length": 33, "blob_id": "684b51e9beb1d39622702a19752b9b118d80f6e9", "content_id": "842dccb30ab3e9580743aff9910903283bf6ff63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1258, "license_type": "permissive", "max_line_length": 68, "num_lines": 37, "path": "/aleph/model/validate.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import os\nimport json\nfrom dalet import is_country_code, is_partial_date, is_language_code\nfrom dalet import is_domain, is_url\nfrom jsonschema import Draft4Validator, FormatChecker, RefResolver\n\nfrom aleph.core import get_config\n\nresolver = RefResolver('core.json#', {})\n\nSCHEMA_DIR = os.path.join(os.path.dirname(__file__), 'validation')\n\nfor (root, dirs, files) in os.walk(SCHEMA_DIR):\n for 
schema_file in files:\n with open(os.path.join(root, schema_file), 'r') as fh:\n schema = json.load(fh)\n resolver.store[schema['id']] = schema\n\nformat_checker = FormatChecker()\nformat_checker.checks('country-code')(is_country_code)\nformat_checker.checks('partial-date')(is_partial_date)\nformat_checker.checks('language-code')(is_language_code)\nformat_checker.checks('url')(is_url)\nformat_checker.checks('domain')(is_domain)\n\n\n@format_checker.checks('collection-category')\ndef is_collection_category(cat):\n categories = get_config('COLLECTION_CATEGORIES', {})\n return cat in categories.keys()\n\n\ndef validate(data, schema):\n _, schema = resolver.resolve(schema)\n validator = Draft4Validator(schema, resolver=resolver,\n format_checker=format_checker)\n return validator.validate(data, schema)\n" }, { "alpha_fraction": 0.6873994469642639, "alphanum_fraction": 0.6873994469642639, "avg_line_length": 28.140625, "blob_id": "3c3894a994198de1f121bcf1b5b67712a99d03d3", "content_id": "059733b02b948e0ed79a4b4bdec3d82eb8219686", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1865, "license_type": "permissive", "max_line_length": 104, "num_lines": 64, "path": "/aleph/static/js/loaders/loadCollections.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "var loadProjectCollections = ['Collection', function(Collection) {\n return Collection.search({\n managed: false,\n counts: true,\n facet: ['countries']\n });\n}];\n\nvar loadSourceCollections = ['Collection', function(Collection) {\n return Collection.search({\n managed: true,\n counts: true,\n facet: ['countries', 'category']\n });\n}];\n\nvar loadCollection = ['$route', 'Collection', function($route, Collection) {\n var collectionId = $route.current.params.collection_id;\n return Collection.get(collectionId);\n}];\n\nvar loadCollectionDeep = ['$route', '$q', 'Collection', 'Role', function($route, $q, Collection, Role) {\n var dfd = 
$q.defer();\n var collectionId = $route.current.params.collection_id;\n\n Collection.get(collectionId).then(function(collection) {\n collection.creator = {};\n if (!collection.creator_id) {\n dfd.resolve(collection);\n } else {\n Role.get(collection.creator_id).then(function(role) {\n collection.creator = role;\n dfd.resolve(collection);\n }, function(err) {\n dfd.reject(err);\n });\n }\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n}];\n\nvar loadCollectionDocuments = ['$route', 'Document', function($route, Document) {\n var collectionId = $route.current.params.collection_id;\n return Document.search(collectionId);\n}];\n\nvar loadCollectionEntities = ['$route', 'Entity', function($route, Entity) {\n var collectionId = $route.current.params.collection_id;\n return Entity.searchCollection(collectionId);\n}];\n\nvar loadCollectionLeads = ['$route', 'Lead', function($route, Lead) {\n var collectionId = $route.current.params.collection_id;\n return Lead.search(collectionId);\n}];\n\n\nexport {\n loadProjectCollections, loadSourceCollections, loadCollection,\n loadCollectionDocuments, loadCollectionEntities, loadCollectionLeads,\n loadCollectionDeep\n};\n" }, { "alpha_fraction": 0.6170212626457214, "alphanum_fraction": 0.6202449798583984, "avg_line_length": 17.247058868408203, "blob_id": "40c0d53753347fb3c8185d1f919a5ea20d24eff8", "content_id": "4172c0b71109df26a90c8303ba71f4ee0df04749", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1551, "license_type": "permissive", "max_line_length": 61, "num_lines": 85, "path": "/aleph/static/js/filters.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import moment from 'moment';\nimport aleph from './aleph';\n\naleph.filter('date', function() {\n return function(val) {\n return moment(val).format('YYYY-MM-DD');\n };\n});\n\naleph.filter('percentage', function() {\n return function(val) {\n return parseInt(val * 100, 
10);\n };\n});\n\n\naleph.filter('relativeDate', function() {\n return function(val) {\n return moment(val).fromNow();\n };\n});\n\n\naleph.filter('calendar', function() {\n return function(val) {\n return moment(val).calendar();\n };\n});\n\naleph.filter('urlencode', function() {\n return window.encodeURIComponent;\n});\n\n\naleph.filter('host', function() {\n return function(val) {\n var l = document.createElement(\"a\");\n l.href = val;\n return l.hostname;\n };\n});\n\n\naleph.filter('country', ['Metadata', function(Metadata) {\n var countries = {};\n\n Metadata.get().then(function(md) {\n countries = md.countries;\n });\n\n return function(code) {\n var label = countries[code];\n return label || code;\n };\n}]);\n\n\naleph.filter('language', ['Metadata', function(Metadata) {\n var languages = {};\n\n Metadata.get().then(function(md) {\n languages = md.languages;\n });\n\n return function(code) {\n var label = languages[code];\n return label || code;\n };\n}]);\n\n\naleph.filter('schemaLabel', ['Metadata', function(Metadata) {\n var schemata = {};\n\n Metadata.get().then(function(md) {\n schemata = md.schemata;\n });\n\n return function(schema, plural) {\n var obj = schemata[schema];\n if (!obj) return schema;\n return plural ? 
obj.plural : obj.label;\n };\n\n}]);\n" }, { "alpha_fraction": 0.6599496006965637, "alphanum_fraction": 0.6931150555610657, "avg_line_length": 54.395347595214844, "blob_id": "22c67e499ef14e8e9f1b77ffed8d47720313d99d", "content_id": "b11c3673eda68f6e8a90827f9a3217ad552e00c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 2382, "license_type": "permissive", "max_line_length": 152, "num_lines": 43, "path": "/contrib/base/Dockerfile", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "FROM debian:stretch\nMAINTAINER Friedrich Lindenberg <[email protected]>\nENV DEBIAN_FRONTEND noninteractive\n\n\n# Enable non-free archive for `unrar`.\nRUN echo \"deb http://http.us.debian.org/debian stretch non-free\" >/etc/apt/sources.list.d/nonfree.list\n# Enable Node.js archive\nRUN echo 'deb http://deb.nodesource.com/node_9.x stretch main' > /etc/apt/sources.list.d/nodesource.list\nRUN apt-get update -y -q && apt-get install -y -q gnupg\nRUN apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280\n\nRUN apt-get update -y -q && apt-get -y dist-upgrade && apt-get install -y -q \\\n curl git python-pip python-virtualenv build-essential python-dev \\\n libxml2-dev libxslt1-dev libpq-dev apt-utils ca-certificates less \\\n postgresql-client unrar unrar-free unzip locales libreoffice \\\n libtiff5-dev libjpeg-dev zlib1g-dev libfreetype6-dev \\\n liblcms2-dev poppler-utils poppler-data unrtf pstotext python-numpy \\\n default-jdk libwebp-dev tcl8.6-dev tk8.6-dev python-tk python-pil \\\n imagemagick-common imagemagick unoconv mdbtools \\\n cabextract p7zip-full libboost-python-dev libgsf-1-dev \\\n libtesseract-dev vim libicu-dev libldap2-dev libsasl2-dev \\\n tesseract-ocr tesseract-ocr-all \\\n nodejs djvulibre-bin \\\n && apt-get -y autoremove && apt-get clean\n\nRUN curl -s /tmp/pst.tgz http://www.five-ten-sg.com/libpst/packages/libpst-0.6.69.tar.gz > /tmp/pst.tgz \\\n && cd /tmp && tar xvfz 
pst.tgz && cd libpst-0.6.69 && ln -s /usr/bin/python /usr/bin/python2.7.10 \\\n && ./configure && make && make install\n\nRUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \\\n sed -i -e 's/# en_GB.UTF-8 UTF-8/en_GB.UTF-8 UTF-8/' /etc/locale.gen && \\\n sed -i -e 's/# en_GB.ISO-8859-15 ISO-8859-15/en_GB.ISO-8859-15 ISO-8859-15/' /etc/locale.gen && \\\n locale-gen\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'\n\n# WebKit HTML to X install since the one that comes with distros is hellishly outdated.\nRUN curl -s -L https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.4/wkhtmltox-0.12.4_linux-generic-amd64.tar.xz > /tmp/wkhtmltox.tar.xv \\\n && tar -xf /tmp/wkhtmltox.tar.xv -C /opt && rm -f /tmp/wkhtmltox.tar.xv\nENV WKHTMLTOPDF_BIN /opt/wkhtmltox/bin/wkhtmltopdf\nENV TESSDATA_PREFIX /usr/share/tesseract-ocr\n\nRUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n" }, { "alpha_fraction": 0.6494464874267578, "alphanum_fraction": 0.6559040546417236, "avg_line_length": 36.379310607910156, "blob_id": "2cd88572a1080bfe782fc9c36082b703f695c533", "content_id": "663b2ed15cc7f9a3110ba7ee145f24df489bacac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "permissive", "max_line_length": 70, "num_lines": 29, "path": "/aleph/views/crawlers_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request\nfrom apikit import request_data, jsonify, Pager\n\nfrom aleph.crawlers import get_exposed_crawlers, execute_crawler\n\nblueprint = Blueprint('crawlers_api', __name__)\n\n\[email protected]('/api/1/crawlers', methods=['GET'])\ndef index():\n request.authz.require(request.authz.is_admin)\n crawlers = list(sorted(get_exposed_crawlers(),\n key=lambda c: c.CRAWLER_NAME))\n return jsonify(Pager(crawlers, limit=20))\n\n\[email protected]('/api/1/crawlers', methods=['POST', 'PUT'])\ndef 
queue():\n request.authz.require(request.authz.session_write())\n request.authz.require(request.authz.is_admin)\n data = request_data()\n crawler_id = data.get('crawler_id')\n for cls in get_exposed_crawlers():\n if crawler_id == cls.get_id():\n incremental = bool(data.get('incremental', False))\n execute_crawler.delay(crawler_id, incremental=incremental)\n return jsonify({'status': 'queued'})\n return jsonify({'status': 'error', 'message': 'No such crawler'},\n status=400)\n" }, { "alpha_fraction": 0.6753029823303223, "alphanum_fraction": 0.6794818043708801, "avg_line_length": 31.78082275390625, "blob_id": "89eb6f7a171fe360a877222ca4c577bbb21e1179", "content_id": "0d70cbfc66091590d918d250f886ce84f0bfed11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4786, "license_type": "permissive", "max_line_length": 98, "num_lines": 146, "path": "/aleph/views/entities_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request\nfrom werkzeug.exceptions import BadRequest\nfrom apikit import obj_or_404, jsonify, request_data, arg_bool\n\nfrom aleph.core import db, schemata\nfrom aleph.model import Entity, Collection\nfrom aleph.logic import update_entity, delete_entity, combined_entity\nfrom aleph.events import log_event\nfrom aleph.search import QueryState\nfrom aleph.search import entities_query, links_query, entity_documents\nfrom aleph.search import suggest_entities, similar_entities\nfrom aleph.views.util import get_entity\nfrom aleph.views.cache import enable_cache\n\nblueprint = Blueprint('entities_api', __name__)\n\n\[email protected]('/api/1/entities', methods=['GET'])\ndef index():\n enable_cache(vary_user=True)\n state = QueryState(request.args, request.authz)\n doc_counts = state.getbool('doc_counts')\n res = entities_query(state, doc_counts=doc_counts)\n return jsonify(res)\n\n\[email protected]('/api/1/entities/_all', methods=['GET'])\ndef all():\n 
collection_id = request.args.getlist('collection_id')\n collection_id = request.authz.collections_intersect(request.authz.READ, collection_id) # noqa\n q = Entity.all_ids()\n q = q.filter(Entity.state == Entity.STATE_ACTIVE)\n q = q.filter(Entity.deleted_at == None) # noqa\n q = q.filter(Entity.collection_id.in_(collection_id))\n return jsonify({'results': [r[0] for r in q]})\n\n\[email protected]('/api/1/entities/_suggest', methods=['GET'])\ndef suggest():\n enable_cache(vary_user=True, server_side=False)\n prefix = request.args.get('prefix')\n min_count = int(request.args.get('min_count', 0))\n return jsonify(suggest_entities(prefix, request.authz, min_count))\n\n\[email protected]('/api/1/entities', methods=['POST', 'PUT'])\ndef create():\n data = request_data()\n collection_id = data.get('collection_id')\n try:\n collection_id = int(collection_id)\n except (ValueError, TypeError) as ve:\n raise BadRequest(\"Invalid collection_id\")\n collection = obj_or_404(Collection.by_id(collection_id))\n request.authz.require(request.authz.collection_write(collection.id))\n\n try:\n entity = Entity.save(data, collection)\n except (ValueError, TypeError) as ve:\n raise BadRequest(ve.message)\n\n entity.collection.touch()\n db.session.commit()\n log_event(request, entity_id=entity.id)\n update_entity(entity)\n return view(entity.id)\n\n\[email protected]('/api/1/entities/<id>', methods=['GET'])\ndef view(id):\n entity, obj = get_entity(id, request.authz.READ)\n log_event(request, entity_id=id)\n return jsonify(entity)\n\n\[email protected]('/api/1/entities/<id>/links', methods=['GET'])\ndef links(id):\n entity, obj = get_entity(id, request.authz.READ)\n state = QueryState(request.args, request.authz)\n return jsonify(links_query(entity, state))\n\n\[email protected]('/api/1/entities/<id>/similar', methods=['GET'])\ndef similar(id):\n entity, _ = get_entity(id, request.authz.READ)\n schema = schemata.get(entity.get('schema'))\n if not schema.fuzzy:\n return jsonify({\n 
'status': 'ignore',\n 'results': [],\n 'total': 0\n })\n state = QueryState(request.args, request.authz)\n combined = combined_entity(entity)\n return jsonify(similar_entities(combined, state))\n\n\[email protected]('/api/1/entities/<id>/documents', methods=['GET'])\ndef documents(id):\n entity, _ = get_entity(id, request.authz.READ)\n state = QueryState(request.args, request.authz)\n combined = combined_entity(entity)\n return jsonify(entity_documents(combined, state))\n\n\[email protected]('/api/1/entities/<id>', methods=['POST', 'PUT'])\ndef update(id):\n _, entity = get_entity(id, request.authz.WRITE)\n\n try:\n entity = Entity.save(request_data(), entity.collection,\n merge=arg_bool('merge'))\n except (ValueError, TypeError) as ve:\n raise BadRequest(ve.message)\n\n entity.collection.touch()\n db.session.commit()\n log_event(request, entity_id=entity.id)\n update_entity(entity)\n return view(entity.id)\n\n\[email protected]('/api/1/entities/<id>/merge/<other_id>', methods=['DELETE'])\ndef merge(id, other_id):\n _, entity = get_entity(id, request.authz.WRITE)\n _, other = get_entity(other_id, request.authz.WRITE)\n\n try:\n entity.merge(other)\n except ValueError as ve:\n raise BadRequest(ve.message)\n\n db.session.commit()\n log_event(request, entity_id=entity.id)\n update_entity(entity)\n update_entity(other)\n return view(entity.id)\n\n\[email protected]('/api/1/entities/<id>', methods=['DELETE'])\ndef delete(id):\n _, entity = get_entity(id, request.authz.WRITE)\n delete_entity(entity)\n db.session.commit()\n log_event(request, entity_id=entity.id)\n return jsonify({'status': 'ok'})\n" }, { "alpha_fraction": 0.5865632891654968, "alphanum_fraction": 0.5922480821609497, "avg_line_length": 31.79660987854004, "blob_id": "92a1020ec970e66cb9a228a517a9c729a221403f", "content_id": "a46b08c43a3a8d297412d785924b51a067e09418", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1935, "license_type": 
"permissive", "max_line_length": 96, "num_lines": 59, "path": "/aleph/model/entity_identity.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\n\nfrom aleph.core import db\nfrom aleph.model.common import IdModel, DatedModel\n\nlog = logging.getLogger(__name__)\n\n\nclass EntityIdentity(db.Model, IdModel, DatedModel):\n CONFIRMED = 1\n REJECTED = 2\n UNDECIDED = 3\n\n JUDGEMENTS = [1, 2, 3]\n\n entity_id = db.Column(db.String(32), db.ForeignKey('entity.id'), index=True) # noqa\n entity = db.relationship('Entity', backref=db.backref('identities', lazy='dynamic')) # noqa\n match_id = db.Column(db.String(254), index=True, nullable=False)\n judgement = db.Column(db.Integer(), nullable=False)\n judge_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True)\n\n @classmethod\n def judgements_by_entity(cls, entity_id):\n q = db.session.query(cls.match_id, cls.judgement)\n q = q.filter(cls.entity_id == entity_id)\n return {k: v for k, v in q.all()}\n\n @classmethod\n def entity_ids(cls, entity_id):\n q = db.session.query(cls.match_id)\n q = q.filter(cls.entity_id == entity_id)\n q = q.filter(cls.judgement == cls.CONFIRMED)\n ids = [entity_id]\n for mapped_id, in q.all():\n ids.append(mapped_id)\n return ids\n\n @classmethod\n def by_entity_match(cls, entity_id, match_id):\n q = db.session.query(cls)\n q = q.filter(cls.entity_id == entity_id)\n q = q.filter(cls.match_id == match_id)\n return q.first()\n\n @classmethod\n def save(cls, entity_id, match_id, judgement, judge=None):\n obj = cls.by_entity_match(entity_id, match_id)\n if obj is None:\n obj = cls()\n obj.entity_id = entity_id\n obj.match_id = match_id\n obj.judgement = judgement\n obj.judge = judge\n db.session.add(obj)\n return obj\n\n def __repr__(self):\n return 'EntityIdentity(%r, %r, %r)' % (self.entity_id, self.match_id,\n self.judgement)\n" }, { "alpha_fraction": 0.5905873775482178, "alphanum_fraction": 0.5918848514556885, "avg_line_length": 33.46341323852539, 
"blob_id": "8d3db8b6b8d8b829c0f292c6210b4f3fe6bb0c06", "content_id": "19864b0ccdeb7f35a95472f50dcc42a6c97138a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8478, "license_type": "permissive", "max_line_length": 100, "num_lines": 246, "path": "/aleph/model/entity.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom datetime import datetime\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.dialects.postgresql import JSONB, ARRAY\n\nfrom aleph.core import db, schemata\nfrom aleph.text import match_form, string_value\nfrom aleph.util import ensure_list\nfrom aleph.model.collection import Collection\nfrom aleph.model.reference import Reference\nfrom aleph.model.entity_identity import EntityIdentity\nfrom aleph.model.common import SoftDeleteModel, UuidModel\nfrom aleph.model.common import make_textid, merge_data\n\nlog = logging.getLogger(__name__)\n\n\nclass Entity(db.Model, UuidModel, SoftDeleteModel):\n STATE_ACTIVE = 'active'\n STATE_PENDING = 'pending'\n STATE_DELETED = 'deleted'\n\n name = db.Column(db.Unicode)\n type = db.Column(db.String(255), index=True)\n state = db.Column(db.String(128), nullable=True, default=STATE_ACTIVE, index=True) # noqa\n foreign_ids = db.Column(ARRAY(db.Unicode()))\n data = db.Column('data', JSONB)\n\n collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), index=True) # noqa\n collection = db.relationship(Collection, backref=db.backref('entities', lazy='dynamic')) # noqa\n\n def delete_references(self, origin=None):\n pq = db.session.query(Reference)\n pq = pq.filter(Reference.entity_id == self.id)\n if origin is not None:\n pq = pq.filter(Reference.origin == origin)\n pq.delete(synchronize_session='fetch')\n db.session.refresh(self)\n\n def delete_identities(self):\n pq = db.session.query(EntityIdentity)\n pq = pq.filter(EntityIdentity.entity_id == self.id)\n 
pq.delete(synchronize_session='fetch')\n db.session.refresh(self)\n\n def delete(self, deleted_at=None):\n self.delete_references()\n self.delete_identities()\n deleted_at = deleted_at or datetime.utcnow()\n for alert in self.alerts:\n alert.delete(deleted_at=deleted_at)\n self.state = self.STATE_DELETED\n super(Entity, self).delete(deleted_at=deleted_at)\n\n @classmethod\n def delete_dangling(cls, collection_id):\n \"\"\"Delete dangling entities.\n\n Entities can dangle in pending state while they have no references\n pointing to them, thus making it impossible to enable them. This is\n a routine cleanup function.\n \"\"\"\n q = db.session.query(cls)\n q = q.filter(cls.collection_id == collection_id)\n q = q.filter(cls.state == cls.STATE_PENDING)\n q = q.outerjoin(Reference)\n q = q.group_by(cls)\n q = q.having(func.count(Reference.id) == 0)\n for entity in q.all():\n entity.delete()\n\n def merge(self, other):\n if self.id == other.id:\n raise ValueError(\"Cannot merge an entity with itself.\")\n if self.collection_id != other.collection_id:\n raise ValueError(\"Cannot merge entities from different collections.\") # noqa\n\n data = merge_data(self.data, other.data)\n if self.name.lower() != other.name.lower():\n data = merge_data(data, {'alias': [other.name]})\n\n self.data = data\n self.state = self.STATE_ACTIVE\n self.foreign_ids = self.foreign_ids or []\n self.foreign_ids += other.foreign_ids or []\n self.created_at = min((self.created_at, other.created_at))\n self.updated_at = datetime.utcnow()\n\n # update alerts\n from aleph.model.alert import Alert\n q = db.session.query(Alert).filter(Alert.entity_id == other.id)\n q.update({'entity_id': self.id})\n\n # update document references\n from aleph.model.reference import Reference\n q = db.session.query(Reference).filter(Reference.entity_id == other.id)\n q.update({'entity_id': self.id})\n\n # delete source entities\n other.delete()\n db.session.add(self)\n db.session.commit()\n db.session.refresh(other)\n\n def 
update(self, entity):\n data = entity.get('data') or {}\n data['name'] = entity.get('name')\n self.data = self.schema.validate(data)\n self.name = self.data.pop('name')\n fid = [string_value(f) for f in entity.get('foreign_ids') or []]\n self.foreign_ids = list(set([f for f in fid if f is not None]))\n self.state = entity.pop('state', self.STATE_ACTIVE)\n self.updated_at = datetime.utcnow()\n db.session.add(self)\n\n @classmethod\n def save(cls, data, collection, merge=False):\n ent = cls.by_id(data.get('id'))\n if ent is None:\n ent = cls()\n ent.type = data.pop('schema', None)\n if ent.type is None:\n raise ValueError(\"No schema provided.\")\n ent.id = make_textid()\n\n if merge:\n data = merge_data(data, ent.to_dict())\n\n if collection is None:\n raise ValueError(\"No collection specified.\")\n\n ent.collection = collection\n ent.update(data)\n return ent\n\n @classmethod\n def filter_collections(cls, q, collections=None):\n if collections is None:\n return q\n collection_ids = []\n for collection in collections:\n if isinstance(collection, Collection):\n collection = collection.id\n collection_ids.append(collection)\n q = q.filter(Entity.collection_id.in_(collection_ids))\n return q\n\n @classmethod\n def by_id_set(cls, ids, collections=None):\n if not len(ids):\n return {}\n q = cls.all()\n q = cls.filter_collections(q, collections=collections)\n q = q.options(joinedload('collection'))\n q = q.filter(cls.id.in_(ids))\n entities = {}\n for ent in q:\n entities[ent.id] = ent\n return entities\n\n @classmethod\n def by_foreign_id(cls, foreign_id, collection_id, deleted=False):\n foreign_id = string_value(foreign_id)\n if foreign_id is None:\n return None\n q = cls.all(deleted=deleted)\n q = q.filter(Entity.collection_id == collection_id)\n foreign_id = func.cast([foreign_id], ARRAY(db.Unicode()))\n q = q.filter(cls.foreign_ids.contains(foreign_id))\n q = q.order_by(Entity.deleted_at.desc().nullsfirst())\n return q.first()\n\n @classmethod\n def latest(cls):\n q 
= db.session.query(func.max(cls.updated_at))\n q = q.filter(cls.state == cls.STATE_ACTIVE)\n return q.scalar()\n\n @property\n def schema(self):\n return schemata.get(self.type)\n\n @property\n def terms(self):\n terms = set([self.name])\n for alias in ensure_list(self.data.get('alias')):\n if alias is not None and len(alias):\n terms.add(alias)\n return terms\n\n @property\n def regex_terms(self):\n # This is to find the shortest possible regex for each entity.\n # If, for example, and entity matches both \"Al Qaeda\" and\n # \"Al Qaeda in Iraq, Syria and the Levant\", it is useless to\n # search for the latter.\n terms = set([match_form(t) for t in self.terms])\n regex_terms = set()\n for term in terms:\n if term is None or len(term) < 4 or len(term) > 120:\n continue\n contained = False\n for other in terms:\n if other is None or other == term:\n continue\n if other in term:\n contained = True\n if not contained:\n regex_terms.add(term)\n return regex_terms\n\n def to_dict(self):\n data = super(Entity, self).to_dict()\n data.update({\n 'schema': self.type,\n 'name': self.name,\n 'state': self.state,\n 'data': self.data,\n 'foreign_ids': self.foreign_ids or [],\n 'collection_id': self.collection_id\n })\n return data\n\n def to_index(self):\n entity = self.to_dict()\n entity['properties'] = {'name': [self.name]}\n for k, v in self.data.items():\n v = ensure_list(v)\n if len(v):\n entity['properties'][k] = v\n return entity\n\n def to_ref(self):\n return {\n 'id': self.id,\n 'label': self.name,\n 'schema': self.type,\n 'collection_id': self.collection_id\n }\n\n def __unicode__(self):\n return self.name\n\n def __repr__(self):\n return '<Entity(%r, %r)>' % (self.id, self.name)\n" }, { "alpha_fraction": 0.5845780372619629, "alphanum_fraction": 0.5855115652084351, "avg_line_length": 31.658536911010742, "blob_id": "b7e55af933432ca7df8bb3ba2feec371c2bd11dc", "content_id": "bf7a2e4fb91c8b8a635d47806effb5e073321422", "detected_licenses": [ "MIT" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 5356, "license_type": "permissive", "max_line_length": 79, "num_lines": 164, "path": "/aleph/datasets/mapper.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nimport fingerprints\nfrom hashlib import sha1\nfrom pprint import pprint # noqa\n\nfrom aleph.core import schemata\nfrom aleph.schema import Schema\nfrom aleph.util import dict_list, unique_list\nfrom aleph.text import string_value\nfrom aleph.datasets.formatting import Formatter\nfrom aleph.datasets.util import finalize_index\n\nlog = logging.getLogger(__name__)\n\n\nclass MapperProperty(object):\n\n def __init__(self, mapper, name, data, schema):\n self.mapper = mapper\n self.name = name\n self.data = data\n self.schema = schema\n self.refs = dict_list(data, 'column', 'columns')\n self.literals = dict_list(data, 'literal', 'literals')\n self.join = data.get('join')\n\n # this is hacky, trying to generate refs from template\n self.template = data.get('template')\n if self.template is not None:\n self.formatter = Formatter(self.template)\n self.refs.extend(self.formatter.refs)\n\n def get_values(self, record):\n values = []\n if self.template is not None:\n values.append(self.formatter.apply(record))\n else:\n for ref in self.refs:\n values.append(record.get(ref))\n values.extend(self.literals)\n values = [self.schema.type.clean(v, record, self.data) for v in values]\n values = [v for v in values if v is not None]\n\n if self.join is not None:\n values = [self.join.join(values)]\n\n return unique_list(values)\n\n def __repr__(self):\n return '<MapperProperty(%r, %r, %r)>' % (self.mapper, self.name,\n self.schema)\n\n\nclass Mapper(object):\n\n def __init__(self, query, data):\n self.query = query\n self.data = data\n self.keys = dict_list(data, 'keys', 'key')\n self.key_fingerprint = data.get('key_fingerprint', False)\n\n self.schema = schemata.get(data.get('schema'))\n if self.schema is None or 
self.schema.section != self.section:\n raise TypeError(\"Invalid schema: %r\" % data.get('schema'))\n\n self.properties = []\n for name, prop in data.get('properties', {}).items():\n schema = self.schema.get(name)\n self.properties.append(MapperProperty(self, name, prop, schema))\n\n @property\n def refs(self):\n for key in self.keys:\n yield key\n for prop in self.properties:\n for ref in prop.refs:\n yield ref\n\n def compute_properties(self, record):\n return {p.name: p.get_values(record) for p in self.properties}\n\n def compute_key(self, record):\n digest = sha1(self.query.dataset.name.encode('utf-8'))\n has_key = False\n for key in self.keys:\n value = record.get(key)\n if self.key_fingerprint:\n value = fingerprints.generate(value)\n else:\n value = string_value(value)\n if value is None:\n continue\n digest.update(value.encode('utf-8'))\n has_key = True\n if has_key:\n return digest.hexdigest()\n\n def to_index(self, record):\n return {\n 'dataset': self.query.dataset.name,\n 'roles': self.query.dataset.roles,\n 'properties': self.compute_properties(record)\n }\n\n def __repr__(self):\n return '<Mapper(%r)>' % self.query\n\n\nclass EntityMapper(Mapper):\n section = Schema.ENTITY\n\n def __init__(self, query, name, data):\n self.name = name\n super(EntityMapper, self).__init__(query, data)\n if not len(self.keys):\n log.warning(\"No key criteria defined: %r\", data)\n\n def to_index(self, record):\n data = super(EntityMapper, self).to_index(record)\n data['id'] = self.compute_key(record)\n if data['id'] is None:\n return\n return finalize_index(data, self.schema)\n\n\nclass LinkMapper(Mapper):\n section = Schema.LINK\n\n def __init__(self, query, data):\n super(LinkMapper, self).__init__(query, data)\n\n def to_index(self, record, entities, inverted=False):\n data = super(LinkMapper, self).to_index(record)\n data['inverted'] = inverted\n\n source, target = self.data.get('source'), self.data.get('target')\n origin, remote = entities.get(source), 
entities.get(target)\n if inverted:\n origin, remote = remote, origin\n\n if origin is None or remote is None:\n # If data was missing for either the source or target entity\n # they will be None, and we should not create a link.\n return\n\n # We don't need to index the entity here, since it's already known\n # in the simplest case (entity profile pages).\n data['origin'] = {\n 'id': origin.get('id'),\n 'fingerprints': origin.get('fingerprints'),\n }\n # this is expanded post entity indexing.\n data['remote'] = remote.get('id')\n\n # Generate a link ID\n digest = sha1()\n digest.update(str(inverted))\n digest.update(origin['id'])\n digest.update(remote['id'])\n key_digest = self.compute_key(record)\n if key_digest is not None:\n digest.update(key_digest)\n data['id'] = digest.hexdigest()\n return finalize_index(data, self.schema)\n" }, { "alpha_fraction": 0.7565861940383911, "alphanum_fraction": 0.7716794610023499, "avg_line_length": 48.24324417114258, "blob_id": "ec36b493d1ab70aa46dea0b338e39fbe51c1e303", "content_id": "7bb9fe2f676bcf3915a65a5021d95b6cfacfde40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3644, "license_type": "permissive", "max_line_length": 133, "num_lines": 74, "path": "/docs/resources.md", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "# External resources and links\n\nMostly external resources worth mentioning.\n\n## Articles\n\nBelow you can find a curated list of articles and blog posts about the purpose\nbehind this project.\n\n* [A little tour of aleph, a data search tool for reporters](http://pudo.org/blog/2016/06/29/aleph.html)\n* [Who's got dirt?](http://pudo.org/blog/2015/05/30/whos-got-dirt.html)\n* [Why influence mapping matters to journalism](http://pudo.org/blog/2014/12/13/influence-mapping-journalism.html)\n* [Data doesn't grow in tables: harvesting journalistic insight from documents](http://pudo.org/blog/2015/05/15/document-mining.html)\n* 
[Influence Mapping Group](http://www.influencemapping.org/)\n* [Tools for Influence Mapping](http://www.influencemapping.org/tools/)\n* [Technology for Investigative Journalism Conference](http://www.influencemapping.org/workshop/)\n\n## Alternatives and related projects\n\nAleph is one of many document processing and search tools targeted at\njournalists, activists etc. Many of these are similar in scope, Aleph aims\nto distinguish itself by providing entity cross-referencing and seamless\nsupport for both tabular and textual data.\n\n* [DocumentCloud](https://github.com/documentcloud), the biggest document\n hosting site for journalistic content, including OCR and\n organisation/project-level access control.\n* [DARPA MEMEX](http://opencatalog.darpa.mil/MEMEX.html#), a coordinated\n research to make domain-specific deep web search engines.\n* [Overview Project](http://overviewproject.org/), document mining tool with\n plugin architecture, both hosted & local\n* [Transparency Toolkit](https://github.com/TransparencyToolkit), LookingGlass\n is an indexing server for JSON documents with support for theming, used\n mainly for scraped social media profiles.\n* [resourcecontracts.org](https://github.com/developmentseed/rw-contracts),\n visual browser for resource (oil, mining, etc.) 
contract documents.\n* [ICIJ Extract](https://github.com/icij/extract), Java-based OCR and content\n extraction pipeline used for large-scale leaks.\n* [Hoover](https://github.com/hoover), Python-based search engine.\n* [mma-dexter](https://github.com/Code4SA/mma-dexter), used by Media Monitoring\n Africa to do content classification and guided entity extraction of South\n African media.\n* Omecca, eprints, fedora, dspace\n\n**Defunct but interesting**\n\n* [datawi.re](https://github.com/pudo/datawi.re), doc mining as a timeline\n* [analice.me](https://github.com/hhba/mapa76), document management and data\n extraction tool by Hacks/Hackers Buenos Aires.\n* [Unveillance](http://www.knightfoundation.org/grants/201550896/), harlo's\n git-annex-based topic modelling tool\n\n**Framework-ey stuff**\n\n* [OpenCalais](http://www.opencalais.com/), LingPipe, AlchemyAPI\n* [Apache Airflow](https://github.com/apache/incubator-airflow)\n* [nltk](http://www.nltk.org/), [patterns](http://www.clips.ua.ac.be/pattern)\n\n## Text mining bookmarks\n\nHere's some relevant Python text mining bookmarks:\n\n* https://bugzilla.redhat.com/show_bug.cgi?id=191060#c1\n* https://github.com/deanmalmgren/textract/blob/master/textract/parsers/pptx_parser.py\n* https://github.com/chardet/chardet\n* https://github.com/PyYoshi/cChardet (a faster character detection library for Python)\n* http://poppler.freedesktop.org/\n* http://www.unixuser.org/~euske/python/pdfminer/index.html\n* https://mstamy2.github.io/PyPDF2/#documentation\n* http://pybrary.net/pyPdf/pythondoc-pyPdf.pdf.html\n* https://svn.apache.org/viewvc/httpd/httpd/branches/2.2.x/docs/conf/mime.types?view=annotate\n\n* [pdfminer.six](https://github.com/goulu/pdfminer)\n* [tesserocr](https://github.com/sirfz/tesserocr)\n" }, { "alpha_fraction": 0.5529100298881531, "alphanum_fraction": 0.5529100298881531, "avg_line_length": 24.200000762939453, "blob_id": "280ac4ee7cb9dd7075c95a9b675c1da1bdbcfc61", "content_id": 
"b2c8e715918bacde53b59274cce4d9de18f16f11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 756, "license_type": "permissive", "max_line_length": 80, "num_lines": 30, "path": "/aleph/static/js/directives/privacyIcon.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.directive('privacyIcon', [function() {\n return {\n restrict: 'E',\n transclude: false,\n scope: {\n 'subject': '=',\n 'label': '='\n },\n templateUrl: 'templates/util/privacy_icon.html',\n link: function (scope, element, attrs, model) {\n scope.label = !!scope.label;\n scope.icon = 'fa-lock';\n scope.tooltip = 'Visible to some users';\n\n if (!angular.isUndefined(scope.subject)) {\n if (angular.isUndefined(scope.subject.public) || scope.subject.public) {\n scope.icon = 'fa-globe';\n scope.tooltip = null;\n }\n }\n\n if (scope.label) {\n scope.labelText = scope.tooltip || 'Public';\n scope.tooltip = null;\n }\n }\n };\n}]);\n" }, { "alpha_fraction": 0.5662500262260437, "alphanum_fraction": 0.5662500262260437, "avg_line_length": 23.24242401123047, "blob_id": "d1b2805ca2963046b24ffc98d9d259f461697092", "content_id": "5b2b66d17b7b18b43d586c3201ddbb7953961f7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "permissive", "max_line_length": 51, "num_lines": 33, "path": "/aleph/model/cache.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.core import db\n\n\nclass Cache(db.Model):\n \"\"\"Store OCR computation results.\"\"\"\n __tablename__ = 'cache'\n\n id = db.Column(db.BigInteger, primary_key=True)\n key = db.Column(db.Unicode, index=True)\n value = db.Column(db.Unicode)\n\n @classmethod\n def get_cache(cls, key):\n q = db.session.query(cls.value)\n q = q.filter_by(key=key)\n cobj = q.first()\n if cobj is not None:\n return cobj.value\n\n @classmethod\n def 
set_cache(cls, key, value):\n session = db.sessionmaker(bind=db.engine)()\n cobj = cls()\n cobj.key = key\n cobj.value = value\n session.add(cobj)\n session.commit()\n\n def __repr__(self):\n return '<Cache(%r)>' % self.key\n\n def __unicode__(self):\n return self.key\n" }, { "alpha_fraction": 0.6614250540733337, "alphanum_fraction": 0.6614250540733337, "avg_line_length": 32.91666793823242, "blob_id": "f323414b4ac86d3fa92fbde62c36c3ced11f9c87", "content_id": "834f1f4d492b0f63da5d0bc6206cd3a650ffe58d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2035, "license_type": "permissive", "max_line_length": 190, "num_lines": 60, "path": "/aleph/static/js/controllers/EntitiesViewCtrl.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.controller('EntitiesViewCtrl', ['$scope', '$route', '$location', '$anchorScroll', 'Authz', 'Title', 'Entity', 'Link', 'Document', 'entity', 'links', 'similar', 'documents', 'metadata',\n function($scope, $route, $location, $anchorScroll, Authz, Title, Entity, Link, Document, entity, links, similar, documents, metadata) {\n\n Title.set(entity.name, \"entities\");\n $scope.authz = Authz;\n $scope.metadata = metadata;\n $scope.entity = entity;\n $scope.links = links;\n $scope.documents = documents;\n $scope.similar = similar;\n\n $scope.showLinks = links.result.total || links.query.isFiltered();\n $scope.showLinksNav = links.result.total > links.result.limit || links.query.isFiltered();\n $scope.showDocuments = documents.result.total || documents.query.isFiltered();\n $scope.showDocumentsNav = documents.result.total > documents.result.limit || documents.query.isFiltered();\n $scope.showSimilar = similar.result.total || similar.query.isFiltered();\n\n $scope.loadLinksOffset = function(offset) {\n $scope.links.query.set('offset', offset);\n $location.hash('links')\n $anchorScroll();\n };\n\n $scope.loadDocumentsOffset = 
function(offset) {\n $scope.documents.query.set('offset', offset);\n $location.hash('documents')\n $anchorScroll();\n };\n\n $scope.loadSimilarOffset = function(offset) {\n $scope.similar.query.set('offset', offset);\n $location.hash('similar')\n $anchorScroll();\n };\n\n $scope.searchLinks = function(form) {\n $scope.links.query.update();\n };\n\n $scope.edit = function() {\n Entity.edit($scope.entity.id).then(function() {\n $route.reload();\n });\n };\n\n $scope.$on('$routeUpdate', function() {\n Link.search(entity.id, 'links_').then(function(links) {\n $scope.links = links;\n });\n Document.searchEntity(entity.id, 'documents_').then(function(documents) {\n $scope.documents = documents;\n });\n Entity.searchSimilar(entity.id, 'similar_').then(function(similar) {\n $scope.similar = similar;\n });\n });\n\n}]);\n" }, { "alpha_fraction": 0.6147186160087585, "alphanum_fraction": 0.6147186160087585, "avg_line_length": 21, "blob_id": "c7df33de6bb1b4bf292b7a5381f479f082d42453", "content_id": "3f8d6573f52d514527ef40380a4a851ca25cee6e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 462, "license_type": "permissive", "max_line_length": 68, "num_lines": 21, "path": "/aleph/static/js/directives/entityCountries.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.directive('entityCountries', ['Metadata', function(Metadata) {\n var countries = {};\n\n Metadata.get().then(function(metadata) {\n countries = metadata.countries;\n });\n\n return {\n restrict: 'E',\n transclude: false,\n scope: {\n 'entity': '='\n },\n templateUrl: 'templates/entities/countries.html',\n link: function (scope, element, attrs, model) {\n scope.countryNames = countries;\n }\n };\n}]);\n" }, { "alpha_fraction": 0.6131504774093628, "alphanum_fraction": 0.6151673793792725, "avg_line_length": 36, "blob_id": "a2c81845fc04033024331329a8c01ca3512cd592", "content_id": 
"135777e365d57e225addf7d0635756f7f6190d04", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2479, "license_type": "permissive", "max_line_length": 93, "num_lines": 67, "path": "/aleph/logic/leads.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "# Lead generator\nimport logging\n\nfrom aleph.authz import Authz\nfrom aleph.core import db\nfrom aleph.index import delete_entity_leads, index_lead\nfrom aleph.search import QueryState\nfrom aleph.search.entities import load_entity, similar_entities\nfrom aleph.model import EntityIdentity\nfrom aleph.logic.distance import entity_distance\n\nlog = logging.getLogger(__name__)\n\n\ndef generate_leads(entity_id):\n \"\"\"Compute likely duplicates of a given entity and index these leads.\"\"\"\n # Get rid of everything, also for deleted entities etc.\n delete_entity_leads(entity_id)\n\n entity = load_entity(entity_id)\n if entity is None:\n # log.warning(\"[%r] not indexed, skip lead generation.\", entity_id)\n return\n if not entity.get('collection_id'):\n # log.warning(\"[%r] is not in a collecton, skip lead generation.\", entity_id) # noqa\n return\n\n log.debug(\"Generating leads for [%(id)s]: %(name)s\", entity)\n authz = Authz(override=True)\n judgements = EntityIdentity.judgements_by_entity(entity_id)\n state = QueryState({}, authz, limit=100)\n result = similar_entities(entity, state)\n for other in result.get('results', []):\n score = entity_distance(entity, other)\n log.debug(\" -[%.2f]-> %s\", score, other.get('name'))\n # TODO: implement some cut-off\n index_lead({\n 'entity_id': entity.get('id'),\n 'entity_collection_id': entity.get('collection_id'),\n 'score': score,\n 'judgement': judgements.get(other.get('id'), 0),\n 'match_id': other.get('id'),\n 'schema': other.get('schema'),\n 'schemata': other.get('schemata'),\n 'collection_id': other.get('collection_id'),\n 'dataset': other.get('dataset'),\n 'roles': other.get('roles')\n 
})\n\n\ndef update_lead(entity, match, judgement, judge=None):\n EntityIdentity.save(entity.get('id'), match.get('id'),\n judgement, judge=judge)\n db.session.commit()\n score = entity_distance(entity, match)\n index_lead({\n 'entity_id': entity.get('id'),\n 'entity_collection_id': entity.get('collection_id'),\n 'score': score,\n 'judgement': judgement,\n 'match_id': match.get('id'),\n 'schema': match.get('schema'),\n 'schemata': match.get('schemata'),\n 'collection_id': match.get('collection_id'),\n 'dataset': match.get('dataset'),\n 'roles': match.get('roles')\n })\n" }, { "alpha_fraction": 0.686011016368866, "alphanum_fraction": 0.6876400113105774, "avg_line_length": 31.523178100585938, "blob_id": "f754bf14f40b37c05ce43eb7b31d937cadb60866", "content_id": "a40ce4b5accfcf661af3e0bf4b86ae22f2ab5f47", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4911, "license_type": "permissive", "max_line_length": 113, "num_lines": 151, "path": "/docs/plugins.md", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "# Extending Aleph (Plugins)\n\nAleph's functionality can be extended via a system of plug-ins which are small\nPython modules.\n\nPlugins can be located both within the main aleph code base, or in an external\nPython module. 
They are registered and activated using a distutils entrypoint:\n\nAn example:\n\n```python\nfrom setuptools import setup, find_packages\n\nsetup(\n name='aleph_occrp',\n version='0.2',\n [...]\n entry_points={\n 'aleph.init': [\n 'occrpext = aleph_occrp:register'\n ]\n 'aleph.crawlers': [\n 'my_crawler = aleph_occrp.my_crawler:CrawlerClass'\n ]\n }\n)\n```\n\nSee the\n[main setup.py](https://github.com/alephdata/aleph/blob/master/setup.py) for a real\nexample.\n\nThe supported entry points include:\n\n* ``aleph.init``, for simple functions to be executed upon system startup.\n* ``aleph.crawlers``, for [Crawlers](#crawlers)\n* ``aleph.ingestors`` to support additional file type imports.\n* ``aleph.analyzers``, which are run to extract structured metadata from documents after they have been imported.\n\n## Signals\n\nThe documentation for this part is missing at the moment.\n\n## Custom SCSS\n\nAn additional environment variable, ``CUSTOM_SCSS_PATH``, can be used to\nspecify the path to a SCSS file which will be imported into the application\nupon start. The given path must be absolute, or relative to the run-time\nlocation of Aleph. An example would be:\n\n```bash\ndocker-compose run -e CUSTOM_SCSS_PATH=my.scss app make web\n```\n\n## Creating new crawlers\n\nCustom crawlers are useful to directly import large amounts of data into the\nsystem. This can make sense for custom scrapers or crawlers where the\nindirection of using [Metafolders](glossary.md#metafolders) is not desirable.\n\nCrawlers are Python classes and exposed via the `entry_point` of a Python\npackage. 
To develop a custom crawler, start by setting up a separate Python\npackage with it's own `setup.py` ([learn\nmore](https://python-packaging.readthedocs.io/en/latest/)).\n\nA basic crawler will extend the relevant `Crawler` class from Aleph and\nimplement its `crawl()` method, below you can find an example:\n\n```python\nfrom aleph.crawlers import DocumentCrawler\n\nclass ExampleCrawler(DocumentCrawler):\n COLLECTION_ID = 'example'\n\n def crawl(self):\n\t for i in range(0, 1000):\n\t\t meta = self.metadata()\n\t meta.foreign_id = 'example-doc:%s' % i\n meta.title = 'Document Number %s' % i\n meta.mime_type = 'application/pdf'\n url = 'https://example.com/documents/%s.pdf' % i\n self.emit_url(meta, url)\n```\n\nBesides `emit_url`, results can also be forwarded using the `emit_file(meta,\nfile_path)` method. If a crawler creates collections, it can use\n`emit_collection(collection, entity_search_terms)` which will start a partial\nre-index of documents.\n\nTo support indexing only new documents on incremental/update crawls, you can\nuse `self.skip_incremental`:\n\n```python\n if self.skip_incremental(foreign_id):\n logger.info(\"Skipping known %s\", foreign_id)\n return\n```\n\nIn order to make sure that Aleph can find the new crawler, it must be added\nto the `setup.py` of your package, see above how plugins work:\n\n```python\nsetup(\n name='mypackage',\n ...\n entry_points={\n 'aleph.crawlers': [\n 'example = mypackage.example:ExampleCrawler'\n ]\n }\n)\n```\n\nFinally, you must ensure that the plugin package is installed in your `aleph`\ndocker container (or using your deployment method), for example by extending\nthe `Dockerfile` to include the plugin package. 
Once this is ready, run the\ncrawler from inside the container:\n\n```bash\ndocker-compose run app python aleph/manage.py crawl example\n```\n\n## Custom OAuth\n\nIt's possible to hook into the login code to support other providers, but you\nneed to handle the creation of user and group roles through some specific code.\nThis is the code used at OCCRP for OAuth via the Investigative Dashboard (it\nrequires the use of plugins to be activated:\n\n```python\nfrom aleph import signals\n\[email protected]_oauth_session.connect\ndef handle_occrp_oauth(sender, provider=None, session=None):\n from aleph.model import Role\n if 'investigativedashboard.org' not in provider.base_url:\n return\n me = provider.get('api/2/accounts/profile/')\n user_id = 'idashboard:user:%s' % me.data.get('id')\n role = Role.load_or_create(user_id, Role.USER,\n me.data.get('display_name'),\n email=me.data.get('email'),\n is_admin=me.data.get('is_admin'))\n role.clear_roles()\n for group in me.data.get('groups', []):\n group_id = 'idashboard:%s' % group.get('id')\n group_role = Role.load_or_create(group_id, Role.GROUP,\n group.get('name'))\n role.add_role(group_role)\n session['user'] = role.id\n```\n" }, { "alpha_fraction": 0.6191999912261963, "alphanum_fraction": 0.6194666624069214, "avg_line_length": 40.20878982543945, "blob_id": "ea4c63ea52c9e17f9921453115a7302c41a1a88f", "content_id": "4fb0ad578984c844ebf244151b6d8f94c6ccb2fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3750, "license_type": "permissive", "max_line_length": 76, "num_lines": 91, "path": "/aleph/ingest/result.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom collections import OrderedDict\nfrom ingestors import Result\nfrom normality import stringify\n\nfrom aleph.core import db\nfrom aleph.model import Document, DocumentRecord\nfrom aleph.model import DocumentTag, DocumentTagCollector\n\nlog = 
logging.getLogger(__name__)\n\n\nclass DocumentResult(Result):\n \"\"\"Wrapper to link a Document to an ingestor result object.\"\"\"\n\n def __init__(self, manager, document, file_path=None, user_queue=False):\n self.manager = manager\n self.user_queue = user_queue\n self.document = document\n self.pdf_hash = document.pdf_version\n self.columns = OrderedDict()\n bind = super(DocumentResult, self)\n bind.__init__(id=document.foreign_id,\n checksum=document.content_hash,\n file_path=file_path,\n title=document.meta.get('title'),\n summary=document.meta.get('summary'),\n author=document.meta.get('author'),\n keywords=document.meta.get('keywords', []),\n file_name=document.meta.get('file_name'),\n mime_type=document.meta.get('mime_type'),\n encoding=document.meta.get('encoding'),\n languages=document.meta.get('languages', []),\n headers=document.meta.get('headers'),\n size=document.meta.get('file_size'))\n\n def emit_page(self, index, text):\n \"\"\"Emit a plain text page.\"\"\"\n self.document.type = Document.TYPE_TEXT\n record = DocumentRecord()\n record.document_id = self.document.id\n record.text = text\n record.index = index\n db.session.add(record)\n\n def _emit_iterator_rows(self, iterator):\n for row in iterator:\n for column in row.keys():\n self.columns[column] = None\n yield row\n\n def emit_rows(self, iterator):\n \"\"\"Emit rows of a tabular iterator.\"\"\"\n # TODO: also generate a tabular rep for the metadata\n self.document.type = Document.TYPE_TABULAR\n self.document.insert_records(0, self._emit_iterator_rows(iterator))\n\n def emit_pdf_alternative(self, file_path):\n self.pdf_hash = self.manager.archive.archive_file(file_path)\n\n def update(self):\n \"\"\"Apply the outcome of the result to the document.\"\"\"\n if self.status == self.STATUS_SUCCESS:\n self.document.status = Document.STATUS_SUCCESS\n self.document.error_message = None\n else:\n self.document.status = Document.STATUS_FAIL\n self.document.type = Document.TYPE_OTHER\n 
self.document.error_message = self.error_message\n self.document.foreign_id = stringify(self.id)\n if self.checksum:\n self.document.content_hash = self.checksum\n self.document.file_size = self.size\n self.document.file_name = self.file_name\n self.document.title = stringify(self.title)\n self.document.summary = stringify(self.summary)\n self.document.author = stringify(self.author)\n self.document.keywords = self.keywords\n self.document.mime_type = stringify(self.mime_type)\n self.document.encoding = self.encoding\n self.document.languages = self.languages\n self.document.headers = self.headers\n self.document.pdf_version = self.pdf_hash\n self.document.columns = self.columns.keys()\n\n collector = DocumentTagCollector(self.document, 'ingestors')\n for entity in self.entities:\n collector.emit(entity, DocumentTag.TYPE_PERSON)\n for email in self.emails:\n collector.emit(email, DocumentTag.TYPE_EMAIL)\n collector.save()\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.7124999761581421, "avg_line_length": 19, "blob_id": "b98a19e4abd3bff81048e34ef59e96ea1f82dc3a", "content_id": "4dd744cc402075c508a17b732c0d4c55cb78bf01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 80, "license_type": "permissive", "max_line_length": 23, "num_lines": 4, "path": "/requirements-docs.txt", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "sphinx==1.4.9\nsphinx-autoapi==0.4.0\nsphinx_rtd_theme==0.1.9\nrecommonmark==0.4.0\n" }, { "alpha_fraction": 0.555232584476471, "alphanum_fraction": 0.5570493936538696, "avg_line_length": 34.28205108642578, "blob_id": "0771e2ec1483e9f3b944b2d93f3bedd7035516f0", "content_id": "01d75f523c1904cb0b802c9f1dc689f17b6d246b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2752, "license_type": "permissive", "max_line_length": 79, "num_lines": 78, "path": "/aleph/logic/alerts.py", "repo_name": "gazeti/aleph", 
"src_encoding": "UTF-8", "text": "import logging\nfrom urllib import quote_plus\nfrom flask import render_template, current_app\n\nfrom aleph.authz import Authz\nfrom aleph.core import app_title, app_url, db, celery\nfrom aleph.model import Role, Alert, Collection\nfrom aleph.notify import notify_role\nfrom aleph.search import QueryState, documents_query\n\nlog = logging.getLogger(__name__)\n\n\[email protected]()\ndef check_alerts():\n \"\"\"Go through all users and execute their alerts.\"\"\"\n for role_id, in Role.notifiable():\n with current_app.test_request_context('/'):\n role = Role.by_id(role_id)\n authz = Authz(role=role)\n check_role_alerts(authz)\n\n\ndef format_results(alert, results):\n # used to activate highlighting in results pages:\n dq = alert.query_text or ''\n qs = 'dq=%s' % quote_plus(dq.encode('utf-8'))\n output = []\n for result in results['results']:\n collection_id = result.pop('collection_id', None)\n if not collection_id:\n continue\n result['collection'] = Collection.by_id(collection_id)\n\n # generate document URL:\n if 'tabular' == result.get('type'):\n result['url'] = '%stabular/%s/0?%s' % (app_url, result['id'], qs)\n else:\n result['url'] = '%stext/%s?%s' % (app_url, result['id'], qs)\n\n # preview snippets:\n result['snippets'] = []\n for record in result['records'].get('results', []):\n result['snippets'].append(record['text'])\n output.append(result)\n return output\n\n\ndef check_role_alerts(authz):\n alerts = Alert.by_role(authz.role).all()\n if not len(alerts):\n return\n log.info('Alerting %r, %d alerts...', authz.role, len(alerts))\n for alert in alerts:\n args = {\n 'q': alert.query_text,\n 'filter:entities.id': alert.entity_id,\n 'limit': 50\n }\n state = QueryState(args, authz)\n results = documents_query(state, since=alert.notified_at)\n if results['total'] == 0:\n continue\n log.info('Found %d new results for: %r', results['total'], alert.label)\n alert.update()\n try:\n subject = '%s (%s new results)' % (alert.label, 
results['total'])\n html = render_template('email/alert.html',\n alert=alert,\n role=authz.role,\n total=results.get('total'),\n results=format_results(alert, results),\n app_title=app_title,\n app_url=app_url)\n notify_role(authz.role, subject, html)\n except Exception as ex:\n log.exception(ex)\n db.session.commit()\n" }, { "alpha_fraction": 0.5573253035545349, "alphanum_fraction": 0.5574687719345093, "avg_line_length": 30.533937454223633, "blob_id": "f84f967631ed0b8103c3f179c9df70ce92edd12f", "content_id": "4702f5ffa8e558f949a33a1ede82cf45d4a7360f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6969, "license_type": "permissive", "max_line_length": 78, "num_lines": 221, "path": "/aleph/schema/__init__.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.util import dict_list, ensure_list\nfrom aleph.text import string_value\nfrom aleph.schema.types import resolve_type\n\n\nclass SchemaValidationException(Exception):\n \"\"\"Schema validation errors will be caught by the API.\"\"\"\n\n def __init__(self, errors):\n self.errors = errors\n super(SchemaValidationException, self).__init__(repr(errors))\n\n\nclass SchemaProperty(object):\n\n def __init__(self, schema, name, data):\n self.schema = schema\n self.name = name.strip()\n self.data = data\n self.label = data.get('label', name)\n self.hidden = data.get('hidden', False)\n self.is_multiple = data.get('multiple', False)\n self.is_label = name == 'name'\n cls = resolve_type(data.get('type', 'string'))\n self.type = cls()\n\n def validate(self, data):\n \"\"\"Validate that the data should be stored.\n\n Since the types system doesn't really have validation, this currently\n tries to normalize the value to see if it passes strict parsing.\n \"\"\"\n value, error = [], None\n for val in ensure_list(data):\n val = string_value(val)\n if val is None:\n continue\n val = val.strip()\n if self.type.normalize_value(val) is None:\n 
error = \"Invalid value\"\n value.append(val)\n if not self.is_multiple:\n value = value[0] if len(value) else None\n else:\n value = list(set(value))\n if self.is_label and (value is None or not len(value)):\n error = \"Field is required.\"\n return value, error\n\n def to_dict(self):\n return {\n 'name': self.name,\n 'label': self.label,\n 'hidden': self.hidden,\n 'type': self.type.name\n }\n\n def __repr__(self):\n return '<SchemaProperty(%r, %r)>' % (self.schema, self.name)\n\n\nclass Schema(object):\n \"\"\"Defines the abstract data model.\n\n Schema items define the entities and links available in the model.\n \"\"\"\n\n ENTITY = 'entities'\n LINK = 'links'\n SECTIONS = [ENTITY, LINK]\n\n def __init__(self, schemata, section, name, data):\n assert section in self.SECTIONS, section\n self._schemata = schemata\n self.section = section\n self.name = name\n self.data = data\n self.label = data.get('label', name)\n self.plural = data.get('plural', self.label)\n self.icon = data.get('icon')\n # Do not show in listings:\n self.hidden = data.get('hidden', False)\n # Try to perform fuzzy matching. 
Fuzzy similarity search does not\n # make sense for entities which have a lot of similar names, such\n # as land plots, assets etc.\n self.fuzzy = data.get('fuzzy', True)\n self._extends = dict_list(data, 'extends')\n\n self._own_properties = []\n for name, prop in data.get('properties', {}).items():\n self._own_properties.append(SchemaProperty(self, name, prop))\n\n self.forward = data.get('forward', self.label)\n self.reverse = data.get('reverse', self.label)\n\n @property\n def extends(self):\n \"\"\"Return the inherited schemata.\"\"\"\n for base in self._extends:\n yield self._schemata.get(base)\n\n @property\n def schemata(self):\n \"\"\"Return the full inheritance chain.\"\"\"\n yield self\n for base in self.extends:\n for schema in base.schemata:\n yield schema\n\n @property\n def properties(self):\n \"\"\"Return properties, those defined locally and in ancestors.\"\"\"\n names = set()\n for prop in self._own_properties:\n names.add(prop.name)\n yield prop\n for schema in self.extends:\n for prop in schema.properties:\n if prop.name in names:\n continue\n names.add(prop.name)\n yield prop\n\n def get(self, name):\n for prop in self.properties:\n if prop.name == name:\n return prop\n raise ValueError(\"[%r] missing property: %s\" % (self, name))\n\n def validate(self, data):\n \"\"\"Validate a dataset against the given schema.\n\n This will also drop keys which are not present as properties.\n \"\"\"\n result = {}\n errors = {}\n for prop in self.properties:\n value = data.get(prop.name)\n value, error = prop.validate(value)\n if error is not None:\n errors[prop.name] = error\n elif value is not None:\n result[prop.name] = value\n if len(errors):\n raise SchemaValidationException(errors)\n return result\n\n def to_dict(self):\n data = {\n 'type': self.section,\n 'label': self.label,\n 'plural': self.plural,\n 'icon': self.icon,\n 'hidden': self.hidden,\n 'fuzzy': self.fuzzy,\n 'properties': list(self.properties)\n }\n if self.section == Schema.LINK:\n 
data['forward'] = self.forward\n data['reverse'] = self.reverse\n return data\n\n def __repr__(self):\n return '<Schema(%r)>' % self.name\n\n\nclass SchemaSet(object):\n \"\"\"A collection of schemata.\"\"\"\n\n def __init__(self, data):\n self.schemata = {}\n\n for section in Schema.SECTIONS:\n for name, sconfig in data.get(section, {}).items():\n if name in self.schemata:\n raise TypeError(\"Duplicate schema name: %r\" % name)\n self.schemata[name] = Schema(self, section, name, sconfig)\n\n def get(self, name):\n schema = self.schemata.get(name)\n if schema is None:\n raise TypeError(\"No such schema: %r\" % name)\n return schema\n\n def merge_entity_schema(self, left, right):\n \"\"\"Select the most narrow of two schemata.\n\n When indexing data from a dataset, an entity may be declared as a\n LegalEntity in one query, and as a Person in another. This function\n will select the most specific of two schemata offered. In the example,\n that would be Person.\n \"\"\"\n if left == right:\n return left\n lefts = self.get(left)\n lefts = [s.name for s in lefts.schemata]\n if right in lefts:\n return left\n\n rights = self.get(right)\n rights = [s.name for s in rights.schemata]\n if left in rights:\n return right\n\n for left in lefts:\n for right in rights:\n if left == right:\n return left\n\n def to_dict(self):\n data = {}\n for name, schema in self.schemata.items():\n if not schema.hidden:\n data[name] = schema\n return data\n\n def __iter__(self):\n return iter(self.schemata.values())\n\n def __repr__(self):\n return '<SchemaSet(%r)>' % self.schemata\n" }, { "alpha_fraction": 0.5519630312919617, "alphanum_fraction": 0.5550423264503479, "avg_line_length": 23.05555534362793, "blob_id": "82b38e787ada3bfdaf1e7f6be174f8fa9141c23b", "content_id": "7b1f1d5de2e5a7c65e36669673918e3fa43c59d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1299, "license_type": "permissive", "max_line_length": 89, 
"num_lines": 54, "path": "/aleph/static/js/services/Metadata.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\nimport alephCore from '../schema';\n\naleph.factory('Metadata', ['$http', '$q', '$rootScope', function($http, $q, $rootScope) {\n var dfd = null;\n\n var flush = function() {\n dfd = null;\n return get();\n };\n\n var load = function() {\n var dt = new Date();\n dfd = $q.defer();\n $q.all([\n $http.get('/api/1/sessions', {cache: false, params: {'_': dt.getTime()}}),\n $http.get('/api/1/metadata', {cache: true})\n ]).then(function(results) {\n var session = results[0].data,\n metadata = angular.extend(results[1].data, {session: session}),\n schemata = {};\n\n // TODO: move the schema stuff somewhere else?\n for (var name in metadata.schemata) {\n schemata[name] = new alephCore.Schema(name, metadata.schemata[name]);\n }\n metadata.schemata = schemata;\n metadata.bindSchema = function(obj) {\n obj.$schema = metadata.schemata[obj.schema];\n obj.binds = obj.$schema.bindData(obj);\n return obj;\n };\n\n $rootScope.session = session;\n\n dfd.resolve(metadata);\n }, function(err) {\n dfd.reject(err);\n });\n };\n\n var get = function() {\n if (dfd === null) {\n load();\n }\n return dfd.promise;\n };\n\n return {\n get: get,\n flush: flush\n };\n\n}]);\n" }, { "alpha_fraction": 0.5875440835952759, "alphanum_fraction": 0.5887191295623779, "avg_line_length": 26.45161247253418, "blob_id": "8802c0e990ef22906e8493483c6fa096d76630c8", "content_id": "ec6ae8370fb2073dc084a359cf48660d3572addb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4255, "license_type": "permissive", "max_line_length": 155, "num_lines": 155, "path": "/aleph/static/js/controllers/EntitiesEditCtrl.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.controller('EntitiesEditCtrl', ['$scope', '$http', '$q', '$uibModalInstance', 
'Metadata', 'Authz', 'Alert', 'Entity', 'entity', 'metadata', 'alerts',\n function($scope, $http, $q, $uibModalInstance, Metadata, Authz, Alert, Entity, entity, metadata, alerts) {\n\n $scope.blocked = false;\n $scope.entity = entity;\n $scope.entity.country = entity.country || null;\n $scope.originalName = entity.name + '';\n $scope.section = 'base';\n $scope.isEntity = entity.schema == 'LegalEntity';\n $scope.isPerson = entity.schema == 'Person';\n $scope.isCompany = entity.schema == 'Company';\n $scope.isOrganization = (entity.schema == 'Organization') || $scope.isCompany;\n $scope.newAlias = null;\n $scope.newAliasEditing = false;\n $scope.duplicateOptions = [];\n\n var initDedupe = function() {\n var url = '/api/1/entities/' + entity.id + '/similar',\n params = {\n strict: false,\n 'filter:collection_id': entity.collection_id\n };\n $http.get(url, {params: params}).then(function(res) {\n $scope.duplicateOptions = res.data.results;\n }, function(err) {\n console.log('Error', err);\n $scope.duplicateOptions = [];\n });\n };\n\n var initAlerts = function() {\n $scope.alertId = null;\n for (var i in alerts.results) {\n var alert = alerts.results[i];\n if (!alert.query_text && alert.entity_id == $scope.entity.id) {\n $scope.alertId = alert.id;\n }\n }\n $scope.entity.haveAlert = $scope.alertId != null;\n };\n\n initDedupe();\n initAlerts();\n\n $scope.editNewAlias = function(flag) {\n $scope.newAliasEditing = flag;\n };\n\n $scope.editAlias = function($index, value) {\n $scope.entity.data.alias[$index] = value;\n };\n\n $scope.addAlias = function() {\n var newAlias = angular.copy($scope.newAlias);\n $scope.newAlias = null;\n $scope.entity.data.alias = $scope.entity.data.alias || [];\n $scope.entity.data.alias.push(newAlias);\n };\n\n $scope.canAddAlias = function() {\n return $scope.newAlias && $scope.newAlias.length > 2;\n };\n\n $scope.removeAlias = function(alias) {\n var idx = $scope.entity.data.alias.indexOf(alias);\n if (idx != -1) {\n 
$scope.entity.data.alias.splice(idx, 1);\n };\n };\n\n $scope.editDuplicate = function(dup) {\n Entity.edit(dup.id).then(function() {\n initDedupe();\n }, function(err) {\n console.log('Error', err);\n });\n };\n\n $scope.canSave = function() {\n if ($scope.blocked) {\n return false;\n }\n return $scope.editEntity.$valid;\n };\n\n $scope.setSection = function(section) {\n $scope.section = section;\n };\n\n $scope.cancel = function() {\n $uibModalInstance.dismiss('cancel');\n };\n\n var updateAlert = function() {\n var done = $q.defer();\n if ($scope.entity.haveAlert && !$scope.alertId) {\n Alert.create({entity_id: entity.id}).then(function() {\n done.resolve();\n });\n } else if (!$scope.entity.haveAlert && $scope.alertId) {\n Alert.delete($scope.alertId).then(function() {\n done.resolve();\n });\n } else {\n done.resolve();\n }\n return done.promise;\n };\n\n var mergeDuplicates = function() {\n var done = $q.defer(),\n merges = [];\n for (var i in $scope.duplicateOptions) {\n var dup = $scope.duplicateOptions[i];\n if (dup.$merge) {\n var url = '/api/1/entities/' + $scope.entity.id + '/merge/' + dup.id;\n merges.push($http.delete(url));\n }\n }\n $q.all(merges).then(function() {\n done.resolve();\n })\n return done.promise;\n };\n\n $scope.save = function(form) {\n if (!$scope.canSave()) {\n return false;\n }\n\n // check that we're not in the process of adding alternate\n // names and accidentally submitting the form.\n if ($scope.newAliasEditing) {\n // todo, detect save button clicks.\n if ($scope.canAddAlias()) {\n $scope.addAlias();\n }\n return false;\n }\n\n $scope.blocked = true;\n Entity.save($scope.entity).then(function(entity) {\n updateAlert().then(function() {\n mergeDuplicates().then(function() {\n $uibModalInstance.close(entity);\n });\n });\n }, function(err) {\n console.log('Error', err);\n $scope.blocked = false;\n });\n };\n}]);\n" }, { "alpha_fraction": 0.7163504958152771, "alphanum_fraction": 0.7163504958152771, "avg_line_length": 
33.06153869628906, "blob_id": "bbe72a0fdcf64fc08a8ecdef48ccb7dfb6f00d4c", "content_id": "acc987a6076ba039106a1f49b0bba4968b6c7782", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2214, "license_type": "permissive", "max_line_length": 77, "num_lines": 65, "path": "/aleph/logic/collections.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom datetime import datetime\n\nfrom aleph.core import db, celery\nfrom aleph.model import Collection, Entity\nfrom aleph.index.collections import delete_collection as index_delete\nfrom aleph.analyze import analyze_documents\nfrom aleph.logic.entities import delete_entity\nfrom aleph.logic.entities import update_entity_full\nfrom aleph.logic.documents import delete_document\n\nlog = logging.getLogger(__name__)\n\n\ndef update_collection(collection):\n \"\"\"Create or update a collection.\"\"\"\n pass\n\n\[email protected]()\ndef analyze_collection(collection_id):\n \"\"\"Re-analyze the elements of this collection, documents and entities.\"\"\"\n Entity.delete_dangling(collection_id)\n db.session.commit()\n\n q = db.session.query(Collection).filter(Collection.id == collection_id)\n collection = q.first()\n if collection is None:\n log.error(\"No collection with ID: %r\", collection_id)\n\n # re-process the documents\n analyze_documents(collection.id)\n\n # re-process entities\n for entity in collection.entities:\n update_entity_full(entity.id)\n\n\[email protected]()\ndef delete_collection(collection_id):\n # Deleting a collection affects many associated objects and requires\n # checks, so this is done manually and in detail here.\n q = db.session.query(Collection).filter(Collection.id == collection_id)\n collection = q.first()\n if collection is None:\n log.error(\"No collection with ID: %r\", collection_id)\n return\n\n log.info(\"Deleting collection [%r]: %r\", collection.id, collection.label)\n index_delete(collection_id)\n 
deleted_at = datetime.utcnow()\n for entity in collection.entities:\n # TODO: consider hard-deleting entities because the polyglot tagger\n # cannot tell if a deleted match on a tagged term on a revived\n # collection means not to tag this entity any more.\n log.info(\"Delete entity: %r\", entity)\n delete_entity(entity, deleted_at=deleted_at)\n\n for document in collection.documents:\n log.info(\"Delete document: %r\", document)\n delete_document(document, deleted_at=deleted_at)\n\n db.session.refresh(collection)\n collection.delete(deleted_at=deleted_at)\n db.session.commit()\n" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 9, "blob_id": "dda150591f7fb3c6dfb48cecc7ba7a7221e8087d", "content_id": "92b45e16c4f6b4f333156392b885cadefc5d7389", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "permissive", "max_line_length": 16, "num_lines": 4, "path": "/requirements-testing.txt", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "faker\nfactory_boy\nnose\nflexmock==0.10.2\n" }, { "alpha_fraction": 0.5825627446174622, "alphanum_fraction": 0.5825627446174622, "avg_line_length": 29.280000686645508, "blob_id": "ccc946be16dbd57214b3885d2974055badc4ad72", "content_id": "3b9e9004205432626b02ba271960707b930ab84d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 757, "license_type": "permissive", "max_line_length": 57, "num_lines": 25, "path": "/aleph/datasets/formatting.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import re\nimport six\nfrom normality import collapse_spaces\n\nFORMAT_PATTERN = re.compile('{{([^(}})]*)}}')\n\n\nclass Formatter(object):\n\n def __init__(self, template):\n self.template = six.text_type(template)\n self.refs = []\n self.replacements = {}\n for ref in FORMAT_PATTERN.findall(self.template):\n self.refs.append(ref)\n 
repl = '{{%s}}' % ref\n self.replacements[repl] = ref\n\n def apply(self, record):\n value = six.text_type(self.template)\n for repl, ref in self.replacements.items():\n ref_value = record.get(ref) or ''\n ref_value = six.text_type(ref_value)\n value = value.replace(repl, ref_value)\n return collapse_spaces(value).strip()\n" }, { "alpha_fraction": 0.6156941652297974, "alphanum_fraction": 0.6167001724243164, "avg_line_length": 28.524751663208008, "blob_id": "61d5f1743492254d38d36f37653034e82fbf58b5", "content_id": "f9c785742b93b58637192580021fbb902b989dad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2982, "license_type": "permissive", "max_line_length": 75, "num_lines": 101, "path": "/aleph/tests/test_role_model.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import ldap\nfrom flexmock import flexmock\n\nfrom aleph.core import db\nfrom aleph.model import Role\nfrom aleph.tests.factories.models import RoleFactory\n\nfrom aleph.core import get_config\nfrom aleph.model.role import LDAPException\nfrom aleph.tests.util import TestCase\n\n\nclass RoleModelTest(TestCase):\n\n def setUp(self):\n super(RoleModelTest, self).setUp()\n\n self.role = RoleFactory.create()\n db.session.commit()\n\n def test_password(self):\n password = self.fake.password()\n\n role = RoleFactory.create()\n self.assertFalse(role.check_password(password))\n\n role.set_password(password)\n self.assertTrue(role.check_password(password))\n\n role.set_password(self.fake.password())\n self.assertFalse(role.check_password(password))\n\n def test_authenticate_using_ldap_with_blank_password(self):\n secret = ''\n\n self.assertIsNone(\n Role.authenticate_using_ldap(self.role.email, secret)\n )\n\n def test_authenticate_using_ldap_with_bad_user_pass(self):\n secret = self.fake.password()\n email = self.fake.email()\n fake_ldap_conn = flexmock(set_option=lambda x, y: x)\n\n (flexmock(fake_ldap_conn)\n 
.should_receive('simple_bind_s')\n .with_args(get_config('LDAP_BASE_DN').format(email), secret)\n .and_raise(LDAPException('Failed auth.'))\n .times(1))\n\n (flexmock(ldap)\n .should_receive('initialize')\n .and_return(fake_ldap_conn))\n\n self.assertIsNone(\n Role.authenticate_using_ldap(email, secret)\n )\n\n def test_authenticate_using_ldap_with_good_user_pass(self):\n secret = self.fake.password()\n email = self.fake.email()\n fake_ldap_conn = flexmock(set_option=lambda x, y: x)\n\n (flexmock(fake_ldap_conn)\n .should_receive('simple_bind_s')\n .with_args(get_config('LDAP_BASE_DN').format(email), secret)\n .and_return(None)\n .times(1))\n\n (flexmock(fake_ldap_conn)\n .should_receive('unbind_s')\n .and_return(None)\n .times(1))\n\n (flexmock(ldap)\n .should_receive('initialize')\n .and_return(fake_ldap_conn))\n\n role = Role.authenticate_using_ldap(email, secret)\n self.assertIsInstance(role, Role)\n self.assertEqual(role.email, email)\n\n def test_by_email_when_blank_email(self):\n self.assertIsNone(Role.by_email(None))\n\n def test_by_email_does_not_match(self):\n self.assertIsNone(Role.by_email(self.fake.email()).first())\n\n def test_by_email_matches(self):\n self.assertEqual(Role.by_email(self.role.email).first(), self.role)\n\n def test_load_or_create_role_exists(self):\n self.assertEqual(\n Role.load_or_create(\n foreign_id=self.role.foreign_id,\n type=self.role.type,\n name=self.role.name,\n email=self.role.email\n ),\n self.role\n )\n" }, { "alpha_fraction": 0.6218655705451965, "alphanum_fraction": 0.6374122500419617, "avg_line_length": 37.346153259277344, "blob_id": "3afd967747b8c316a343eea035a7679a0a31f7e7", "content_id": "eacf6710becd83db72a583f8fb58a7ad13c4f0f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1994, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/aleph/tests/test_sessions_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", 
"text": "from aleph.tests.util import TestCase\nfrom aleph.tests.factories.models import RoleFactory\n\n\nclass SessionsApiTestCase(TestCase):\n\n def setUp(self):\n super(SessionsApiTestCase, self).setUp()\n\n self.role = RoleFactory.create()\n\n def test_status_get_with_password_registration_enabled(self):\n res = self.client.get('/api/1/sessions')\n assert res.status_code == 200, res\n assert len(res.json['providers']) == 1, res\n assert res.json['providers'][0]['name'] == 'password', res\n assert res.json['providers'][0]['registration'] == True, res\n\n def test_status_get_with_password_registration_disabled(self):\n self.app.config['PASSWORD_REGISTRATION'] = False\n\n res = self.client.get('/api/1/sessions')\n assert res.status_code == 200, res\n assert len(res.json['providers']) == 1, res\n assert res.json['providers'][0]['name'] == 'password', res\n assert res.json['providers'][0]['registration'] == False, res\n\n def test_status_get_without_password_login(self):\n self.app.config['PASSWORD_LOGIN'] = False\n\n res = self.client.get('/api/1/sessions')\n assert res.status_code == 200, res\n assert len(res.json['providers']) == 0, res\n\n def test_password_login_get(self):\n res = self.client.get('/api/1/sessions/login/password')\n assert res.status_code == 404, res\n\n def test_password_login_post_no_data(self):\n res = self.client.post('/api/1/sessions/login/password')\n assert res.status_code == 404, res\n\n def test_password_login_post_good_email_and_password(self):\n secret = self.fake.password()\n self.role.set_password(secret)\n data = dict(email=self.role.email, password=secret)\n\n res = self.client.post('/api/1/sessions/login/password', data=data)\n\n assert res.status_code == 200, res\n assert res.json['role']['id'] == self.role.id, res\n assert res.json['api_key'] == self.role.api_key, res\n" }, { "alpha_fraction": 0.5374800562858582, "alphanum_fraction": 0.540669858455658, "avg_line_length": 21.799999237060547, "blob_id": 
"1c4b6e01dedef85d64075bae4ea2842f2b3c0141", "content_id": "13df6c2f5ad965a2f9d39ad5494296c490aceaa5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1254, "license_type": "permissive", "max_line_length": 81, "num_lines": 55, "path": "/aleph/static/js/services/Role.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.factory('Role', ['$http', '$q', 'Metadata', function($http, $q, Metadata) {\n\n var suggest = function(prefix) {\n var data = {params: {prefix: prefix.trim()}};\n return $http.get('/api/1/roles/_suggest', data).then(function(res) {\n return res.data.results;\n });\n };\n\n var save = function(role) {\n var dfd = $q.defer(),\n url = '/api/1/roles/' + role.id;\n $http.post(url, role).then(function(res) {\n Metadata.flush().then(function() {\n dfd.resolve(res.data);\n });\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n };\n\n var get = function(role_id) {\n var dfd = $q.defer();\n $http.get('/api/1/roles/' + role_id).then(function(res) {\n dfd.resolve(res.data);\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n };\n\n var create = function(role) {\n var dfd = $q.defer(),\n url = '/api/1/roles';\n\n $http.post(url, role).then(function(res) {\n Metadata.flush().then(function() {\n dfd.resolve(res.data);\n });\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n };\n\n return {\n suggest: suggest,\n get: get,\n save: save,\n create: create\n };\n}]);\n" }, { "alpha_fraction": 0.6186090707778931, "alphanum_fraction": 0.6211495995521545, "avg_line_length": 32.5, "blob_id": "eae79b695a53c6f369f17f89195fdcba6b3a0009", "content_id": "dac1f39cc27e338043c376f478a5e564821a5423", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3149, "license_type": "permissive", "max_line_length": 77, "num_lines": 94, "path": 
"/aleph/views/datasets_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom collections import defaultdict\nfrom werkzeug.exceptions import NotFound\nfrom flask import Blueprint, request\nfrom apikit import jsonify\nfrom dalet import COUNTRY_NAMES\n\nfrom aleph.core import datasets, get_config\nfrom aleph.search import QueryState, entities_query\nfrom aleph.views.cache import enable_cache\n\nlog = logging.getLogger(__name__)\nblueprint = Blueprint('datasets_api', __name__)\n\n\[email protected]('/api/1/datasets', methods=['GET'])\ndef index():\n enable_cache(vary_user=True)\n results = [d for d in datasets if request.authz.check_roles(d.roles)]\n state = QueryState({\n 'filter:dataset': [d.name for d in results],\n 'facet': 'dataset',\n 'limit': 0\n }, request.authz)\n res = entities_query(state)\n values = res.get('facets', {}).get('dataset', {}).get('values', [])\n counts = {v.get('id'): v.get('count') for v in values}\n\n countries_facet = defaultdict(int)\n category_facet = defaultdict(int)\n countries_filter = set(request.args.getlist('filter:countries'))\n category_filter = set(request.args.getlist('filter:category'))\n\n filtered = []\n for dataset in results:\n dataset.entities_count = counts.get(dataset.name)\n if len(category_filter) and dataset.category not in category_filter:\n continue\n if len(countries_filter) and \\\n not len(countries_filter.intersection(dataset.countries)):\n continue\n for country in dataset.countries:\n countries_facet[country] += 1\n category_facet[dataset.category] += 1\n filtered.append(dataset)\n\n filtered = sorted(filtered, key=lambda d: d.entities_count, reverse=True)\n facets = {'countries': {'values': []}, 'category': {'values': []}}\n categories = get_config('COLLECTION_CATEGORIES', {})\n\n countries_facet = sorted(countries_facet.items(), key=lambda (k, c): c)\n for key, count in countries_facet[::-1]:\n facets['countries']['values'].append({\n 'id': key,\n 'count': count,\n 
'label': COUNTRY_NAMES.get(key, key)\n })\n\n category_facet = sorted(category_facet.items(), key=lambda (k, c): c)\n for key, count in category_facet[::-1]:\n if key is None:\n continue\n facets['category']['values'].append({\n 'id': key,\n 'count': count,\n 'label': categories.get(key, key)\n })\n\n return jsonify({\n 'results': filtered,\n 'facets': facets,\n 'total': len(filtered),\n 'total_entities_count': res.get('total')\n })\n\n\[email protected]('/api/1/datasets/<name>')\ndef view(name):\n enable_cache(vary_user=True)\n try:\n dataset = datasets.get(name)\n except NameError:\n raise NotFound()\n request.authz.require(request.authz.check_roles(dataset.roles))\n state = QueryState({\n 'filter:dataset': dataset.name,\n 'facet': ['schema', 'countries'],\n 'limit': 0\n }, request.authz)\n res = entities_query(state)\n data = dataset.to_dict()\n data['facets'] = res.get('facets', {})\n data['doc_count'] = res.get('total')\n return jsonify(data)\n" }, { "alpha_fraction": 0.604938268661499, "alphanum_fraction": 0.6088368892669678, "avg_line_length": 28.596153259277344, "blob_id": "5764af74ba033f0060fdf6cb94e9460cbc75449f", "content_id": "240690a6d09f6ca766d8aaf6d39f2c8a0eca4604", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1539, "license_type": "permissive", "max_line_length": 66, "num_lines": 52, "path": "/aleph/index/records.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import six\nimport time\nimport logging\nfrom elasticsearch.helpers import BulkIndexError\n\nfrom aleph.core import es_index, db\nfrom aleph.index.mapping import TYPE_RECORD\nfrom aleph.model import DocumentRecord\nfrom aleph.index.util import bulk_op, query_delete\nfrom aleph.text import index_form\n\nlog = logging.getLogger(__name__)\n\n\ndef clear_records(document_id):\n \"\"\"Delete all records associated with the given document.\"\"\"\n q = {'term': {'document_id': document_id}}\n query_delete(q, 
doc_type=TYPE_RECORD)\n\n\ndef generate_records(document):\n \"\"\"Generate index records, based on document rows or pages.\"\"\"\n q = db.session.query(DocumentRecord)\n q = q.filter(DocumentRecord.document_id == document.id)\n for record in q.yield_per(1000):\n texts = [record.text]\n if record.data is not None:\n texts.extend(record.data.values())\n\n yield {\n '_id': record.id,\n '_type': TYPE_RECORD,\n '_index': six.text_type(es_index),\n '_source': {\n 'document_id': document.id,\n 'collection_id': document.collection_id,\n 'index': record.index,\n 'sheet': record.sheet,\n 'text': index_form(texts)\n }\n }\n\n\ndef index_records(document):\n clear_records(document.id)\n while True:\n try:\n bulk_op(generate_records(document))\n return\n except BulkIndexError as exc:\n log.warning('Indexing error: %s', exc)\n time.sleep(10)\n" }, { "alpha_fraction": 0.5963855385780334, "alphanum_fraction": 0.5987951755523682, "avg_line_length": 24.9375, "blob_id": "766ba9ba43689aae87b08d4c05f9e65b77099b5f", "content_id": "8819179d86bfe55b52bf6d1cf38efe3551cf5957", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "permissive", "max_line_length": 71, "num_lines": 32, "path": "/aleph/index/leads.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\n\nimport logging\nfrom hashlib import sha1\n\nfrom aleph.core import es, es_index\nfrom aleph.index.mapping import TYPE_LEAD\nfrom aleph.index.util import query_delete\n\nlog = logging.getLogger(__name__)\n\n\ndef delete_entity_leads(entity_id):\n \"\"\"Delete all entity-related leads from the index.\"\"\"\n q = {\n 'bool': {\n 'should': [\n {'term': {'entity_id': entity_id}},\n {'term': {'match_id': entity_id}}\n ]\n }\n }\n query_delete(q, doc_type=TYPE_LEAD)\n\n\ndef index_lead(lead):\n \"\"\"Index a lead.\"\"\"\n hash_sum = sha1()\n hash_sum.update(lead.get('entity_id') or '')\n 
hash_sum.update(lead.get('match_id') or '')\n lead_id = hash_sum.hexdigest()\n es.index(index=es_index, doc_type=TYPE_LEAD, id=lead_id, body=lead)\n" }, { "alpha_fraction": 0.5045135617256165, "alphanum_fraction": 0.508525550365448, "avg_line_length": 30.15625, "blob_id": "c634bc42595e70905db12ba739f67a2db0e94e83", "content_id": "773abbe0310aa51b551fd3bb779ca98b37495b0a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 997, "license_type": "permissive", "max_line_length": 97, "num_lines": 32, "path": "/aleph/static/js/services/Link.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.factory('Link', ['$q', '$http', 'Query', 'Metadata', function($q, $http, Query, Metadata) {\n return {\n search: function(entityId, prefix) {\n var query = Query.parse(prefix),\n state = angular.copy(query.state),\n dfd = $q.defer(),\n url = '/api/1/entities/' + entityId + '/links';\n state['limit'] = 10;\n state['facet'] = ['remote.countries', 'schema'];\n state['offset'] = state.offset || 0;\n Metadata.get().then(function(metadata) {\n $http.get(url, {params: state}).then(function(res) {\n var links = res.data;\n for (var i in links.results) {\n links.results[i] = metadata.bindSchema(links.results[i]);\n }\n dfd.resolve({\n query: query,\n result: links\n });\n }, function(err) {\n dfd.reject(err);\n });\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n }\n };\n}]);\n" }, { "alpha_fraction": 0.7148910164833069, "alphanum_fraction": 0.7215496301651001, "avg_line_length": 38.33333206176758, "blob_id": "c0db0e8813c9cb4aca37e49bc03f606d4398e007", "content_id": "940d7f2c0604d5dc817766e74887bb5858923c5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1652, "license_type": "permissive", "max_line_length": 72, "num_lines": 42, "path": "/aleph/views/leads_api.py", "repo_name": 
"gazeti/aleph", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request\nfrom apikit import obj_or_404, jsonify, request_data\nfrom werkzeug.exceptions import BadRequest\n\nfrom aleph.model import Collection, EntityIdentity\nfrom aleph.search import QueryState\nfrom aleph.search.leads import leads_query\nfrom aleph.logic import update_entity, update_lead\nfrom aleph.events import log_event\nfrom aleph.views.util import get_entity\n\nblueprint = Blueprint('leads_api', __name__)\n\n\[email protected]('/api/1/collections/<int:collection_id>/leads',\n methods=['GET'])\ndef index(collection_id):\n collection = obj_or_404(Collection.by_id(collection_id))\n request.authz.require(request.authz.collection_read(collection))\n state = QueryState(request.args, request.authz)\n results = leads_query(collection_id, state)\n return jsonify(results)\n\n\[email protected]('/api/1/collections/<int:collection_id>/leads',\n methods=['POST', 'PUT'])\ndef update(collection_id):\n collection = obj_or_404(Collection.by_id(collection_id))\n request.authz.require(request.authz.collection_write(collection))\n data = request_data()\n entity, obj = get_entity(data.get('entity_id'), request.authz.WRITE)\n if obj.collection_id != collection_id:\n raise BadRequest(\"Entity does not belong to collection.\")\n\n match, _ = get_entity(data.get('match_id'), request.authz.READ)\n judgement = data.get('judgement')\n if judgement not in EntityIdentity.JUDGEMENTS:\n raise BadRequest(\"Invalid judgement.\")\n update_lead(entity, match, judgement, judge=request.authz.role)\n log_event(request)\n update_entity(obj)\n return jsonify({'status': 'ok'})\n" }, { "alpha_fraction": 0.6131771802902222, "alphanum_fraction": 0.6131771802902222, "avg_line_length": 30.620370864868164, "blob_id": "d075bbbbb1705767dccbb021b67595a3ed6079c2", "content_id": "8a71bbcdfa69898c7088bbb5a0c238c7bb013846", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 3415, "license_type": "permissive", "max_line_length": 73, "num_lines": 108, "path": "/aleph/oauth.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import jwt\nimport logging\nfrom flask_oauthlib.client import OAuth\nfrom flask import session\n\nfrom aleph import signals\n\noauth = OAuth()\nlog = logging.getLogger(__name__)\n\n\ndef get_oauth_token():\n if 'oauth' in session:\n sig = session.get('oauth')\n return (sig.get('access_token'), '')\n\n\ndef setup_providers(app):\n # Reset the remote apps first!\n oauth.remote_apps = {}\n\n providers = app.config.get('OAUTH', [])\n if isinstance(providers, dict):\n providers = [providers]\n\n for provider in providers:\n # OAUTH providers from the config MUST have a name entry\n name = provider.get('name')\n label = provider.pop('label', name.capitalize())\n\n provider = oauth.remote_app(**provider)\n provider.label = label\n provider.tokengetter(get_oauth_token)\n\n\ndef configure_oauth(app):\n if not app.config.get('TESTING'):\n setup_providers(app)\n oauth.init_app(app)\n return oauth\n\n\[email protected]_oauth_session.connect\ndef handle_google_oauth(sender, provider=None, session=None):\n from aleph.model import Role\n\n # If you wish to use another OAuth provider with your installation of\n # aleph, you can create a Python extension package and include a\n # custom oauth handler like this, which will create roles and state\n # for your session.\n if 'googleapis.com' not in provider.base_url:\n return\n\n me = provider.get('userinfo')\n user_id = 'google:%s' % me.data.get('id')\n role = Role.load_or_create(user_id, Role.USER, me.data.get('name'),\n email=me.data.get('email'))\n session['user'] = role.id\n\n\[email protected]_oauth_session.connect\ndef handle_facebook_oauth(sender, provider=None, session=None):\n from aleph.model import Role\n\n if 'facebook.com' not in provider.base_url:\n return\n\n me = provider.get('me?fields=id,name,email')\n user_id = 'facebook:%s' % 
me.data.get('id')\n role = Role.load_or_create(user_id, Role.USER, me.data.get('name'),\n email=me.data.get('email'))\n session['user'] = role.id\n\n\[email protected]_oauth_session.connect\ndef handle_keycloak_oauth(sender, provider=None, session=None):\n from aleph.model import Role\n superuser_role = 'superuser'\n\n if 'secure.occrp.org' not in provider.base_url:\n return\n\n access_token = session.get('oauth', {}).get('access_token')\n access_token = jwt.decode(access_token, verify=False)\n clients = access_token.get('resource_access', {})\n client = clients.get(provider.consumer_key, {})\n roles = set(client.get('roles', []))\n\n user_id = 'kc:%s' % access_token.get('email')\n if access_token.get('idashboard'):\n user_id = 'idashboard:user:%s' % access_token.get('idashboard')\n\n role = Role.load_or_create(user_id, Role.USER,\n access_token.get('name'),\n email=access_token.get('email'),\n is_admin=superuser_role in roles)\n role.clear_roles()\n\n for role_name in roles:\n if role_name == superuser_role:\n continue\n group_role = Role.load_or_create('kc:%s' % role_name,\n Role.GROUP,\n role_name)\n role.add_role(group_role)\n log.debug(\"User %r is member of %r\", role, group_role)\n\n session['user'] = role.id\n" }, { "alpha_fraction": 0.5819838047027588, "alphanum_fraction": 0.5819838047027588, "avg_line_length": 23.09756088256836, "blob_id": "94c2acc0d949da16c4791a14019bb3a43644bde7", "content_id": "1ce8882ec870bd87a3d23684aaccb230dcda3293", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 988, "license_type": "permissive", "max_line_length": 81, "num_lines": 41, "path": "/aleph/static/js/directives/propertyValue.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.directive('propertyValue', ['Metadata', function(Metadata) {\n var metadata = {};\n Metadata.get().then(function(md) {\n metadata = md;\n });\n\n return {\n restrict: 'E',\n 
scope: {\n 'value': '=',\n 'property': '='\n },\n templateUrl: 'templates/entities/property_value.html',\n link: function (scope, element, attrs, model) {\n scope.label = scope.value;\n scope.isUrl = scope.property.type == 'url' || scope.property.type == 'uri';\n scope.isAddress = scope.property.type == 'address';\n scope.isCountry = scope.property.type == 'country';\n scope.isText = !scope.isUrl;\n\n if (scope.isCountry) {\n scope.label = metadata.countries[scope.value] || scope.value;\n }\n\n }\n };\n}]);\n\n\naleph.directive('propertyValues', [function() {\n return {\n restrict: 'E',\n scope: {\n 'values': '=',\n 'property': '='\n },\n templateUrl: 'templates/entities/property_values.html'\n };\n}]);\n" }, { "alpha_fraction": 0.5723869800567627, "alphanum_fraction": 0.5723869800567627, "avg_line_length": 30.120878219604492, "blob_id": "3c0ddba5a7ed1dcf1285bde9d3f0728c7cfa8359", "content_id": "064e7010abbc84054a491e7a14b1d02f12062db5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2832, "license_type": "permissive", "max_line_length": 75, "num_lines": 91, "path": "/aleph/datasets/__init__.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import six\nimport logging\n\nfrom aleph.authz import get_public_roles\nfrom aleph.util import dict_list\nfrom aleph.model import Role\nfrom aleph.datasets.query import DBQuery, CSVQuery\n\nlog = logging.getLogger(__name__)\n\n\nclass Dataset(object):\n \"\"\"A dataset describes one set of data to be loaded.\"\"\"\n\n def __init__(self, name, data):\n self.name = six.text_type(name)\n self.data = data\n self.label = data.get('label', name)\n self.info_url = data.get('info_url')\n self.category = data.get('category')\n self.roles = []\n self.entities_count = None\n self.public = False\n\n for role in dict_list(data, 'roles', 'role'):\n role_id = Role.load_id(role)\n if role_id is not None:\n self.roles.append(role_id)\n else:\n log.warning(\"Could 
not find role: %s\", role)\n if role_id in get_public_roles():\n self.public = True\n\n if not len(self.roles):\n raise ValueError(\"No roles for dataset: %s\" % self.name)\n\n self._queries = dict_list(data, 'queries', 'query')\n\n @property\n def countries(self):\n # This is cached only once for each run-time, basically as a really\n # stupid cache. Perhaps configuring countries explicitly, or giving\n # this into a memoization tool that timeouts every N hours would be\n # a good idea.\n if not hasattr(self, '_countries'):\n from aleph.search.entities import get_dataset_countries\n self._countries = get_dataset_countries(self.name)\n return self._countries\n\n @property\n def queries(self):\n for query in self._queries:\n if 'database' in query or 'databases' in query:\n yield DBQuery(self, query)\n else:\n yield CSVQuery(self, query)\n\n def to_dict(self):\n return {\n 'name': self.name,\n 'label': self.label,\n 'info_url': self.info_url,\n 'roles': self.roles,\n 'public': self.public,\n 'category': self.category,\n 'countries': self.countries,\n 'entities_count': self.entities_count\n }\n\n def __repr__(self):\n return '<Dataset(%r, %r)>' % (self.name, self.label)\n\n\nclass DatasetSet(object):\n\n def __init__(self, datasets):\n self.datasets = []\n for name, dconfig in datasets.get('datasets', {}).items():\n self.datasets.append(Dataset(name, dconfig))\n\n def get(self, name):\n for dataset in self.datasets:\n if dataset.name == name:\n return dataset\n raise NameError(\"No such dataset: %s\" % name)\n\n def __iter__(self):\n return iter(self.datasets)\n\n def __repr__(self):\n return '<DatasetSet(%r)>' % self.datasets\n" }, { "alpha_fraction": 0.5575271844863892, "alphanum_fraction": 0.5586720108985901, "avg_line_length": 31.351852416992188, "blob_id": "34e8412b35f9a35c5e33fe83a80ec67bc0cf95bb", "content_id": "d883fc0a2d87a666bc983c13eed9cc02d116961b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1747, "license_type": "permissive", "max_line_length": 72, "num_lines": 54, "path": "/aleph/search/links.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from pprint import pprint # noqa\n\nfrom aleph.index import TYPE_LINK\nfrom aleph.search.util import execute_basic\nfrom aleph.search.fragments import match_all, filter_query, authz_filter\nfrom aleph.search.fragments import add_filter, aggregate\nfrom aleph.search.facet import parse_facet_result\n\nDEFAULT_FIELDS = ['roles', 'remote', 'origin', 'inverted', 'schema',\n 'schemata', 'properties']\n\n\ndef links_query(origin, state):\n \"\"\"Parse a user query string, compose and execute a query.\"\"\"\n q = match_all()\n if state.has_text:\n q = {\n \"query_string\": {\n \"query\": state.text,\n \"fields\": ['name^5', 'names^2', 'text'],\n \"default_operator\": \"AND\",\n \"use_dis_max\": True\n }\n }\n ids = origin.get('ids') or [origin.get('id')]\n q = add_filter(q, {'terms': {'origin.id': ids}})\n q = authz_filter(q, state.authz, roles=True)\n\n aggs = {'scoped': {'global': {}, 'aggs': {}}}\n aggs = aggregate(state, q, aggs, state.facet_names)\n\n if state.sort == 'score':\n sort = ['_score']\n else:\n sort = [{'properties.start_date': 'desc'},\n {'properties.end_date': 'desc'}]\n\n q = {\n 'sort': sort,\n 'query': filter_query(q, state.filters),\n 'aggregations': aggs,\n 'size': state.limit,\n 'from': state.offset,\n '_source': DEFAULT_FIELDS\n }\n\n result, hits, output = execute_basic(TYPE_LINK, q)\n output['facets'] = parse_facet_result(state, result)\n for doc in hits.get('hits', []):\n link = doc.get('_source')\n link['id'] = doc.get('_id')\n link['score'] = doc.get('_score')\n output['results'].append(link)\n return output\n" }, { "alpha_fraction": 0.5489010810852051, "alphanum_fraction": 0.5736263990402222, "avg_line_length": 29.847457885742188, "blob_id": "31d0a589f8549cf5fd6974f0b13b6ad8710dc6e0", "content_id": "f52c90fa42ee1697782d5ef639a7e9b837be92e2", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1820, "license_type": "permissive", "max_line_length": 230, "num_lines": 59, "path": "/aleph/analyze/regex.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import re\nimport logging\nfrom dalet import parse_phone\n\nfrom aleph.analyze.analyzer import Analyzer\nfrom aleph.model import DocumentTag, DocumentTagCollector\n\nlog = logging.getLogger(__name__)\n\n# URLs:\n# https://gist.github.com/uogbuji/705383\n# REGEX = ur'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:\\'\".,<>?\\xab\\xbb\\u201c\\u201d\\u2018\\u2019]))' # noqa\n\n\nclass RegexAnalyzer(Analyzer):\n REGEX = None\n FLAG = None\n\n def prepare(self):\n # TODO: re-think this.\n self.disabled = self.document.type == self.document.TYPE_TABULAR\n self.collector = DocumentTagCollector(self.document, self.ORIGIN)\n self.regex = re.compile(self.REGEX, self.FLAG)\n\n def on_text(self, text):\n if not self.disabled:\n for mobj in self.regex.finditer(text):\n self.on_match(mobj)\n\n def finalize(self):\n self.collector.save()\n\n\nclass EMailAnalyzer(RegexAnalyzer):\n REGEX = '[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}'\n FLAG = re.IGNORECASE\n ORIGIN = 'regex:email'\n\n def on_match(self, match):\n text = match.group(0)\n self.collector.emit(text, DocumentTag.TYPE_EMAIL)\n\n\nclass PhoneNumberAnalyzer(RegexAnalyzer):\n REGEX = r'(\\+?[\\d\\-\\(\\)\\/\\s]{5,})'\n CHARS = '+0123456789'\n FLAG = re.IGNORECASE\n ORIGIN = 'regex:phones'\n\n def on_match(self, match):\n match = match.group(0)\n match = ''.join([m for m in match if m in self.CHARS])\n if len(match) < 5:\n return\n for country in [None] + self.document.countries:\n num = parse_phone(match, country=country)\n if num is None:\n continue\n self.collector.emit(num, DocumentTag.TYPE_PHONE)\n" }, { 
"alpha_fraction": 0.6413461565971375, "alphanum_fraction": 0.6413461565971375, "avg_line_length": 25.66666603088379, "blob_id": "6cd93b0f6b0cb6ece09e340ab1477b5915027650", "content_id": "c0bdeba7ad0934e9245b175628385903bc74d891", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1040, "license_type": "permissive", "max_line_length": 141, "num_lines": 39, "path": "/aleph/static/js/controllers/DocumentsSourcesCtrl.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.controller('DocumentsSourcesCtrl', ['$scope', '$route', '$location', '$anchorScroll', 'Title', 'Collection', 'collections', 'metadata',\n function($scope, $route, $location, $anchorScroll, Title, Collection, collections, metadata) {\n\n Title.set(\"Document sources\", \"documents\");\n\n $scope.search = {};\n $scope.query = collections.query;\n $scope.collections = collections.result;\n $scope.metadata = metadata;\n\n $scope.submitSearch = function(form) {\n $location.path('/documents');\n $location.search($scope.search);\n };\n\n $scope.$on('$routeUpdate', function() {\n Collection.search({\n managed: true,\n counts: true,\n facet: ['countries', 'category']\n }).then(function(data) {\n updateSearch(data)\n });\n });\n\n $scope.loadOffset = function(offset) {\n $scope.query.set('offset', offset);\n $anchorScroll();\n };\n\n var updateSearch = function(data) {\n $scope.query = data.query;\n $scope.collections = data.result;\n };\n\n updateSearch(collections);\n}]);\n" }, { "alpha_fraction": 0.6105675101280212, "alphanum_fraction": 0.6105675101280212, "avg_line_length": 25.894737243652344, "blob_id": "3bcad91287f5ad19206eba57134f54326ece9d25", "content_id": "047779046688f43048bf20d605249a4610c03b73", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 511, "license_type": "permissive", "max_line_length": 78, "num_lines": 19, 
"path": "/aleph/static/js/controllers/SignupCtrl.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.controller('SignupCtrl', ['$scope', '$location', '$routeParams', 'Role',\n function($scope, $location, $routeParams, Role) {\n\n $scope.role = {};\n $scope.role.code = $routeParams.code;\n $scope.showSuccess = false;\n\n $scope.register = function() {\n Role.create($scope.role).then(function(role) {\n $scope.showError = false;\n $scope.showSuccess = true;\n }, function(err) {\n $scope.showError = true;\n $scope.showSuccess = false;\n });\n };\n}]);\n" }, { "alpha_fraction": 0.6624100804328918, "alphanum_fraction": 0.6640287637710571, "avg_line_length": 35.578948974609375, "blob_id": "4a5c6e80d5bef810e3036f269c1413e5f18f7958", "content_id": "f740eaca88eb56ec04ca0a55415f0a43b3932bf4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5560, "license_type": "permissive", "max_line_length": 79, "num_lines": 152, "path": "/aleph/views/documents_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom werkzeug.exceptions import BadRequest, NotFound\nfrom flask import Blueprint, redirect, send_file, request\nfrom apikit import jsonify, Pager, request_data\n\nfrom aleph.core import archive, url_for, db\nfrom aleph.model import Document, DocumentRecord, Entity, Reference\nfrom aleph.logic import update_document\nfrom aleph.events import log_event\nfrom aleph.views.cache import enable_cache\nfrom aleph.search import QueryState\nfrom aleph.search import records_query, execute_records_query\nfrom aleph.search.util import next_params\nfrom aleph.views.util import get_document\nfrom aleph.util import PDF_MIME\n\n\nlog = logging.getLogger(__name__)\nblueprint = Blueprint('documents_api', __name__)\n\n\[email protected]('/api/1/documents', methods=['GET'])\ndef index():\n authz = request.authz\n collections = 
request.args.getlist('collection')\n collections = authz.collections_intersect(authz.READ, collections)\n q = Document.all()\n q = q.filter(Document.collection_id.in_(collections))\n hashes = request.args.getlist('content_hash')\n if len(hashes):\n q = q.filter(Document.content_hash.in_(hashes))\n return jsonify(Pager(q))\n\n\[email protected]('/api/1/documents/<int:document_id>')\ndef view(document_id):\n doc = get_document(document_id)\n enable_cache()\n data = doc.to_dict()\n if doc.parent is not None:\n data['parent'] = doc.parent.to_dict()\n log_event(request, document_id=doc.id)\n data['data_url'] = archive.generate_url(doc.content_hash)\n if data['data_url'] is None:\n data['data_url'] = url_for('documents_api.file',\n document_id=document_id)\n if doc.pdf_version:\n data['pdf_url'] = url_for('documents_api.pdf',\n document_id=document_id)\n return jsonify(data)\n\n\[email protected]('/api/1/documents/<int:document_id>', methods=['POST', 'PUT'])\ndef update(document_id):\n document = get_document(document_id, action=request.authz.WRITE)\n data = request_data()\n document.update(data)\n db.session.commit()\n log_event(request, document_id=document.id)\n update_document(document)\n return view(document_id)\n\n\[email protected]('/api/1/documents/<int:document_id>/references')\ndef references(document_id):\n doc = get_document(document_id)\n q = db.session.query(Reference)\n q = q.filter(Reference.document_id == doc.id)\n q = q.filter(Reference.origin == 'regex')\n q = q.join(Entity)\n q = q.filter(Entity.state == Entity.STATE_ACTIVE)\n q = q.filter(Entity.collection_id.in_(request.authz.collections_read))\n q = q.order_by(Reference.weight.desc())\n return jsonify(Pager(q, document_id=document_id))\n\n\[email protected]('/api/1/documents/<int:document_id>/file')\ndef file(document_id):\n document = get_document(document_id)\n enable_cache(server_side=True)\n log_event(request, document_id=document.id)\n url = archive.generate_url(document.content_hash,\n 
file_name=document.file_name,\n mime_type=document.mime_type)\n if url is not None:\n return redirect(url)\n\n local_path = archive.load_file(document.content_hash,\n file_name=document.file_name)\n if local_path is None:\n raise NotFound(\"File does not exist.\")\n\n fh = open(local_path, 'rb')\n return send_file(fh, as_attachment=True,\n attachment_filename=document.file_name,\n mimetype=document.mime_type)\n\n\[email protected]('/api/1/documents/<int:document_id>/pdf')\ndef pdf(document_id):\n document = get_document(document_id)\n enable_cache(server_side=True)\n log_event(request, document_id=document.id)\n if document.type != Document.TYPE_TEXT:\n raise BadRequest(\"PDF is only available for text documents\")\n url = archive.generate_url(document.pdf_version, mime_type=PDF_MIME)\n if url is not None:\n return redirect(url)\n\n path = archive.load_file(document.pdf_version,\n file_name=document.file_name)\n if path is None:\n raise NotFound(\"Missing PDF file.\")\n return send_file(open(path, 'rb'), mimetype=PDF_MIME)\n\n\[email protected]('/api/1/documents/<int:document_id>/tables/<int:table_id>')\ndef table(document_id, table_id):\n document = get_document(document_id)\n enable_cache(vary_user=True)\n try:\n return jsonify(document.tables[table_id])\n except IndexError:\n raise NotFound(\"No such table: %s\" % table_id)\n\n\[email protected]('/api/1/documents/<int:document_id>/records')\ndef records(document_id):\n document = get_document(document_id)\n enable_cache(vary_user=True)\n state = QueryState(request.args, request.authz)\n query = records_query(document.id, state)\n result = execute_records_query(document.id, state, query)\n params = next_params(request.args, result)\n if params is not None:\n result['next'] = url_for('documents_api.records',\n document_id=document_id,\n **params)\n return jsonify(result)\n\n\[email protected]('/api/1/documents/<int:document_id>/records/<int:index>')\ndef record(document_id, index):\n document = 
get_document(document_id)\n q = db.session.query(DocumentRecord)\n q = q.filter(DocumentRecord.document_id == document.id)\n q = q.filter(DocumentRecord.index == index)\n record = q.first()\n if record is None:\n raise NotFound(\"No such page: %s\" % index)\n enable_cache(server_side=True)\n return jsonify(record)\n" }, { "alpha_fraction": 0.793433666229248, "alphanum_fraction": 0.793433666229248, "avg_line_length": 47.733333587646484, "blob_id": "bef9aee2cad64a2b631cf2bb48dbcb289c1b27c7", "content_id": "22ec4a5ebba3a1cca202b2468c0f4ced5573dba5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "permissive", "max_line_length": 77, "num_lines": 15, "path": "/aleph/search/__init__.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\n\nfrom aleph.index.mapping import TYPE_DOCUMENT, TYPE_RECORD # noqa\nfrom aleph.search.query import QueryState # noqa\nfrom aleph.search.documents import documents_query, documents_iter # noqa\nfrom aleph.search.documents import entity_documents # noqa\nfrom aleph.search.entities import entities_query # noqa\nfrom aleph.search.entities import suggest_entities, similar_entities # noqa\nfrom aleph.search.entities import load_entity # noqa\nfrom aleph.search.links import links_query # noqa\nfrom aleph.search.leads import leads_query, lead_count # noqa\nfrom aleph.search.records import records_query, execute_records_query # noqa\nfrom aleph.search.util import scan_iter # noqa\n\nlog = logging.getLogger(__name__)\n" }, { "alpha_fraction": 0.6346153616905212, "alphanum_fraction": 0.6370192170143127, "avg_line_length": 28.714284896850586, "blob_id": "4bf3a9c66c055fa821c728c2fd90a8c942b17f7f", "content_id": "c6b0e513807f494f42944502b053185c7f971393", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 416, "license_type": "permissive", "max_line_length": 77, 
"num_lines": 14, "path": "/aleph/static/js/loaders/loadPermissions.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "var loadPermissions = ['$q', '$route', '$http', function($q, $route, $http) {\n var dfd = $q.defer(),\n collectionId = $route.current.params.collection_id,\n permUrl = '/api/1/collections/' + collectionId + '/permissions';\n\n $http.get(permUrl).then(function(res) {\n dfd.resolve(res.data.results);\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n}];\n\nexport default loadPermissions;\n" }, { "alpha_fraction": 0.64462810754776, "alphanum_fraction": 0.663223147392273, "avg_line_length": 23.200000762939453, "blob_id": "767846f45a6664f4fbb320c620efed1bc56a7f6c", "content_id": "b899f39887cfff1210fe066501f2e00bf8d3f9ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 484, "license_type": "permissive", "max_line_length": 74, "num_lines": 20, "path": "/contrib/devwrapper.sh", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"Aleph environment is being set up: $(hostname -i)\"\nif [ -d \"/aleph/node_modules\" ]; then\n echo 1>&2 \"You have a local node_modules directory. 
Please delete this.\"\n exit 127\nfi\n\nif [ -f \"/aleph/settings.py\" ]; then\n echo 1>&2 \"Found settings.py, adding to ALEPH_SETTINGS.\"\n export ALEPH_SETTINGS=/aleph/settings.py\nfi\n\npip install -q -e /aleph\nif [ -e \"/aleph/site\" ]; then\n echo 1>&2 \"Using site package: 'site'.\"\n pip install -q -e /aleph/site\nfi\n\nexec \"$@\"\n" }, { "alpha_fraction": 0.5991735458374023, "alphanum_fraction": 0.5991735458374023, "avg_line_length": 23.820512771606445, "blob_id": "dc65cecdf21a060ebc94932f2bc5857e51f3eceb", "content_id": "eb4a41b8cdc8c2d8a60f84b62c817cd52dbf2f53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 968, "license_type": "permissive", "max_line_length": 135, "num_lines": 39, "path": "/aleph/static/js/controllers/EntitiesSearchCtrl.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.controller('EntitiesSearchCtrl', ['$scope', '$http', '$timeout', '$anchorScroll', 'Entity', 'Authz', 'Title', 'data', 'metadata',\n function($scope, $http, $timeout, $anchorScroll, Entity, Authz, Title, data, metadata) {\n\n $scope.authz = Authz;\n\n $scope.submitSearch = function(form) {\n $scope.query.set('q', $scope.query.state.q);\n };\n\n $scope.loadOffset = function(offset) {\n $scope.query.set('offset', offset);\n $anchorScroll();\n };\n\n $scope.$on('$routeUpdate', function() {\n reloadSearch();\n });\n\n var reloadSearch = function() {\n Entity.search().then(function(data) {\n updateSearch(data);\n });\n };\n\n var updateSearch = function(data) {\n $scope.result = data.result;\n $scope.query = data.query;\n\n if (data.query.getQ()) {\n Title.set(\"'\" + data.query.getQ() + \"'\", \"entities\");\n } else {\n Title.set(\"Search databases\", \"entities\");\n }\n };\n\n updateSearch(data);\n}]);\n" }, { "alpha_fraction": 0.5950632691383362, "alphanum_fraction": 0.5952672362327576, "avg_line_length": 35.582088470458984, "blob_id": 
"415198579a377a073b4239c410b9777469f35e3b", "content_id": "4369534785dfe2571064de88c60c6a30473b775d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4902, "license_type": "permissive", "max_line_length": 82, "num_lines": 134, "path": "/aleph/authz.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from werkzeug.exceptions import Forbidden\n\nfrom aleph.core import db, get_config\nfrom aleph.model import Collection, Role, Permission\nfrom aleph.util import ensure_list\n\n\ndef get_public_roles():\n \"\"\"Roles which make a collection to be considered public.\"\"\"\n return [\n Role.load_id(Role.SYSTEM_GUEST),\n Role.load_id(Role.SYSTEM_USER),\n ]\n\n\nclass Authz(object):\n \"\"\"Hold the authorization information for a user.\n\n This is usually attached to a request, but can also be used separately,\n e.g. in the context of notifications.\n \"\"\"\n READ = 'read'\n WRITE = 'write'\n PUBLIC = 'public'\n\n def __init__(self, role=None, override=False):\n self.roles = set([Role.load_id(Role.SYSTEM_GUEST)])\n self.role = role\n self.logged_in = role is not None\n self.override = self.is_admin = override\n self.in_maintenance = get_config('MAINTENANCE')\n\n if self.logged_in:\n self.is_admin = role.is_admin\n self.roles.add(role.id)\n self.roles.add(Role.load_id(Role.SYSTEM_USER))\n for group in role.roles:\n self.roles.add(group.id)\n\n # Pre-load collection authorisation info and cache the result.\n # This is the core authorisation function, and is called at least once\n # per request. 
It will query and cache the ID for all collections the\n # current user is authorised to read or write.\n self.collections = {\n self.READ: set(),\n self.WRITE: set(),\n self.PUBLIC: set()\n }\n q = db.session.query(Permission.collection_id,\n Permission.role_id,\n Permission.read,\n Permission.write)\n q = q.filter(Permission.deleted_at == None) # noqa\n q = q.filter(Permission.role_id.in_(self.roles))\n q = q.filter(Permission.collection_id != None) # noqa\n for collection_id, role_id, read, write in q:\n if read or write:\n self.collections[self.READ].add(collection_id)\n if role_id in get_public_roles():\n self.collections[self.PUBLIC].add(collection_id)\n if write and self.logged_in:\n self.collections[self.WRITE].add(collection_id)\n if self.is_admin:\n q = Collection.all_ids().filter(Collection.deleted_at == None) # noqa\n for collection_id, in q:\n self.collections[self.READ].add(collection_id)\n self.collections[self.WRITE].add(collection_id)\n\n # Disable all in maintenance mode.\n if self.in_maintenance:\n self.collections[self.WRITE] = set()\n\n self.collections_read = list(self.collections[self.READ])\n self.collections_write = list(self.collections[self.WRITE])\n\n def _collection_check(self, collection, action):\n if isinstance(collection, Collection):\n collection = collection.id\n try:\n return int(collection) in self.collections.get(action)\n except:\n return False\n\n def collection_read(self, collection):\n \"\"\"Check if a given collection can be read.\"\"\"\n return self._collection_check(collection, self.READ)\n\n def collection_write(self, collection):\n \"\"\"Check if a given collection can be written.\"\"\"\n return self._collection_check(collection, self.WRITE)\n\n def collection_public(self, collection):\n return self._collection_check(collection, self.PUBLIC)\n\n def collections_intersect(self, action, colls, default_all=True):\n \"\"\"Intersect the given and the available set of collections.\n\n This will return all available 
collections if the given set is empty\n and the ``default_all`` argument is ``True``.\n \"\"\"\n available = self.collections.get(action)\n intersect = set()\n for collection_id in colls:\n try:\n if isinstance(collection_id, dict):\n collection_id = collection_id.get('id')\n collection_id = int(collection_id)\n if collection_id in available:\n intersect.add(collection_id)\n except:\n pass\n if not len(intersect) and default_all:\n return available\n return list(intersect)\n\n def session_write(self):\n if self.in_maintenance:\n return False\n return self.logged_in\n\n def check_roles(self, roles):\n # if self.in_maintenance:\n # return False\n if self.is_admin:\n return True\n isect = self.roles.intersection(ensure_list(roles))\n return len(isect) > 0\n\n def require(self, pred):\n if not pred:\n raise Forbidden(\"Sorry, you're not permitted to do this!\")\n\n def __repr__(self):\n return '<Authz(%s)>' % self.role\n" }, { "alpha_fraction": 0.7098976373672485, "alphanum_fraction": 0.7098976373672485, "avg_line_length": 32.80769348144531, "blob_id": "aac20f769b335e498a9922318a6642c9ba6c4461", "content_id": "84ec9343d3b910f02f0e90f8dffae48578e7dc72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 879, "license_type": "permissive", "max_line_length": 77, "num_lines": 26, "path": "/aleph/static/js/loaders/loadEntities.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "var loadEntitiesSearch = ['Entity', function(Entity) {\n return Entity.search();\n}];\n\nvar loadEntity = ['$route', 'Entity', function($route, Entity) {\n var entityId = $route.current.params.entity_id;\n return Entity.get(entityId);\n}];\n\nvar loadSimilarEntities = ['$route', 'Entity', function($route, Entity) {\n var entityId = $route.current.params.entity_id;\n return Entity.searchSimilar(entityId, 'similar_');\n}];\n\nvar loadEntityLinks = ['$route', 'Link', function($route, Link) {\n var entityId = 
$route.current.params.entity_id;\n return Link.search(entityId, 'links_');\n}];\n\nvar loadEntityDocuments = ['$route', 'Document', function($route, Document) {\n var entityId = $route.current.params.entity_id;\n return Document.searchEntity(entityId, 'documents_');\n}];\n\nexport {loadEntitiesSearch, loadEntity, loadSimilarEntities, loadEntityLinks,\n loadEntityDocuments};\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 29.85714340209961, "blob_id": "f128c8fa90ad1ca2d8e3db882785733195df37b7", "content_id": "d990927f4651550d581fe9c469a3e505aaa136c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 216, "license_type": "permissive", "max_line_length": 76, "num_lines": 7, "path": "/aleph/static/js/loaders/loadHome.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "var loadStatistics = ['$http', '$q', '$route', function($http, $q, $route) {\n return $http.get('/api/1/statistics', {cache: true}).then(function(res) {\n return res.data;\n });\n}];\n\nexport default loadStatistics;\n" }, { "alpha_fraction": 0.5441176295280457, "alphanum_fraction": 0.5477941036224365, "avg_line_length": 30.627906799316406, "blob_id": "dcf3623e8398f54da8aa129522a82164bea8bd36", "content_id": "14b8bc0afe7ac4a66fe938c6158596135e432ddf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1360, "license_type": "permissive", "max_line_length": 73, "num_lines": 43, "path": "/aleph/logic/datasets.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\n\nfrom aleph.index import index_items\n\nlog = logging.getLogger(__name__)\nPAGE = 1000\n\n\ndef load_rows(dataset, query, rows):\n \"\"\"Load a single batch of QUEUE_PAGE rows from the given query.\"\"\"\n entities = {}\n links = []\n for row in rows:\n entity_map = {}\n for entity in query.entities:\n data = entity.to_index(row)\n 
if data is not None:\n entity_map[entity.name] = data\n entities[data['id']] = data\n\n for link in query.links:\n for inverted in [False, True]:\n data = link.to_index(row, entity_map, inverted=inverted)\n if data is not None:\n links.append(data)\n\n index_items(entities, links)\n log.info(\"[%s] Indexed %s rows as %s entities, %s links...\",\n dataset.name, len(rows), len(entities), len(links))\n\n\ndef load_dataset(dataset):\n \"\"\"Index all the entities and links in a given dataset.\"\"\"\n for query in dataset.queries:\n rows = []\n for row_idx, row in enumerate(query.iterrows(), 1):\n rows.append(row)\n if len(rows) >= PAGE:\n log.info(\"[%s] Tasked %s rows...\", dataset.name, row_idx)\n load_rows(dataset, query, rows)\n rows = []\n if len(rows):\n load_rows(dataset, query, rows)\n" }, { "alpha_fraction": 0.6023346185684204, "alphanum_fraction": 0.6042801737785339, "avg_line_length": 34.52995300292969, "blob_id": "0de94ccf133b7a28093bde62866ab1699162fa53", "content_id": "b1d089298e5f4bb67268690bc90d7e531a39929b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7710, "license_type": "permissive", "max_line_length": 113, "num_lines": 217, "path": "/aleph/model/document.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import logging\nfrom datetime import datetime, timedelta\nfrom normality import ascii_text\nfrom sqlalchemy import func\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom sqlalchemy.orm.attributes import flag_modified\n\nfrom aleph.core import db\nfrom aleph.model.metadata import Metadata\nfrom aleph.model.validate import validate\nfrom aleph.model.collection import Collection\nfrom aleph.model.reference import Reference\nfrom aleph.model.common import DatedModel\nfrom aleph.model.document_record import DocumentRecord\nfrom aleph.model.document_tag import DocumentTag\nfrom aleph.text import index_form\n\nlog = logging.getLogger(__name__)\n\n\nclass 
Document(db.Model, DatedModel, Metadata):\n _schema = 'document.json#'\n\n SCHEMA = 'Document'\n\n TYPE_TEXT = 'text'\n TYPE_TABULAR = 'tabular'\n TYPE_OTHER = 'other'\n\n STATUS_PENDING = 'pending'\n STATUS_SUCCESS = 'success'\n STATUS_FAIL = 'fail'\n\n id = db.Column(db.BigInteger, primary_key=True)\n content_hash = db.Column(db.Unicode(65), nullable=True, index=True)\n foreign_id = db.Column(db.Unicode, unique=False, nullable=True)\n type = db.Column(db.Unicode(10), nullable=False, index=True)\n status = db.Column(db.Unicode(10), nullable=True, index=True)\n meta = db.Column(JSONB, default={})\n\n crawler = db.Column(db.Unicode(), index=True)\n crawler_run = db.Column(db.Unicode())\n error_type = db.Column(db.Unicode(), nullable=True)\n error_message = db.Column(db.Unicode(), nullable=True)\n\n parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True) # noqa\n children = db.relationship('Document', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa\n\n collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa\n collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa\n\n def __init__(self, **kw):\n self.meta = {}\n super(Document, self).__init__(**kw)\n\n def update(self, data):\n validate(data, self._schema)\n self.title = data.get('title')\n self.summary = data.get('summary')\n self.languages = data.get('languages')\n self.countries = data.get('countries')\n db.session.add(self)\n\n def update_meta(self):\n flag_modified(self, 'meta')\n\n def delete_records(self):\n pq = db.session.query(DocumentRecord)\n pq = pq.filter(DocumentRecord.document_id == self.id)\n # pq.delete(synchronize_session='fetch')\n pq.delete()\n db.session.flush()\n\n def delete_tags(self):\n pq = db.session.query(DocumentTag)\n pq = pq.filter(DocumentTag.document_id == self.id)\n # pq.delete(synchronize_session='fetch')\n pq.delete()\n db.session.flush()\n\n 
def delete_references(self, origin=None):\n pq = db.session.query(Reference)\n pq = pq.filter(Reference.document_id == self.id)\n if origin is not None:\n pq = pq.filter(Reference.origin == origin)\n # pq.delete(synchronize_session='fetch')\n pq.delete()\n db.session.flush()\n\n def delete(self, deleted_at=None):\n self.delete_references()\n self.delete_records()\n db.session.delete(self)\n\n def insert_records(self, sheet, iterable, chunk_size=1000):\n chunk = []\n for index, data in enumerate(iterable):\n chunk.append({\n 'document_id': self.id,\n 'index': index,\n 'sheet': sheet,\n 'data': data\n })\n if len(chunk) >= chunk_size:\n db.session.bulk_insert_mappings(DocumentRecord, chunk)\n chunk = []\n\n if len(chunk):\n db.session.bulk_insert_mappings(DocumentRecord, chunk)\n\n def text_parts(self):\n pq = db.session.query(DocumentRecord)\n pq = pq.filter(DocumentRecord.document_id == self.id)\n for record in pq.yield_per(1000):\n for text in record.text_parts():\n yield text\n\n @classmethod\n def crawler_last_run(cls, crawler_id):\n q = db.session.query(func.max(cls.updated_at))\n q = q.filter(cls.crawler == crawler_id)\n return q.scalar()\n\n @classmethod\n def is_crawler_active(cls, crawler_id):\n # TODO: add a function to see if a particular crawl is still running\n # this should be defined as having \"pending\" documents.\n last_run_time = cls.crawler_last_run(crawler_id)\n if last_run_time is None:\n return False\n return last_run_time > (datetime.utcnow() - timedelta(hours=1))\n\n @classmethod\n def crawler_stats(cls, crawler_id):\n # Check if the crawler was active very recently, if so, don't\n # allow the user to execute a new run right now.\n stats = {\n 'updated': cls.crawler_last_run(crawler_id),\n 'running': cls.is_crawler_active(crawler_id)\n }\n\n q = db.session.query(cls.status, func.count(cls.id))\n q = q.filter(cls.crawler == crawler_id)\n q = q.group_by(cls.status)\n for (status, count) in q.all():\n stats[status] = count\n return stats\n\n 
@classmethod\n def by_keys(cls, parent_id=None, collection=None, foreign_id=None,\n content_hash=None):\n \"\"\"Try and find a document by various criteria.\"\"\"\n q = cls.all()\n\n if collection is not None:\n q = q.filter(Document.collection_id == collection.id)\n\n if parent_id is not None:\n q = q.filter(Document.parent_id == parent_id)\n\n if foreign_id is not None:\n q = q.filter(Document.foreign_id == foreign_id)\n elif content_hash is not None:\n q = q.filter(Document.content_hash == content_hash)\n else:\n raise ValueError(\"No unique criterion for document.\")\n\n document = q.first()\n if document is None:\n document = cls()\n document.type = cls.TYPE_OTHER\n document.collection_id = collection.id\n document.collection = collection\n document.parent_id = parent_id\n document.foreign_id = foreign_id\n document.content_hash = content_hash\n document.status = document.STATUS_PENDING\n db.session.add(document)\n return document\n\n def to_dict(self):\n data = self.to_meta_dict()\n try:\n from flask import request # noqa\n data['public'] = request.authz.collection_public(self.collection_id) # noqa\n except:\n data['public'] = None\n data.update({\n 'id': self.id,\n 'type': self.type,\n 'status': self.status,\n 'parent_id': self.parent_id,\n 'foreign_id': self.foreign_id,\n 'content_hash': self.content_hash,\n 'crawler': self.crawler,\n 'crawler_run': self.crawler_run,\n 'error_type': self.error_type,\n 'error_message': self.error_message,\n 'collection_id': self.collection_id,\n 'created_at': self.created_at,\n 'updated_at': self.updated_at\n })\n return data\n\n def to_index_dict(self):\n data = self.to_dict()\n data['text'] = index_form(self.text_parts())\n data['schema'] = self.SCHEMA\n data['schemata'] = [self.SCHEMA]\n data['name_sort'] = ascii_text(data.get('title'))\n data['title_latin'] = ascii_text(data.get('title'))\n data['summary_latin'] = ascii_text(data.get('summary'))\n data.pop('tables')\n return data\n\n def __repr__(self):\n return 
'<Document(%r,%r,%r)>' % (self.id, self.type, self.title)\n" }, { "alpha_fraction": 0.7095588445663452, "alphanum_fraction": 0.7095588445663452, "avg_line_length": 37.85714340209961, "blob_id": "4161585e785da7fb90d757cae4304567724ad4f9", "content_id": "7dc02c4a80e14c2f6c71033df55ce9f9d4a16fb1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "permissive", "max_line_length": 67, "num_lines": 7, "path": "/aleph/index/collections.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.index.util import query_delete\n\n\ndef delete_collection(collection_id):\n \"\"\"Delete all documents from a particular collection.\"\"\"\n query_delete({'term': {'collection_id': collection_id}})\n query_delete({'term': {'entity_collection_id': collection_id}})\n" }, { "alpha_fraction": 0.7427598237991333, "alphanum_fraction": 0.7490630149841309, "avg_line_length": 28.350000381469727, "blob_id": "fd0f58027c514e53e8bf31a89a35b496843f2849", "content_id": "0314c0ad91cabdbbb04e45674a89fa084da39c79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5870, "license_type": "permissive", "max_line_length": 82, "num_lines": 200, "path": "/docs/research.md", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "# Research topics\n\nTopics in need of more research.\n\n## Domain model ideas\n\n* Each imported document is either tabular or textual. It has many records,\n i.e. 
data rows or document pages.\n* An entity (such as a person, organisation, or topic) is like a permanent\n search query; each entity can have multiple actual search terms associated\n with it (`selectors`).\n* Documents matching an entity after that entity has been created yield\n notifications if a user is subscribed.\n\n## Decentralized pipeline ideas\n\nThe idea is to pick various subsets of functionality out of a larger continuum\nof possible tasks related to document and data processing and try to modularise\nor separate to components and stages of the processing pipelines. This way\nthese could become re-usable while deployed in the differing contexts of the\nvarious tools.\n\nRelated:\n\n* [Centipede](https://github.com/opennewslabs/centipede)\n\n## Entity merging\n\nDe-dupe TODO:\n\n1. merge identifiers\n2. merge properties\n3. merge names, make merged names into a.k.a's\n4. merge collections\n5. update references\n6. update alerts\n7. delete source entities\n8. update source entities\n9. update target entity\n\n## Open design questions\n\n### Entity graph model\n\nThe idea is about making a formal instead of a (i.e.) corporate graph. Let's\nmake a messy one that has all the attributes we can derive from our internal\ndata structure. Then let's use it as a recommendation engine, rather than an\nacademic research object :)\n\n#### Authorisation\n\nMaking sure that users can only see the parts of the graph to which they have\nexplicit access is the hardest part of this. Every node in the graph needs to\nbe associated with one or many collections, and every user querying the\ndatabase has access to several hundred collections. The following are options\nfor modelling this:\n\n* Make each `Collection` a node and connect it to all its subjects using\n `PART_OF` relationships. 
Query these links at the same time as the actual\n data.\n* Add labels to each node to express the `Collections` that it belongs to.\n This fails because it is impossible to do an OR search on node labels in\n Neo4J.\n\nNeo4J Lead Generation Patterns:\n\n```cypher\nMATCH (c:Collection)<-[:PART_OF]-(src)\nMATCH pth = (src)-[*1..3]-(dest)\nMATCH (nc:Collection)\nWHERE\n all(n IN nodes(pth) WHERE (n)<-[:PART_OF]-(nc))\n AND nc.id IN [250]\nRETURN src, pth, dest\nLIMIT 10;\n```\n\n```cypher\nMATCH (c:Collection)<-[:PART_OF]-(src)\nMATCH pth = (src)-[*1..3]-(dest)\nMATCH (nc:Collection)\n\n all(n IN nodes(pth) WHERE (n)<-[:PART_OF]-(nc))\nRETURN pth\nLIMIT 10;\n```\n\n```cypher\nMATCH (c:Collection)<-[:PART_OF]-(src)\nMATCH pth = (src)-[*1..3]-(dest)\nMATCH (nc:Collection)\nWHERE\n c.alephCollection = 250\n AND nc.alephCollection IN [250, 39]\n AND all(n IN nodes(pth) WHERE (n)<-[:PART_OF]-(nc))\nRETURN pth\nLIMIT 10;\n```\n\n#### Model ideas\n\n* Actor (actorName, actorFingerprint, actorLegalEntity, actorCompany, actorPerson)\n * UNDER_JURISDICTION Country\n * PART_OF Collection\n * LOCATED_AT Address\n * REACHABLE_AT PhoneNumber\n * REACHABLE_AT EMail\n * AUTHORED Document\n * BORN_AT Date\n * DIED_AT Date\n * FOUNDED_AT Date\n * DISSOLVED_AT Date\n* Country (countryName, countryCode)\n* Collection (collectionId, collectionName)\n* Document (documentTitle, documentId, documentType)\n * MENTIONS Actor\n * MENTIONS PhoneNumber\n * MENTIONS EMail\n * PART_OF Collection\n * MENTIONS Date\n* PhoneNumber (phoneNumber)\n * LOCATED_IN Country\n* Address (addressText)\n * LOCATED_IN Country\n* EMail (emailAddress)\n * LOCATED_IN Country\n* Date (yearMonthDay)\n\n\n#### Indexing notes\n\nNeo4J queries can go from instantaneous to horrible based on the existance of\nan index, much quicker than Postgres. 
Here's the current indexing strategy:\n\n```\nMERGE (n) SET n:Aleph;\nMERGE (n:Collection) REMOVE n:Aleph;\n\nDROP INDEX ON :Entity(id);\nDROP INDEX ON :Phone(id);\nDROP INDEX ON :Email(id);\nDROP INDEX ON :Document(id);\nDROP INDEX ON :Address(id);\n\nDROP INDEX ON :Entity(fingerprint);\nDROP INDEX ON :Phone(fingerprint);\nDROP INDEX ON :Email(fingerprint);\nDROP INDEX ON :Document(fingerprint);\nDROP INDEX ON :Address(fingerprint);\n\nCREATE CONSTRAINT ON (n:Aleph) ASSERT n.id IS UNIQUE;\nCREATE INDEX ON :Aleph(fingerprint);\n```\n\n#### Loading external graph data\n\nThe purpose of this function is to add structured graph data - such as\ncompany registries, contract or concessions info, or financial\ntransactions to the graph database backing aleph. It will then make this\ngraph data available as recommendations and through the scene editor.\n\nMedium-term, the intention is to make the mappings used by this component\ninto user-editable parts of the aleph interface, such that any tabular\ndata uploaded can be woven into the graph.\n\nMapping file:\n\n```yaml\n## Database configuration URL:\n# Can also be defined as DATABASE_URI in the environment.\ndatabase: postgresql://localhost/database\n\n## Destination collection configuration:\ncollection: my_collection_foreign_id\n```\n\n##### Use case: African mining concessions\n\n* Which company holds the most concessions across all datasets?\n* Longest chains of ownership -\n* Can we track them back to Exhibit 21 structures, who is the BO?\n* Can we make links to offshore datasets (PP, OL, BS, PA)?\n\n##### Use case: Moldavian linkages\n\n* Small networks that have a large extent of control of Moldavian economy.\n* Small networks connected to political actors (e.g. 
Parliament).\n* Clusters within the larger economy\n* Public contracts that connect to PEPs\n* Public contracts that connect to the procurement blacklist\n\n##### Use case: PEPs and companies -- across all registers.\n\n* Run all PEPs from EP & Aleph against all offshore registers and point\n out the ultimate children in an ownership chain.\n\n##### Use case: EU transparency data\n\n* Show all advisory group member companies and persons that also\n were awarded EU-wide contracts.\n" }, { "alpha_fraction": 0.6536144614219666, "alphanum_fraction": 0.6536144614219666, "avg_line_length": 25.559999465942383, "blob_id": "17f42a4ced64f871dce690819201ef6d88280259", "content_id": "8082846ca1490d3f45ae4d1b424e6a7ba6a9c823", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "permissive", "max_line_length": 70, "num_lines": 25, "path": "/aleph/tests/test_view_util.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from flask import Request\n\nfrom aleph.views.util import extract_next_url\nfrom aleph.tests.util import TestCase\n\n\nclass ViewUtilTest(TestCase):\n\n def setUp(self):\n super(ViewUtilTest, self).setUp()\n\n def test_extract_next_url_blank(self):\n req = Request.from_values('')\n\n self.assertEqual('/', extract_next_url(req))\n\n def test_extract_next_url_unsafe(self):\n req = Request.from_values('/?next={}'.format(self.fake.url()))\n\n self.assertEqual('/', extract_next_url(req))\n\n def test_extract_next_url_safe(self):\n req = Request.from_values('/?next=/help')\n\n self.assertEqual('/help', extract_next_url(req))\n" }, { "alpha_fraction": 0.6244897842407227, "alphanum_fraction": 0.6269387602806091, "avg_line_length": 26.840909957885742, "blob_id": "ad6c3e9d39fd1b322e20c264a3f6064be924d0a6", "content_id": "49235aa7ef9a1bb3a047b6c81b3dfac3545b5371", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 
1225, "license_type": "permissive", "max_line_length": 150, "num_lines": 44, "path": "/aleph/static/js/controllers/CollectionsLeadsCtrl.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.controller('CollectionsLeadsCtrl', ['$scope', '$http', '$timeout', '$anchorScroll', 'Lead', 'leads', 'metadata', 'collection', 'Authz', 'Title',\n function($scope, $http, $timeout, $anchorScroll, Lead, leads, metadata, collection, Authz, Title) {\n\n $scope.authz = Authz;\n $scope.collection = collection;\n $scope.metadata = metadata;\n\n $scope.loadOffset = function(offset) {\n $scope.query.set('offset', offset);\n $anchorScroll();\n };\n\n $scope.isEmpty = function() {\n return !$scope.query.isFiltered() && $scope.result.limit > 0 && $scope.result.total == 0;\n };\n\n $scope.setJudgement = function(lead, judgement) {\n var url = '/api/1/collections/' + collection.id + '/leads';\n lead.judgement = judgement;\n $http.post(url, lead).then(function(res) {\n console.log(res.data);\n });\n };\n\n $scope.$on('$routeUpdate', function() {\n reloadSearch();\n });\n\n var reloadSearch = function() {\n Lead.search(collection.id).then(function(data) {\n updateSearch(data);\n });\n };\n\n var updateSearch = function(data) {\n $scope.result = data.result;\n $scope.query = data.query;\n Title.set(\"Leads\", \"collections\");\n };\n\n updateSearch(leads);\n}]);\n" }, { "alpha_fraction": 0.6670135259628296, "alphanum_fraction": 0.6706555485725403, "avg_line_length": 33.94545364379883, "blob_id": "c57dd508f7c01d96f25ae955fb8a2641c24a60d3", "content_id": "66e4ec63444b15077b841c7d69acace414e2f7fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1922, "license_type": "permissive", "max_line_length": 79, "num_lines": 55, "path": "/aleph/views/ingest_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import os\nimport json\nfrom flask import Blueprint, 
request\nfrom werkzeug import secure_filename\nfrom werkzeug.exceptions import BadRequest\nfrom apikit import obj_or_404, jsonify\n\nfrom aleph.core import upload_folder\nfrom aleph.events import log_event\nfrom aleph.ingest import ingest_document\nfrom aleph.model import Collection, Document\nfrom aleph.model.common import make_textid\nfrom aleph.model.validate import validate\nfrom aleph.util import checksum\n\n\nblueprint = Blueprint('ingest_api', __name__)\n\n\[email protected]('/api/1/collections/<int:collection_id>/ingest',\n methods=['POST', 'PUT'])\ndef ingest_upload(collection_id):\n collection = obj_or_404(Collection.by_id(collection_id))\n request.authz.require(request.authz.collection_write(collection.id))\n log_event(request)\n crawler_run = make_textid()\n\n try:\n meta = json.loads(request.form.get('meta', '{}'))\n except Exception as ex:\n raise BadRequest(unicode(ex))\n\n documents = []\n for storage in request.files.values():\n sec_fn = os.path.join(upload_folder, secure_filename(storage.filename))\n storage.save(sec_fn)\n content_hash = checksum(sec_fn)\n document = Document.by_keys(collection=collection,\n content_hash=content_hash)\n document.crawler = 'user_upload:%s' % request.authz.role.id\n document.crawler_run = crawler_run\n document.mime_type = storage.mimetype\n document.file_name = storage.filename\n\n try:\n meta = json.loads(request.form.get('meta', '{}'))\n validate(meta, 'metadata.json#')\n document.meta.update(meta)\n except Exception as ex:\n raise BadRequest(unicode(ex))\n\n ingest_document(document, sec_fn, user_queue=True)\n os.unlink(sec_fn)\n documents.append(document)\n return jsonify({'status': 'ok', 'documents': documents})\n" }, { "alpha_fraction": 0.6487383842468262, "alphanum_fraction": 0.6547144651412964, "avg_line_length": 35.435482025146484, "blob_id": "b74c0ae53ee10e30f8b86559d3e72f299216423f", "content_id": "aba9b26b933cd6129515d7692eb0157b7ff9f676", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 4518, "license_type": "permissive", "max_line_length": 72, "num_lines": 124, "path": "/aleph/views/collections_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request\nfrom apikit import obj_or_404, jsonify, Pager, request_data\nfrom normality import ascii_text\nfrom dalet import COUNTRY_NAMES\n\nfrom aleph.core import USER_QUEUE, USER_ROUTING_KEY, get_config, db\nfrom aleph.model import Collection\nfrom aleph.search import QueryState, lead_count\nfrom aleph.events import log_event\nfrom aleph.logic import delete_collection, update_collection\nfrom aleph.logic import analyze_collection\n\nblueprint = Blueprint('collections_api', __name__)\n\n\[email protected]('/api/1/collections', methods=['GET'])\ndef index():\n # allow to filter for writeable collections only, needed\n # in some UI scenarios:\n state = QueryState(request.args, request.authz)\n permission = request.args.get('permission')\n if permission not in [request.authz.READ, request.authz.WRITE]:\n permission = request.authz.READ\n collections = request.authz.collections[permission]\n\n # Other filters for navigation\n label = request.args.get('label')\n managed = state.getbool('managed', None)\n\n # Include counts (of entities, documents) in list view?\n counts = state.getbool('counts', False)\n\n def converter(colls):\n return [c.to_dict(counts=counts) for c in colls]\n\n facet = [f.lower().strip() for f in request.args.getlist('facet')]\n q = Collection.find(label=label,\n countries=state.getfilter('countries'),\n category=state.getfilter('category'),\n collection_id=collections,\n managed=managed)\n data = Pager(q).to_dict(results_converter=converter)\n facets = {}\n if 'countries' in facet:\n facets['countries'] = {\n 'values': Collection.facet_by(q, Collection.countries,\n mapping=COUNTRY_NAMES)\n }\n if 'category' in facet:\n mapping = get_config('COLLECTION_CATEGORIES', {})\n 
facets['category'] = {\n 'values': Collection.facet_by(q, Collection.category,\n mapping=mapping)\n }\n data['facets'] = facets\n return jsonify(data)\n\n\[email protected]('/api/1/collections', methods=['POST', 'PUT'])\ndef create():\n request.authz.require(request.authz.logged_in)\n data = request_data()\n data['managed'] = False\n collection = Collection.create(data, request.authz.role)\n db.session.commit()\n update_collection(collection)\n log_event(request)\n return jsonify(collection)\n\n\[email protected]('/api/1/collections/<int:id>', methods=['GET'])\ndef view(id):\n collection = obj_or_404(Collection.by_id(id))\n request.authz.require(request.authz.collection_read(collection))\n data = collection.to_dict(counts=True)\n data['lead_count'] = lead_count(id)\n return jsonify(data)\n\n\[email protected]('/api/1/collections/<int:id>', methods=['POST', 'PUT'])\ndef update(id):\n collection = obj_or_404(Collection.by_id(id))\n request.authz.require(request.authz.collection_write(collection))\n collection.update(request_data())\n db.session.add(collection)\n db.session.commit()\n update_collection(collection)\n log_event(request)\n return view(id)\n\n\[email protected]('/api/1/collections/<int:id>/process',\n methods=['POST', 'PUT'])\ndef process(id):\n collection = obj_or_404(Collection.by_id(id))\n request.authz.require(request.authz.collection_write(collection))\n analyze_collection.apply_async([collection.id], queue=USER_QUEUE,\n routing_key=USER_ROUTING_KEY)\n log_event(request)\n return jsonify({'status': 'ok'})\n\n\[email protected]('/api/1/collections/<int:id>/pending', methods=['GET'])\ndef pending(id):\n collection = obj_or_404(Collection.by_id(id))\n request.authz.require(request.authz.collection_read(collection))\n q = collection.pending_entities()\n q = q.limit(30)\n entities = []\n for entity in q.all():\n data = entity.to_dict()\n data['name_latin'] = ascii_text(entity.name)\n entities.append(data)\n return jsonify({'results': entities, 'total': 
len(entities)})\n\n\[email protected]('/api/1/collections/<int:id>', methods=['DELETE'])\ndef delete(id):\n collection = obj_or_404(Collection.by_id(id))\n request.authz.require(request.authz.collection_write(collection))\n delete_collection.apply_async([collection.id], queue=USER_QUEUE,\n routing_key=USER_ROUTING_KEY)\n log_event(request)\n return jsonify({'status': 'ok'})\n" }, { "alpha_fraction": 0.5479857921600342, "alphanum_fraction": 0.5515402555465698, "avg_line_length": 28.61403465270996, "blob_id": "47ab43f2f4fb87ce8e316213ee70b96cabc41e3c", "content_id": "3943c5307cf66b874800296ddc6c692816b92254", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3376, "license_type": "permissive", "max_line_length": 77, "num_lines": 114, "path": "/aleph/search/records.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from elasticsearch.helpers import scan\n\nfrom aleph.core import es, es_index\nfrom aleph.index import TYPE_RECORD\nfrom aleph.util import ensure_list\nfrom aleph.search.fragments import text_query_string\nfrom aleph.search.util import execute_basic\nfrom aleph.search.fragments import match_all, filter_query\nfrom aleph.model import DocumentRecord\n\nSNIPPET_SIZE = 100\n\n\ndef records_query(document_id, state):\n try:\n rows = [int(r) for r in state.getlist('row')]\n except:\n rows = []\n\n score_query = state.has_text or len(rows)\n shoulds = records_query_shoulds(state)\n if not len(shoulds):\n shoulds = [match_all()]\n\n if len(rows):\n shoulds.append({\n \"constant_score\": {\n \"filter\": {'terms': {'index': rows}},\n \"boost\": 1000\n }\n })\n\n query = records_query_internal(document_id, shoulds, size=state.limit)\n query['query'] = filter_query(query['query'], state.filters)\n query['from'] = state.offset\n\n sort = [{'index': 'asc'}, {'page': 'asc'}]\n if score_query:\n sort.insert(0, '_score')\n return query\n\n\ndef records_query_shoulds(state):\n shoulds = []\n if 
state.has_text:\n shoulds.append(text_query_string(state.text))\n\n for term in state.highlight_terms:\n shoulds.append(text_query_string(term))\n return shoulds\n\n\ndef records_query_internal(document_id, shoulds, size=5):\n return {\n 'size': size,\n 'query': {\n 'bool': {\n 'minimum_should_match': 1,\n 'should': shoulds,\n 'filter': [{'term': {'document_id': document_id}}]\n }\n },\n 'highlight': {\n 'fields': {\n 'text': {\n 'fragment_size': SNIPPET_SIZE,\n 'number_of_fragments': 1\n }\n }\n },\n '_source': ['document_id', 'sheet', 'index']\n }\n\n\ndef scan_entity_mentions(entity):\n \"\"\"Find mentions of a given entity in all records.\"\"\"\n shoulds = []\n for term in entity.regex_terms:\n shoulds.append(text_query_string(term))\n\n query = {\n 'query': {\n 'bool': {\n 'should': shoulds,\n 'minimum_should_match': 1\n }\n },\n 'sort': [{'document_id': 'desc'}],\n '_source': ['document_id', 'text']\n }\n for res in scan(es, query=query, index=es_index, doc_type=[TYPE_RECORD]):\n for text in ensure_list(res.get('_source').get('text')):\n yield (res.get('_source').get('document_id'), text)\n\n\ndef execute_records_query(document_id, state, query):\n \"\"\"Execute a query against records and return a set of results.\"\"\"\n result, hits, output = execute_basic(TYPE_RECORD, query)\n ids = []\n for rec in hits.get('hits', []):\n record = rec.get('_source')\n record['score'] = rec.get('_score')\n record['id'] = int(rec.get('_id'))\n ids.append(rec.get('_id'))\n for text in rec.get('highlight', {}).get('text', []):\n record['text'] = text\n output['results'].append(record)\n\n for record in DocumentRecord.find_records(document_id, ids):\n for result in output['results']:\n if result['id'] == record.id:\n result['data'] = record.data\n result['text'] = record.text\n return output\n" }, { "alpha_fraction": 0.6190191507339478, "alphanum_fraction": 0.6190191507339478, "avg_line_length": 34.574466705322266, "blob_id": "1af8208a681f41c5c316aa05f22f457cc4362c22", 
"content_id": "53e6af07d391824442fd939ec542a2c733368443", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3344, "license_type": "permissive", "max_line_length": 79, "num_lines": 94, "path": "/aleph/ingest/manager.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import os\nimport logging\n\nfrom ingestors import Manager\nfrom ingestors.util import decode_path\n\nfrom aleph.core import db\nfrom aleph.model import Document, Cache\nfrom aleph.analyze import analyze_document\nfrom aleph.ingest.result import DocumentResult\nfrom aleph.util import checksum\n\nlog = logging.getLogger(__name__)\n\n\nclass DocumentManager(Manager):\n \"\"\"Handle the process of ingesting documents.\n\n This includes creating and flushing records, setting document state and\n dispatching child ingestors as needed.\n \"\"\"\n\n RESULT_CLASS = DocumentResult\n\n def __init__(self, config, archive):\n super(DocumentManager, self).__init__(config)\n self.archive = archive\n\n def before(self, result):\n db.session.flush()\n result.document.status = Document.STATUS_PENDING\n result.document.delete_records()\n\n def after(self, result):\n result.update()\n db.session.commit()\n if result.error_message:\n log.warn('Error [%r]: %s', result, result.error_message)\n else:\n log.debug('Ingested: %r', result.document)\n analyze_document(result.document)\n\n def get_cache(self, key):\n return Cache.get_cache(key)\n\n def set_cache(self, key, value):\n Cache.set_cache(key, value)\n\n def handle_child(self, parent, file_path, title=None, mime_type=None,\n id=None, file_name=None):\n file_path = decode_path(file_path)\n file_name = decode_path(file_name) or os.path.basename(file_path)\n\n content_hash = None\n if not os.path.isdir(file_path):\n content_hash = checksum(file_path)\n\n document = Document.by_keys(parent_id=parent.document.id,\n collection=parent.document.collection,\n foreign_id=id, content_hash=content_hash)\n 
document.title = title or document.meta.get('title')\n document.file_name = file_name or document.meta.get('file_name')\n document.mime_type = mime_type or document.meta.get('mime_type')\n\n from aleph.ingest import ingest_document\n ingest_document(document, file_path, user_queue=parent.user_queue)\n\n def ingest_document(self, document, file_path=None, user_queue=False):\n \"\"\"Ingest a database-backed document.\n\n First retrieve it's data and then call the actual ingestor.\n \"\"\"\n if file_path is None:\n file_path = self.archive.load_file(document.content_hash,\n file_name=document.file_name)\n\n if file_path is None:\n # TODO: save this to the document?\n log.error(\"Cannot load data: %r\", document)\n return\n\n try:\n if not len(document.languages) and document.collection is not None:\n document.languages = document.collection.languages or []\n\n if not len(document.countries) and document.collection is not None:\n document.countries = document.collection.countries or []\n\n result = DocumentResult(self, document,\n file_path=file_path,\n user_queue=user_queue)\n self.ingest(file_path, result=result)\n finally:\n self.archive.cleanup_file(document.content_hash)\n" }, { "alpha_fraction": 0.6532066464424133, "alphanum_fraction": 0.6627078652381897, "avg_line_length": 29.071428298950195, "blob_id": "33f7e966df596cf61b9d26553f33eed145de220f", "content_id": "a2d89eedf40d2a9f0bab742d2389d76ab6fd466a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "permissive", "max_line_length": 69, "num_lines": 14, "path": "/aleph/tests/test_export_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.tests.util import TestCase\n\n\nclass ExportApiTestCase(TestCase):\n\n def setUp(self):\n super(ExportApiTestCase, self).setUp()\n self.load_fixtures('docs.yaml')\n\n def test_smoke_comes_out(self):\n self.login(is_admin=True)\n res = 
self.client.get('/api/1/query/export')\n assert res.status_code == 200, res\n assert 'openxmlformats' in res.content_type, res.content_type\n" }, { "alpha_fraction": 0.6288565993309021, "alphanum_fraction": 0.6356624364852905, "avg_line_length": 28.386667251586914, "blob_id": "01b0ac4c786235e7bf3d9896c941c2ef8e8659e8", "content_id": "38c70a1eab822a61788d5ec3cc42c57e02c9dfcb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2204, "license_type": "permissive", "max_line_length": 78, "num_lines": 75, "path": "/aleph/text.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport six\nimport logging\nfrom normality import normalize, stringify, latinize_text, collapse_spaces\nfrom normality import slugify # noqa\nfrom normality.cleaning import decompose_nfkd, remove_control_chars\n\nlog = logging.getLogger(__name__)\nINDEX_MAX_LEN = 1024 * 1024 * 100\n\n\ndef index_form(texts):\n \"\"\"Turn a set of strings into the appropriate form for indexing.\"\"\"\n results = []\n total_len = 0\n\n for text in texts:\n # We don't want to store more than INDEX_MAX_LEN of text per doc\n if total_len > INDEX_MAX_LEN:\n # TODO: there might be nicer techniques for dealing with overly\n # long text buffers?\n results = list(set(results))\n total_len = sum((len(t) for t in results))\n if total_len > INDEX_MAX_LEN:\n break\n\n text = stringify(text)\n if text is None:\n continue\n text = collapse_spaces(text)\n # XXX: is NFKD a great idea?\n text = decompose_nfkd(text)\n total_len += len(text)\n results.append(text)\n\n # Make latinized text version\n latin = latinize_text(text)\n latin = stringify(latin)\n if latin is None or latin == text:\n continue\n total_len += len(latin)\n results.append(latin)\n return results\n\n\ndef match_form(text):\n \"\"\"Turn a string into a form appropriate for name matching.\n\n The goal of this function is not to retain a readable version of the given\n 
string, but rather to yield a normalised version suitable for comparisons\n and machine analysis.\n \"\"\"\n return normalize(text, lowercase=True, ascii=True)\n\n\ndef string_value(value, encoding=None):\n value = stringify(value, encoding=encoding, encoding_default='utf-8')\n value = remove_control_chars(value)\n return value\n\n\ndef encoded_value(text):\n if isinstance(text, six.binary_type):\n return text\n return six.text_type(text).encode('utf-8')\n\n\ndef has_value(value):\n \"\"\"Check a given value is not empty.\"\"\"\n if value is None:\n return False\n if isinstance(value, six.string_types):\n if not len(value.strip()):\n return False\n return True\n" }, { "alpha_fraction": 0.7876960039138794, "alphanum_fraction": 0.7876960039138794, "avg_line_length": 62.769229888916016, "blob_id": "7dc712cd83ad0f198ce135b189cbe42cb5f26b10", "content_id": "3922d5c99d7c66194107b8cd331c8da5e9ab1e07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "permissive", "max_line_length": 79, "num_lines": 13, "path": "/aleph/index/__init__.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.index.admin import init_search, upgrade_search # noqa\nfrom aleph.index.admin import delete_index, flush_index # noqa\nfrom aleph.index.entities import index_entity, delete_entity # noqa\nfrom aleph.index.documents import index_document, index_document_id # noqa\nfrom aleph.index.documents import delete_document # noqa\nfrom aleph.index.records import index_records # noqa\nfrom aleph.index.datasets import index_items, delete_dataset # noqa\nfrom aleph.index.leads import index_lead, delete_entity_leads # noqa\nfrom aleph.index.mapping import TYPE_DOCUMENT, TYPE_RECORD, TYPE_ENTITY # noqa\nfrom aleph.index.mapping import TYPE_LINK, TYPE_LEAD # noqa\nfrom aleph.index.mapping import DOCUMENT_MAPPING, RECORD_MAPPING # noqa\nfrom aleph.index.mapping import( # noqa\n ENTITY_MAPPING, 
LINK_MAPPING, LEAD_MAPPING)\n" }, { "alpha_fraction": 0.7922848463058472, "alphanum_fraction": 0.7922848463058472, "avg_line_length": 36.44444274902344, "blob_id": "c02b3ad0059adda6986153da5a8a7f6db1305d1a", "content_id": "11c003add7e85369f8151bbb9d8cd1fa35879389", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 337, "license_type": "permissive", "max_line_length": 65, "num_lines": 9, "path": "/contrib/test.sh", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "psql -c \"DROP DATABASE IF EXISTS aleph_test;\" $ALEPH_DATABASE_URI\npsql -c \"CREATE DATABASE aleph_test;\" $ALEPH_DATABASE_URI\n\nexport ALEPH_REGEX_ENTITIES=true\nexport ALEPH_PASSWORD_LOGIN=true\nexport ALEPH_PASSWORD_REGISTRATION=true\n\npip install -q -r requirements-testing.txt\nnosetests --with-coverage --cover-package=aleph --cover-erase\n" }, { "alpha_fraction": 0.5775577425956726, "alphanum_fraction": 0.5775577425956726, "avg_line_length": 24.25, "blob_id": "f9c67702d93e0d5da85338188d611341da523b34", "content_id": "f4f4dd260f37b4f0abcc15ba7cd76cd439d1865a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 606, "license_type": "permissive", "max_line_length": 70, "num_lines": 24, "path": "/aleph/static/js/directives/entityReconTeaser.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.directive('entityReconTeaser', ['Metadata', function(Metadata) {\n var metadata = {};\n Metadata.get().then(function(md) {\n metadata = md;\n })\n\n return {\n restrict: 'E',\n transclude: false,\n scope: {},\n templateUrl: 'templates/entities/recon.html',\n link: function (scope, element, attrs, model) {\n scope.getUrl = function() {\n var url = metadata.app.url + 'api/freebase/reconcile';\n if (metadata.session.logged_in) {\n url += '?api_key=' + metadata.session.api_key;\n }\n return url;\n };\n }\n };\n}]);\n" }, { 
"alpha_fraction": 0.596403181552887, "alphanum_fraction": 0.6072772741317749, "avg_line_length": 33.65217208862305, "blob_id": "5a8f97b09ead0b854c9dc30d6c6afd301c1f6e12", "content_id": "ca0047f8ea296718f9c8e542149e65b79a3ec241", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2391, "license_type": "permissive", "max_line_length": 77, "num_lines": 69, "path": "/aleph/logic/distance.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from itertools import product\nfrom Levenshtein import jaro_winkler\nfrom pprint import pprint # noqa\n\nSUPER_SCIENTIFIC_WEIGHTINGS = {\n 'names': 0.3,\n 'fp_distance': 0.3,\n 'fp_tokens': 0.2,\n 'countries': 0.1,\n 'dates': 0.1,\n 'addresses_distance': 0.1,\n 'addresses_tokens': 0.1,\n 'emails': 0.3,\n 'phones': 0.3,\n 'identifiers': 0.4,\n}\n\n\ndef pred_best_jw(a, b, field):\n \"\"\"Find the closest jaro-winkler match.\"\"\"\n best = float()\n for (ak, bk) in product(a.get(field, []), b.get(field, [])):\n best = max(best, jaro_winkler(ak.lower(), bk.lower()))\n return best\n\n\ndef pred_matching_elem(a, b, field):\n \"\"\"Find the closest jaro-winkler match.\"\"\"\n for (ak, bk) in product(a.get(field, []), b.get(field, [])):\n if ak.lower() == bk.lower():\n return 1.0\n return 0.0\n\n\ndef pred_token_overlap(a, b, field):\n \"\"\"Find the closest jaro-winkler match.\"\"\"\n best = float()\n a = [set(n.split()) for n in a.get(field, [])]\n b = [set(n.split()) for n in b.get(field, [])]\n for (ak, bk) in product(a, b):\n overlap = float(len(ak.intersection(bk)))\n overlap = overlap / float(max(len(ak), len(bk)))\n best = max(overlap, best)\n return best\n\n\ndef entity_distance(entity, other):\n # once we have enough training data, this should use a regression model\n # of some sort to calculate a multi-attribute based similarity metric.\n # cf. 
https://github.com/datamade/rlr\n # http://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html\n if 'names' not in other:\n other['names'] = [other['name']]\n features = {\n 'names': pred_best_jw(entity, other, 'names'),\n 'fp_distance': pred_best_jw(entity, other, 'fingerprints'),\n 'fp_tokens': pred_token_overlap(entity, other, 'fingerprints'),\n 'countries': pred_best_jw(entity, other, 'countries'),\n 'addresses_distance': pred_best_jw(entity, other, 'addresses'),\n 'addresses_tokens': pred_token_overlap(entity, other, 'addresses'),\n 'emails': pred_best_jw(entity, other, 'emails'),\n 'phones': pred_best_jw(entity, other, 'phones'),\n 'identifiers': pred_best_jw(entity, other, 'identifiers'),\n }\n # pprint(features)\n score = float()\n for field, value in features.items():\n score += value * SUPER_SCIENTIFIC_WEIGHTINGS[field]\n return min(1.0, score)\n" }, { "alpha_fraction": 0.6406723856925964, "alphanum_fraction": 0.6412234902381897, "avg_line_length": 26.49242401123047, "blob_id": "20ae0b99a265acfd5985ef343d4cf697fbb2dc2d", "content_id": "1c81a3b1208f3b59508471c7774c797ed4394156", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3629, "license_type": "permissive", "max_line_length": 77, "num_lines": 132, "path": "/aleph/schema/types.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import re\nimport fingerprints\nfrom normality import ascii_text, stringify, collapse_spaces\nfrom dalet import is_partial_date, parse_date\nfrom dalet import parse_phone, parse_country, parse_email\n\nfrom aleph.util import ensure_list\n\n\nclass StringProperty(object):\n index_invert = None\n\n def __init__(self):\n self.name = type(self).__name__.lower().replace('property', '')\n\n def clean(self, value, record, config):\n value = stringify(value)\n if value is not None:\n return collapse_spaces(value)\n\n def normalize(self, values):\n results = set()\n for value in values:\n 
results.update(ensure_list(self.normalize_value(value)))\n return results\n\n def normalize_value(self, value):\n return self.clean(value, {}, {})\n\n def fingerprint(self, values):\n return []\n\n\nclass NameProperty(StringProperty):\n index_invert = 'names'\n\n def normalize_value(self, value):\n value = collapse_spaces(value)\n return value, ascii_text(value)\n\n def fingerprint(self, values):\n # TODO: this should not be a property thing, so that fp's can include\n # dates etx.\n fps = []\n for value in values:\n fps.append(fingerprints.generate(value))\n return [fp for fp in fps if fp is not None]\n\n\nclass URLProperty(StringProperty):\n index_invert = None\n\n\nclass DateProperty(StringProperty):\n index_invert = 'dates'\n\n def clean(self, value, record, config):\n value = super(DateProperty, self).clean(value, record, config)\n return parse_date(value, date_format=config.get('format'))\n\n def normalize_value(self, value):\n if is_partial_date(value):\n return value\n\n\nclass CountryProperty(StringProperty):\n index_invert = 'countries'\n\n def clean(self, value, record, config):\n value = super(CountryProperty, self).clean(value, record, config)\n return parse_country(value) or value\n\n def normalize_value(self, value):\n return parse_country(value)\n\n\nclass AddressProperty(StringProperty):\n index_invert = 'addresses'\n\n def normalize_value(self, value):\n return fingerprints.generate(value)\n\n\nclass PhoneProperty(StringProperty):\n index_invert = 'phones'\n\n def clean(self, value, record, config):\n value = super(PhoneProperty, self).clean(value, record, config)\n number = parse_phone(value, config.get('country'))\n return number or value\n\n\nclass EmailProperty(StringProperty):\n index_invert = 'emails'\n\n def clean(self, value, record, config):\n value = super(EmailProperty, self).clean(value, record, config)\n return parse_email(value) or value\n\n def normalize_value(self, value):\n return parse_email(value)\n\n\nclass 
IdentiferProperty(StringProperty):\n index_invert = 'identifiers'\n clean_re = re.compile('[^a-zA-Z0-9]*')\n\n def normalize_value(self, value):\n value = stringify(value)\n if value is not None:\n value = self.clean_re.sub('', value).upper()\n return stringify(value)\n\n\ndef resolve_type(name):\n \"\"\"Look up a configerty type by name.\"\"\"\n types = {\n 'string': StringProperty,\n 'name': NameProperty,\n 'date': DateProperty,\n 'country': CountryProperty,\n 'address': AddressProperty,\n 'phone': PhoneProperty,\n 'email': EmailProperty,\n 'url': URLProperty,\n 'uri': URLProperty,\n 'identifier': IdentiferProperty\n }\n type_ = types.get(name.strip().lower())\n if type_ is None:\n raise TypeError(\"No such type: %s\" % name)\n return type_\n" }, { "alpha_fraction": 0.6079632639884949, "alphanum_fraction": 0.6186829805374146, "avg_line_length": 30.095237731933594, "blob_id": "5201d24a2e1a55d6b6d50e37fbda24e06bc8fb80", "content_id": "fc6a5d8e8fc4ce04a63b1f5f78d2d921af613ecf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "permissive", "max_line_length": 56, "num_lines": 21, "path": "/aleph/tests/test_base_api.py", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "from aleph.tests.util import TestCase\n\n\nclass BaseApiTestCase(TestCase):\n\n def setUp(self):\n super(BaseApiTestCase, self).setUp()\n\n def test_index(self):\n res = self.client.get('/')\n assert res.status_code == 200, res\n assert '<title>' in res.data, res.data\n assert 'ng-view' in res.data, res.data\n\n def test_metadata(self):\n res = self.client.get('/api/1/metadata')\n assert res.status_code == 200, res\n assert 'countries' in res.json, res.json\n countries = res.json['countries']\n assert 'ar' in countries, countries\n assert countries['ar'] == 'Argentina', countries\n" }, { "alpha_fraction": 0.5064615607261658, "alphanum_fraction": 0.508307695388794, "avg_line_length": 24.793651580810547, 
"blob_id": "d1619afa2070eaf4d6e02d59623c5e4520eecc64", "content_id": "b3a9ab133e16d814ed8c7cf97ac2c8f26ac0e579", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1625, "license_type": "permissive", "max_line_length": 83, "num_lines": 63, "path": "/aleph/static/js/services/Dataset.js", "repo_name": "gazeti/aleph", "src_encoding": "UTF-8", "text": "import aleph from '../aleph';\n\naleph.factory('Dataset', ['$q', '$http', '$location', 'Query', 'Authz', 'Metadata',\n function($q, $http, $location, Query, Authz, Metadata) {\n\n var indexDfd = null;\n\n var getIndex = function() {\n if (indexDfd === null) {\n indexDfd = $q.defer();\n $http.get('/api/1/datasets', {cache: true}).then(function(res) {\n indexDfd.resolve(res.data)\n }, function(err) {\n indexDfd.reject(err);\n });\n }\n return indexDfd.promise;\n }\n\n return {\n index: getIndex,\n search: function() {\n var dfd = $q.defer();\n var query = Query.parse(),\n state = angular.copy(query.state),\n params = {params: state, cache: true};\n $http.get('/api/1/datasets', params).then(function(res) {\n dfd.resolve({\n 'query': query,\n 'result': res.data\n });\n }, function(err) {\n dfd.reject(err);\n });\n getIndex();\n return dfd.promise;\n },\n get: function(name) {\n var dfd = $q.defer(),\n url = '/api/1/datasets/' + name;\n $http.get(url, {cache: true}).then(function(res) {\n dfd.resolve(res.data)\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n },\n getBase: function(name) {\n var dfd = $q.defer();\n getIndex().then(function(data) {\n for (var i in data.results) {\n var dataset = data.results[i];\n if (dataset.name == name) {\n dfd.resolve(dataset);\n }\n }\n }, function(err) {\n dfd.reject(err);\n });\n return dfd.promise;\n }\n };\n}]);\n" } ]
73
ignaciotb/smarc_data_tools
https://github.com/ignaciotb/smarc_data_tools
c174e9a177271d0b30064d8737db073a5569b4cc
0c97a256402b2034560ffee5eaa5196b7b1b683f
f61d055b0a1eaf4b83aee3a24e0d6985d1584be4
refs/heads/master
2020-04-09T21:17:04.903813
2018-03-07T15:25:24
2018-03-07T15:25:24
124,244,171
0
0
null
2018-03-07T14:00:27
2018-03-07T14:37:23
2018-05-22T09:02:47
Makefile
[ { "alpha_fraction": 0.7821949124336243, "alphanum_fraction": 0.7832731604576111, "avg_line_length": 44.870967864990234, "blob_id": "0a13e4293b72d897d425cd6e76d1e453025b8c76", "content_id": "3783da03a88111ba4032a493f3399e910734eceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 21331, "license_type": "no_license", "max_line_length": 275, "num_lines": 465, "path": "/stevens_pier_project/mapClasses/Makefile", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 2.8\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n.PHONY : default_target\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n# A target that is always out of date.\ncmake_force:\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/clarisse/catkin_ws/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/clarisse/catkin_ws/src\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target 
edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running interactive CMake command-line interface...\"\n\t/usr/bin/cmake -i .\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n.PHONY : edit_cache/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n.PHONY : install/local/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n.PHONY : install/strip/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n.PHONY : list_install_components/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color 
--switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n.PHONY : test/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/clarisse/catkin_ws/src && $(CMAKE_COMMAND) -E cmake_progress_start /home/clarisse/catkin_ws/src/CMakeFiles /home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/CMakeFiles/progress.marks\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/clarisse/catkin_ws/src/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/clarisse/catkin_ws/src && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for 
target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/_mapclasses_generate_messages_check_deps_buff.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/_mapclasses_generate_messages_check_deps_buff.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/_mapclasses_generate_messages_check_deps_buff.dir/rule\n\n# Convenience name for target.\n_mapclasses_generate_messages_check_deps_buff: filteringprocess/filteringprocess/mapClasses/CMakeFiles/_mapclasses_generate_messages_check_deps_buff.dir/rule\n.PHONY : _mapclasses_generate_messages_check_deps_buff\n\n# fast build rule for target.\n_mapclasses_generate_messages_check_deps_buff/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/_mapclasses_generate_messages_check_deps_buff.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/_mapclasses_generate_messages_check_deps_buff.dir/build\n.PHONY : _mapclasses_generate_messages_check_deps_buff/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/rule\n\n# Convenience name for target.\nmap: filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/rule\n.PHONY : map\n\n# fast build rule for target.\nmap/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/build\n.PHONY : map/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_gencpp.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 
filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_gencpp.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_gencpp.dir/rule\n\n# Convenience name for target.\nmapclasses_gencpp: filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_gencpp.dir/rule\n.PHONY : mapclasses_gencpp\n\n# fast build rule for target.\nmapclasses_gencpp/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_gencpp.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_gencpp.dir/build\n.PHONY : mapclasses_gencpp/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages.dir/rule\n\n# Convenience name for target.\nmapclasses_generate_messages: filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages.dir/rule\n.PHONY : mapclasses_generate_messages\n\n# fast build rule for target.\nmapclasses_generate_messages/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages.dir/build\n.PHONY : mapclasses_generate_messages/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_cpp.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_cpp.dir/rule\n.PHONY : 
filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nmapclasses_generate_messages_cpp: filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_cpp.dir/rule\n.PHONY : mapclasses_generate_messages_cpp\n\n# fast build rule for target.\nmapclasses_generate_messages_cpp/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_cpp.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_cpp.dir/build\n.PHONY : mapclasses_generate_messages_cpp/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nmapclasses_generate_messages_lisp: filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/rule\n.PHONY : mapclasses_generate_messages_lisp\n\n# fast build rule for target.\nmapclasses_generate_messages_lisp/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/build\n.PHONY : mapclasses_generate_messages_lisp/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/rule\n.PHONY : 
filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/rule\n\n# Convenience name for target.\nmapclasses_generate_messages_py: filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/rule\n.PHONY : mapclasses_generate_messages_py\n\n# fast build rule for target.\nmapclasses_generate_messages_py/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/build\n.PHONY : mapclasses_generate_messages_py/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genlisp.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genlisp.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genlisp.dir/rule\n\n# Convenience name for target.\nmapclasses_genlisp: filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genlisp.dir/rule\n.PHONY : mapclasses_genlisp\n\n# fast build rule for target.\nmapclasses_genlisp/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genlisp.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genlisp.dir/build\n.PHONY : mapclasses_genlisp/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genpy.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genpy.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genpy.dir/rule\n\n# Convenience name for target.\nmapclasses_genpy: 
filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genpy.dir/rule\n.PHONY : mapclasses_genpy\n\n# fast build rule for target.\nmapclasses_genpy/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genpy.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/mapclasses_genpy.dir/build\n.PHONY : mapclasses_genpy/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/rule\n\n# Convenience name for target.\nscanBuffer: filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/rule\n.PHONY : scanBuffer\n\n# fast build rule for target.\nscanBuffer/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/build\n.PHONY : scanBuffer/fast\n\n# Convenience name for target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/rule\n\n# Convenience name for target.\nscanFilter: filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/rule\n.PHONY : scanFilter\n\n# fast build rule for target.\nscanFilter/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/build\n.PHONY : scanFilter/fast\n\n# Convenience name for 
target.\nfilteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/rule:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f CMakeFiles/Makefile2 filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/rule\n.PHONY : filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/rule\n\n# Convenience name for target.\nutils: filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/rule\n.PHONY : utils\n\n# fast build rule for target.\nutils/fast:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/build\n.PHONY : utils/fast\n\nsrc/map.o: src/map.cpp.o\n.PHONY : src/map.o\n\n# target to build an object file\nsrc/map.cpp.o:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/src/map.cpp.o\n.PHONY : src/map.cpp.o\n\nsrc/map.i: src/map.cpp.i\n.PHONY : src/map.i\n\n# target to preprocess a source file\nsrc/map.cpp.i:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/src/map.cpp.i\n.PHONY : src/map.cpp.i\n\nsrc/map.s: src/map.cpp.s\n.PHONY : src/map.s\n\n# target to generate assembly for a file\nsrc/map.cpp.s:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/map.dir/src/map.cpp.s\n.PHONY : src/map.cpp.s\n\nsrc/scanBuffer.o: src/scanBuffer.cpp.o\n.PHONY : src/scanBuffer.o\n\n# target to build an object file\nsrc/scanBuffer.cpp.o:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/build.make 
filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/src/scanBuffer.cpp.o\n.PHONY : src/scanBuffer.cpp.o\n\nsrc/scanBuffer.i: src/scanBuffer.cpp.i\n.PHONY : src/scanBuffer.i\n\n# target to preprocess a source file\nsrc/scanBuffer.cpp.i:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/src/scanBuffer.cpp.i\n.PHONY : src/scanBuffer.cpp.i\n\nsrc/scanBuffer.s: src/scanBuffer.cpp.s\n.PHONY : src/scanBuffer.s\n\n# target to generate assembly for a file\nsrc/scanBuffer.cpp.s:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanBuffer.dir/src/scanBuffer.cpp.s\n.PHONY : src/scanBuffer.cpp.s\n\nsrc/scanFilter.o: src/scanFilter.cpp.o\n.PHONY : src/scanFilter.o\n\n# target to build an object file\nsrc/scanFilter.cpp.o:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/src/scanFilter.cpp.o\n.PHONY : src/scanFilter.cpp.o\n\nsrc/scanFilter.i: src/scanFilter.cpp.i\n.PHONY : src/scanFilter.i\n\n# target to preprocess a source file\nsrc/scanFilter.cpp.i:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/src/scanFilter.cpp.i\n.PHONY : src/scanFilter.cpp.i\n\nsrc/scanFilter.s: src/scanFilter.cpp.s\n.PHONY : src/scanFilter.s\n\n# target to generate assembly for a file\nsrc/scanFilter.cpp.s:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/build.make 
filteringprocess/filteringprocess/mapClasses/CMakeFiles/scanFilter.dir/src/scanFilter.cpp.s\n.PHONY : src/scanFilter.cpp.s\n\nsrc/utils.o: src/utils.cpp.o\n.PHONY : src/utils.o\n\n# target to build an object file\nsrc/utils.cpp.o:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/src/utils.cpp.o\n.PHONY : src/utils.cpp.o\n\nsrc/utils.i: src/utils.cpp.i\n.PHONY : src/utils.i\n\n# target to preprocess a source file\nsrc/utils.cpp.i:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/src/utils.cpp.i\n.PHONY : src/utils.cpp.i\n\nsrc/utils.s: src/utils.cpp.s\n.PHONY : src/utils.s\n\n# target to generate assembly for a file\nsrc/utils.cpp.s:\n\tcd /home/clarisse/catkin_ws/src && $(MAKE) -f filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/build.make filteringprocess/filteringprocess/mapClasses/CMakeFiles/utils.dir/src/utils.cpp.s\n.PHONY : src/utils.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... _mapclasses_generate_messages_check_deps_buff\"\n\t@echo \"... edit_cache\"\n\t@echo \"... install\"\n\t@echo \"... install/local\"\n\t@echo \"... install/strip\"\n\t@echo \"... list_install_components\"\n\t@echo \"... map\"\n\t@echo \"... mapclasses_gencpp\"\n\t@echo \"... mapclasses_generate_messages\"\n\t@echo \"... mapclasses_generate_messages_cpp\"\n\t@echo \"... mapclasses_generate_messages_lisp\"\n\t@echo \"... mapclasses_generate_messages_py\"\n\t@echo \"... mapclasses_genlisp\"\n\t@echo \"... mapclasses_genpy\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... scanBuffer\"\n\t@echo \"... 
scanFilter\"\n\t@echo \"... test\"\n\t@echo \"... utils\"\n\t@echo \"... src/map.o\"\n\t@echo \"... src/map.i\"\n\t@echo \"... src/map.s\"\n\t@echo \"... src/scanBuffer.o\"\n\t@echo \"... src/scanBuffer.i\"\n\t@echo \"... src/scanBuffer.s\"\n\t@echo \"... src/scanFilter.o\"\n\t@echo \"... src/scanFilter.i\"\n\t@echo \"... src/scanFilter.s\"\n\t@echo \"... src/utils.o\"\n\t@echo \"... src/utils.i\"\n\t@echo \"... src/utils.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/clarisse/catkin_ws/src && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n" }, { "alpha_fraction": 0.7848599553108215, "alphanum_fraction": 0.7857683300971985, "avg_line_length": 41.070064544677734, "blob_id": "32d9ea05737a5f96842ed9a425c1e2c2acc3e6f1", "content_id": "da7e1d8d25360a3c03e0977e5b668467627a179c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 6605, "license_type": "no_license", "max_line_length": 290, "num_lines": 157, "path": "/stevens_pier_project/mapClasses/cmake/mapclasses-genmsg.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(STATUS \"mapclasses: 1 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Imapclasses:/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg;-Istd_msgs:/opt/ros/indigo/share/std_msgs/cmake/../msg;-Isensor_msgs:/opt/ros/indigo/share/sensor_msgs/cmake/../msg;-Igeometry_msgs:/opt/ros/indigo/share/geometry_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp 
REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(mapclasses_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\nget_filename_component(_filename \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\" NAME_WE)\nadd_custom_target(_mapclasses_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"mapclasses\" \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\" \"sensor_msgs/PointField:sensor_msgs/PointCloud2:std_msgs/Header\"\n)\n\n#\n# langs = gencpp;genlisp;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n_generate_msg_cpp(mapclasses\n \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/indigo/share/sensor_msgs/cmake/../msg/PointField.msg;/opt/ros/indigo/share/sensor_msgs/cmake/../msg/PointCloud2.msg;/opt/ros/indigo/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/mapclasses\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(mapclasses\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/mapclasses\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(mapclasses_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(mapclasses_generate_messages mapclasses_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\" NAME_WE)\nadd_dependencies(mapclasses_generate_messages_cpp _mapclasses_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(mapclasses_gencpp)\nadd_dependencies(mapclasses_gencpp mapclasses_generate_messages_cpp)\n\n# register target for 
catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS mapclasses_generate_messages_cpp)\n\n### Section generating for lang: genlisp\n### Generating Messages\n_generate_msg_lisp(mapclasses\n \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/indigo/share/sensor_msgs/cmake/../msg/PointField.msg;/opt/ros/indigo/share/sensor_msgs/cmake/../msg/PointCloud2.msg;/opt/ros/indigo/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/mapclasses\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(mapclasses\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/mapclasses\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(mapclasses_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(mapclasses_generate_messages mapclasses_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\" NAME_WE)\nadd_dependencies(mapclasses_generate_messages_lisp _mapclasses_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(mapclasses_genlisp)\nadd_dependencies(mapclasses_genlisp mapclasses_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS mapclasses_generate_messages_lisp)\n\n### Section generating for lang: genpy\n### Generating Messages\n_generate_msg_py(mapclasses\n \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/indigo/share/sensor_msgs/cmake/../msg/PointField.msg;/opt/ros/indigo/share/sensor_msgs/cmake/../msg/PointCloud2.msg;/opt/ros/indigo/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/mapclasses\n)\n\n### 
Generating Services\n\n### Generating Module File\n_generate_module_py(mapclasses\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/mapclasses\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(mapclasses_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(mapclasses_generate_messages mapclasses_generate_messages_py)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/msg/buff.msg\" NAME_WE)\nadd_dependencies(mapclasses_generate_messages_py _mapclasses_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(mapclasses_genpy)\nadd_dependencies(mapclasses_genpy mapclasses_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS mapclasses_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/mapclasses)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/mapclasses\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nadd_dependencies(mapclasses_generate_messages_cpp std_msgs_generate_messages_cpp)\nadd_dependencies(mapclasses_generate_messages_cpp sensor_msgs_generate_messages_cpp)\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/mapclasses)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/mapclasses\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nadd_dependencies(mapclasses_generate_messages_lisp std_msgs_generate_messages_lisp)\nadd_dependencies(mapclasses_generate_messages_lisp sensor_msgs_generate_messages_lisp)\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/mapclasses)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall 
\\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/mapclasses\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/mapclasses\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nadd_dependencies(mapclasses_generate_messages_py std_msgs_generate_messages_py)\nadd_dependencies(mapclasses_generate_messages_py sensor_msgs_generate_messages_py)\n" }, { "alpha_fraction": 0.568965494632721, "alphanum_fraction": 0.568965494632721, "avg_line_length": 18.33333396911621, "blob_id": "15e1ad5528a7f91ca377d1c619409258583c35df", "content_id": "61774c979fcdba29d0264ef01f50d2cba1ed4e0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 58, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/stevens_pier_project/mapClasses/dlib_build/CMakeFiles/dlib.dir/cmake_clean_target.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"../../../../devel/lib/libdlib.a\"\n)\n" }, { "alpha_fraction": 0.7482876777648926, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 82.57142639160156, "blob_id": "65202ff49ba6aadad1e512da6ba91c9260b596ac", "content_id": "d574fd29be31df45f90881476dcd1064fe72493b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 584, "license_type": "no_license", "max_line_length": 199, "num_lines": 7, "path": "/stevens_pier_project/mapClasses/catkin_generated/package.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "set(_CATKIN_CURRENT_PACKAGE \"mapclasses\")\nset(mapclasses_MAINTAINER \"unnar <[email protected]>\")\nset(mapclasses_DEPRECATED \"\")\nset(mapclasses_VERSION \"0.0.0\")\nset(mapclasses_BUILD_DEPENDS \"geometry_msgs\" \"pcl_conversions\" \"pcl_ros\" \"roscpp\" \"rospy\" \"sensor_msgs\" \"visualization_msgs\" \"std_msgs\" \"tf\" \"tf_conversions\" \"eigen_conversions\" 
\"message_generation\")\nset(mapclasses_RUN_DEPENDS \"geometry_msgs\" \"pcl_conversions\" \"pcl_ros\" \"roscpp\" \"rospy\" \"sensor_msgs\" \"visualization_msgs\" \"std_msgs\" \"tf\" \"tf_conversion\" \"eigen_conversion\" \"message_runtime\")\nset(mapclasses_BUILDTOOL_DEPENDS \"catkin\")" }, { "alpha_fraction": 0.7365079522132874, "alphanum_fraction": 0.7365079522132874, "avg_line_length": 30.5, "blob_id": "c371678cbed7468fb049b9f68de4adddc506d513", "content_id": "fec1c0395c60217652bf12aef58cab003521b59a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 315, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/stevens_pier_project/mapClasses/CMakeFiles/scanBuffer.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/scanBuffer.dir/src/scanBuffer.cpp.o\"\n \"../../../devel/lib/libscanBuffer.pdb\"\n \"../../../devel/lib/libscanBuffer.so\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/scanBuffer.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.73102867603302, "alphanum_fraction": 0.7318718433380127, "avg_line_length": 19.824562072753906, "blob_id": "00fc3828e8fe84b9162ff8a9c8dd251649026e29", "content_id": "5c65a4400663dc68b650b9191d2911f20e3ba36c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 95, "num_lines": 57, "path": "/stevens_pier_project/mapClasses/include/mapClasses/utils/utils.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#ifndef UTILS_H\n#define UTILS_H\n\n#include <pcl_ros/point_cloud.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <tf/transform_listener.h>\n#include <Eigen/Dense>\n#include <queue>\n\nnamespace 
utils{\n\n\ttemplate<typename PointCloudT>\n\tvoid printCloud(PointCloudT a);\n\n\ttemplate<typename PointT>\n\tfloat Distance(PointT a, PointT b);\n\n\ttemplate<typename PointT>\n\tfloat Distance(PointT a);\n\t\n\ttemplate<typename PointT>\n\tfloat squaredDistance(PointT a, PointT b);\n\n\ttemplate<typename PointT>\n\tfloat squaredDistance(PointT a);\n\n\tfloat dotVec(std::vector<float> a, std::vector<float> b);\n\n\tfloat lenVec(std::vector<float> a);\n\n\ttemplate<typename PointT>\n\tbool isPointPerpendicularToLine(const PointT a, const PointT b, const PointT p, float &theta);\n\n\tvoid calcMeanVar(const std::vector<float> vec, float &ave, float &var);\n\n\n\tclass poseTracker{\n\t\tstd::queue<Eigen::Vector3f> pose;\n\tpublic:\t\n\t\tposeTracker(){ };\n\t\tbool newPose(tf::StampedTransform transform);\n\t};\n\n\n\tnamespace params{\n\t\ttemplate<typename T>\n\t\tT loadParam( std::string name, ros::NodeHandle &nh);\n\n\t\ttemplate<typename T>\n\t\tT loadParam( std::string name, ros::NodeHandle *nh);\n\t}\n\n}\n\n#include \"utils.hpp\"\n#endif" }, { "alpha_fraction": 0.7952522039413452, "alphanum_fraction": 0.8071216344833374, "avg_line_length": 55.16666793823242, "blob_id": "ca7c11c42c81182f4f4b574324e79a0ce4d853d8", "content_id": "4279fe1261a81aa36c4540ccabf6bdd069b35b81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 337, "license_type": "no_license", "max_line_length": 103, "num_lines": 6, "path": "/stevens_pier_project/mapClasses/dlib_build/CTestTestfile.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "# CMake generated Testfile for \n# Source directory: /home/clarisse/Downloads/dlib-18.18/dlib\n# Build directory: /home/clarisse/catkin_ws/src/filteringprocess/filteringprocess/mapClasses/dlib_build\n# \n# This file includes the relevant testing commands required for \n# testing this directory and lists subdirectories to be tested as well.\n" }, { "alpha_fraction": 
0.6490417122840881, "alphanum_fraction": 0.6697486042976379, "avg_line_length": 30.94769287109375, "blob_id": "73a7ba51833a989f5cad954384e9c59af11ce10c", "content_id": "0512edd21acf7b5299343535b3baceab00623409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10383, "license_type": "no_license", "max_line_length": 142, "num_lines": 325, "path": "/stevens_pier_project/mapping/py/moveVideoray.py", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''\n\tNathaniel Goldfarb\n\t8/16/15\n\tI&E scholar\n\tVideoRay research group\n\n\tThis program moves the videoray model in rviz using\n\tdata from the /usbl_pose node\n\tbased on \"Using urdf with robot_state_publisher\" tutorial\n'''\n\n\nimport rospy\nimport roslib\nimport math\nimport tf\n#import outlier_filter\nfrom geometry_msgs.msg import Twist, Vector3, Pose, PoseStamped, TransformStamped, Transform\nfrom std_msgs.msg import Bool, String\nfrom nav_msgs.msg import Path\nfrom numpy import mean, std\nimport sys\nimport numpy as np\nimport random\nfrom tf.transformations import euler_from_quaternion, quaternion_matrix\nimport kalman_filter\nimport md_filter\n\n\n# Callback that recieves iformation that a new position transformation is available\n# based on the sonar data and ICP.\ndef sonar_move_callback(tmp):\n\t# print \"SONAR\"\n\tif sonar_move_callback.start:\n\t\tsonar_move_callback.RPos = np.eye(4)*25\n\t\tsonar_move_callback.RPose = np.eye(6)*25\n\t\tsonar_move_callback.start = 0\n\n\t# get the position the sonar scan estimates\n\t# try:\n\t# \t(trans,rot) = listener.lookupTransform('/odom_sonar', '/odom' , rospy.Time(0))\n\t# except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n\t# \tprint \"returning\"\n\t# \treturn\n\n\t# print \"listen: \", trans, rot\n\t# print \"geomet: \", (tmp.translation.x,tmp.translation.y,tmp.translation.z), (tmp.rotation.x, tmp.rotation.y, 
tmp.rotation.z, tmp.rotation.w)\n\t# print rot, trans\n\n\t# Create objects for orientation and position updates\n\tpos = PoseStamped()\n\tpos.pose.position = tmp.translation\n\tpos.pose.orientation = tmp.rotation\n\tpose_move(pos.pose, sonar_move_callback.RPose, True)\n\tpos_move(pos, sonar_move_callback.RPos, True)\n\n\n# Callback that recieves pose information from gyro and depth sensor\ndef pose_move_callback(pos):\n\tif pose_move_callback.start:\n\t\tpose_move_callback.R = np.eye(6)*25\n\t\tpose_move_callback.start = 0\n\tpose_move(pos, pose_move_callback.R, False)\n\n# Callback that recieves position (x,y) from the usbl positioning system\ndef usbl_move_callback(pos):\n\t# print \"USBL\"\n\tif usbl_move_callback.start:\n\t\tusbl_move_callback.R = np.eye(4)*5\n\t\tusbl_move_callback.start = 0\n\t\tusbl_move_callback.md_filter = md_filter.md_filter(2, [2.0, 1.9], 10, [0, 1])\n\t# if usbl_move_callback.md_filter.update( [pos.pose.position.x,pos.pose.position.y] ):\n\tpos_move(pos, usbl_move_callback.R, False)\n\n\ndef pos_move(pos, R, correction):\n\t# Note pos and current position have x and y swapped.\n\t# -> pos.position.x corresponds to current.position.y\n\n\t# Check that the values aren't crazy\n\tif correction:\n\t\tmax = 5\n\t\t# The pos is in world coordinates need to translate to kalman filter coordinates (x->y, y->x)\n\t\ta = quaternion_matrix([pos.pose.orientation.x, pos.pose.orientation.y, pos.pose.orientation.z, pos.pose.orientation.w])\n\t\ta[0,3] = pos.pose.position.x\n\t\ta[1,3] = pos.pose.position.y\n\t\tb = np.array([current.position.x, current.position.y, 0, 1])\n\t\tc = a.dot(b)\n\t\tpos.pose.position.x = c[1]\n\t\tpos.pose.position.y = c[0]\n\telse:\n\t\tmax = 10\n\n\tif(abs(pos.pose.position.x - current.position.y) > max or abs(pos.pose.position.y - current.position.x) > max ):\n\t\tprint \"CRAZY POSITION\"\n\t\treturn\n\n\n\tbroadcaster = tf.TransformBroadcaster()\n\tif pos_move.start:\n\t\tdt = 0.0\n\n\t\tloc = 
np.matrix([[0],#v_x\n\t\t\t\t\t\t [0],#v_y\n\t\t\t\t\t\t [pos.pose.position.x],#x\n\t\t\t\t\t\t [pos.pose.position.y]])#y\n\n\t\tA = np.matrix([[1, 0, 0, 0,],\n\t\t\t\t \t [0, 1, 0, 0,],\n\t\t\t\t [dt, 0, 1, 0,],\n\t\t\t\t [0, dt, 0, 1,]])\n\t\tB = np.matrix([0])\n\t\tC = np.eye(loc.shape[0])\n\t\tQ = np.eye(loc.shape[0])*0.5\n\t\t# R = np.eye(loc.shape[0])*5000\n\t\tP = np.eye(loc.shape[0])\n\t\tU = np.matrix( [[0]])\n\t\tZ = np.matrix( [[0],[0],[0],[0]])\n\n\t\tpos_move.kalman = kalman_filter.kalman_filter(A,B,C,Q,P,R,loc)\n\t\tpos_move.md_filter = md_filter.md_filter(2, [1.75, 1.6], 10, [0, 1])\n\t\tpos_move.path = Path()\n\t\tpos_move.path.header.frame_id=\"odom\"\n\t\tpos_move.start = 0\n\n\n\tif not correction:\n\t\tpos.pose.position.x += pos_move.xd\n\t\tpos.pose.position.y += pos_move.yd\n\n\n\n\t# if pos_move.md_filter.update( [pos.pose.position.x,pos.pose.position.y] ) or correction or not correction:\n\tZ = np.matrix( [[0],[0],[pos.pose.position.x],[pos.pose.position.y] ])\n\tU = np.matrix( [[0]])\n\tpos_move.kalman.move(U,Z, R)\n\tkalman_pos = pos_move.kalman.getState()\n\tif correction:\n\t\t# We are subscribing to the inverse translation, thus the minus sign.\n\t\tpos_move.xd += kalman_pos.item(2) - current.position.y\n\t\tpos_move.yd += kalman_pos.item(3) - current.position.x\n\t# print \"addition: \", pose_move.yaw_addition\n\t# print \"pos:\", \"x:\", current.position.x, \"y:\", current.position.y\n\t# print \"mx: \", pos_move.xd\n\t# print \"my: \", pos_move.yd\n\t# print \"yaw:\", pose_move.yaw\n\tcurrent.position.y = kalman_pos.item(2)\n\tcurrent.position.x = kalman_pos.item(3)\n\n\t# else:\n\t# \tprint \"NOT ACCEPTED\"\n\n\tpath_now = PoseStamped()\n\tpath_now.pose.position.x = current.position.x\n\tpath_now.pose.position.y = current.position.y\n\tpath_now.pose.position.z = 0\n\t# from odom (parent) to body (child) \n\tbroadcaster.sendTransform( 
(current.position.x,current.position.y,current.position.z),\n\t\t\t\t\t\t\t\t(current.orientation.x,current.orientation.y,current.orientation.z,current.orientation.w),\n\t\t\t\t\t\t\t\t\trospy.Time.now(), \"body\", \"odom\" )\n\t# from buffer (parent) to odom (child)\n\tbroadcaster.sendTransform((-pos_move.yd, -pos_move.xd, 0),\n\t\t\t\t\t\t\t\ttf.transformations.quaternion_from_euler(0, 0, -pose_move.yaw/180*math.pi),\n\t\t\t\t\t\t\t\trospy.Time.now(),\"odom\",\"buffer\")\n\tpos_move.path.poses.append(path_now)\n\tpath.publish(pos_move.path)\n\n\n\tpose_kalman = PoseStamped();\n\tpose_kalman.pose.position.x = current.position.x \n\tpose_kalman.pose.position.y = current.position.y\n\tpose_kalman.pose.position.z = current.position.z\n\tpose_kalman.pose.orientation.x = current.orientation.x\n\tpose_kalman.pose.orientation.y = current.orientation.y\n\tpose_kalman.pose.orientation.z = current.orientation.z\n\tpose_kalman.pose.orientation.w = current.orientation.w\n\tpose_kalman.header.frame_id = \"odom\"\n\tpub_pose.publish(pose_kalman)\n\n\ndef pose_move(pos, R, correction):\n\n\ttoDegree = 180/math.pi\n\tbroadcaster = tf.TransformBroadcaster()\n\t(roll, pitch, yaw) = tf.transformations.euler_from_quaternion([-1*pos.orientation.y,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos.orientation.x,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t-1*pos.orientation.z,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos.orientation.w])\n\n\troll = roll*toDegree\n\tpitch = pitch*toDegree\n\tyaw = yaw*toDegree\n\n\tif correction:\n\t\tif abs(yaw) > 45:\n\t\t\tprint \"CRAZY YAW\"\n\t\t\treturn\n\t\troll = pose_move.kalman.getState().item(3)\n\t\tpitch = pose_move.kalman.getState().item(4)\n\t\tyaw = pose_move.kalman.getState().item(5) - yaw\n\n\tif pose_move.start:\n\t\tdt = .00\n\t\tposition = np.matrix([[0], # v_x\n\t\t\t\t\t\t [0], # v_y\n\t\t\t\t\t\t [0], # v_z\n\t\t\t\t\t\t [roll], # x\n\t\t\t\t\t\t [pitch], # y\n\t\t\t\t\t\t [yaw]])\t # z\n\n\t\tA = np.matrix([[1, 0, 0, 0, 0, 0, ],\n\t\t\t\t\t [0, 1, 0, 0, 0, 
0, ],\n\t\t\t\t\t [0, 0, 1, 0, 0, 0, ],\n\t\t\t\t\t [dt, 0, 0, 1, 0, 0, ],\n\t\t\t\t\t [0, dt, 0, 0, 1, 0, ],\n\t\t\t\t\t [0, 0, dt, 0, 0, 1, ]])\n\t\tB = np.matrix([0])\n\t\tC = np.eye(position.shape[0])\n\t\tQ = np.eye(position.shape[0]) * .5\n\t\tR = np.eye(position.shape[0]) * 50\n\t\tP = np.eye(position.shape[0])\n\t\tU = np.matrix([[0]])\n\t\tZ = np.matrix([[0], [0], [0], [0], [0], [0] ])\n\n\t\tpose_move.kalman = kalman_filter.kalman_filter(A,B,C,Q,P,R,position)\n\t\tpose_move.start = 0\n\n\tif not correction:\n\t\tyaw += pose_move.yaw\n\t\tcurrent.position.z = -pos.position.z\n\t\t# current.position.z = 0\n\n\t# Z = np.matrix([[0], [0], [0], [roll], [pitch], [yaw+pose_move.yaw_addition]])\n\tZ = np.matrix([[0], [0], [0], [roll], [pitch], [yaw]])\n\t# Z = np.matrix([[0], [0], [0], [0], [0], [yaw]])\n\tU = np.matrix([[0]])\n\n\ttmp_yaw = pose_move.kalman.getState().item(5)\n\n\tpose_move.kalman.move(U, Z, R)\n\tpos = pose_move.kalman.getState()\n\n\tif correction:\n\t\t# print \"yaw input:\", yaw\n\t\t# print \"yaw before:\", tmp_yaw\n\t\t# print \"yaw after:\", pos[5]\n\t\t# print \"yaw change:\", yaw - tmp_yaw\n\t\tpose_move.yaw += pos.item(5) - tmp_yaw\n\t\t# print \"yaw addition:\", pose_move.yaw\n\n\troll = pos[3]\n\tpitch = pos[4]\n\tyaw = pos[5]\n\n\n\n\tquad = tf.transformations.quaternion_from_euler(roll/toDegree, pitch/toDegree, yaw/toDegree )\n\t# quad = tf.transformations.quaternion_from_euler(0, 0, yaw/toDegree )\n\n\tcurrent.orientation.x = quad[0]\n\tcurrent.orientation.y = quad[1]\n\tcurrent.orientation.z = quad[2]\n\tcurrent.orientation.w = quad[3]\n\n\tbroadcaster.sendTransform( (current.position.x,current.position.y,current.position.z),\n\t\t\t\t\t\t\t\t(current.orientation.x,current.orientation.y,current.orientation.z,\n\t\t\t\t\t\t\t\tcurrent.orientation.w),\n\t\t\t\t\t\t\t\trospy.Time.now(), \"body\", \"odom\" )\n\tbroadcaster.sendTransform((-pos_move.yd,-pos_move.xd, 0),\n\t\t\t\t\t\t\t\ttf.transformations.quaternion_from_euler(0, 0, 
-pose_move.yaw/180*math.pi),\n\t\t\t\t\t\t\t\trospy.Time.now(),\"odom\",\"buffer\")\n\n\ndef save_path(name):\n\tprint name.data\n\tf = open(\"/home/clarisse/catkin_ws/src/filteringprocess/mapping/savedData/\" + name.data + \"-path.txt\", 'w')\n\tfor pose in pos_move.path.poses:\n\t\tf.write(str(pose.pose.position.x) + \" \" + str(pose.pose.position.y) + \"\\n\")\n\tprint \"hmmmmmmmmm\"\n\tf.close()\n\nif __name__ == '__main__':\n\tprint \"moveVideoray\"\n\t#set up the node\n\trospy.init_node('moveVideoray', anonymous=True)\n\t#make a broadcaster foir the tf frame\n\tbroadcaster = tf.TransformBroadcaster()\n\tlistener = tf.TransformListener()\n\n\t#make intilial values\n\tcurrent = Pose()\n\tcurrent.position.x = 0\n\tcurrent.position.y = 0\n\tcurrent.position.z = 0\n\tcurrent.orientation.x = 0\n\tcurrent.orientation.y = 0\n\tcurrent.orientation.z = 0\n\tcurrent.orientation.w = 0\n\tpose_move.yaw = 0\n\tpos_move.xd = 0\n\tpos_move.yd = 0\n\t#send the tf frame\n\tbroadcaster.sendTransform( (current.position.x,current.position.y,current.position.z),\n\t\t\t\t\t\t\t\t(current.orientation.x,current.orientation.y,current.orientation.z,current.orientation.w),\n\t\t\t\t\t\t\t\t\t rospy.Time.now(), \"body\", \"odom\" )\n\n\t#listen for information\n\n\tpos_move.start = 1\n\tpose_move.start = 1\n\tusbl_move_callback.start = 1\n\tsonar_move_callback.start = 1\n\tpose_move_callback.start = 1\n\tpose_move.yaw_addition = 0.0\n\tpub_pose = rospy.Publisher(\"/pose_after_kalman\", PoseStamped, queue_size=1)\n\tpub = rospy.Publisher('update_buffer', Bool, queue_size=1)\n\tpath = rospy.Publisher('path', Path, queue_size=1)\n\trospy.Subscriber(\"/usbl_pose\", PoseStamped, usbl_move_callback);\n\trospy.Subscriber(\"/mapBuilder/trans_update\", Transform, sonar_move_callback);\n\trospy.Subscriber(\"/pose_only\", Pose, pose_move_callback);\n\trospy.Subscriber(\"/writeMapToTXT\", String, save_path);\n\trospy.spin()\n\tprint \"moveVideoray\"\n" }, { "alpha_fraction": 0.7130976915359497, 
"alphanum_fraction": 0.7169092297554016, "avg_line_length": 25, "blob_id": "b93c854db360dbca2d7cae762fd91164f4b0db26", "content_id": "d723471e2b024deb48da1ba818dd1a21a2a395ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2886, "license_type": "no_license", "max_line_length": 91, "num_lines": 111, "path": "/stevens_pier_project/mapClasses/include/mapClasses/scanBuffer/scanBuffer.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "/**\n *\tMaintaines a buffer of previous scans.\n * @author Unnar Axelsson\n */\n\n#ifndef SCANBUFFER_H\n#define SCANBUFFER_H\n\n#include <ros/ros.h>\n#include <ros/package.h>\n#include <pcl_ros/point_cloud.h>\n#include <pcl/point_cloud.h>\n\n#include <pcl/point_types.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/common/transforms.h>\n#include <tf/transform_listener.h>\n#include <tf/transform_broadcaster.h>\n\n#include <dlib/rand.h>\n#include <dlib/svm_threaded.h>\n\nusing namespace std;\nusing namespace dlib;\n\n\n /** \\typedef PointT: each beam is associate with x,y,z and intensity */\ntypedef pcl::PointXYZI PointT;\ntypedef pcl::PointCloud<PointT> PointCloudT;\n\n// dlib typedefs\ntypedef matrix<double,20,1> sample_type_short;\ntypedef matrix<double,40,1> sample_type;\ntypedef one_vs_one_trainer<any_trainer<sample_type> > ovo_trainer;\ntypedef polynomial_kernel<sample_type> poly_kernel;\n\n\n/**\n * \\brief setting og buffer\n * \\details \tparameters are finding in mapping/include/parameters/MapBuilder.yaml\n */\nstruct scanBufferParams{\n\tint bufferSize; /**< Number of buffers at any time\t\t\t*/\n\tint scanSize; /**< Maximum number of beams stored in each buffer\t\t\t*/\n\tfloat ecTolerance; /**< maximum to closest point within cluster\t\t\t*/\n\tfloat ecMinClusterSize; /**< Minimum size of a single cluster\t\t\t*/\n\tstring outputLines; /**< Output topic for line fit over cluster\t\t\t*/\n\tstring outputBuffUpdate;/**< Output topic for 
buffer\t\t\t*/\n\tstring outputBuff; /**< Output topic for buffer\t\t\t*/\n\t// int maxIntensity; /**< Maximum intensity point that a cluster needs to include\t\t\t*/\n};\n\nstruct buff{\n\tPointCloudT::Ptr buffer;\n\tstd::vector<float> dist;\n\tint countStart;\n\tint countLast;\n\n\tbuff(){\n\t\tbuffer = PointCloudT::Ptr (new PointCloudT());\n\t\tcountStart = 0;\n\t\tcountLast = 0;\n\t}\n};\n\n\nclass scanBuffer{\n\n\tros::NodeHandle *nh_;\n\tros::Publisher pubLines_;\n\tros::Publisher pubBuff_;\n\tros::Publisher pubBuffUpdate_;\n\tros::Subscriber _sub_poseKalman; \n\n\tscanBufferParams par_;\n\tstd::vector<buff> buff_;\n\n\tstd::vector<PointCloudT::Ptr> clouds_;\n\ttf::TransformListener listener_;\n\tstd::vector<int> cloudsCount_;\n\tint idxMax;\n\tint countLastProcessed;\n\n\tovo_trainer trainer;\n\tsvm_nu_trainer<poly_kernel> poly_trainer;\n\tone_vs_one_decision_function<ovo_trainer, decision_function<poly_kernel> > df;\n\tvector_normalizer<sample_type_short> normalizer;\n\tprobabilistic_decision_function<poly_kernel> df_1_3;\n\n\tbool isFirstPose; \n\tdouble nbrBeamReceived ;\n\npublic:\n\n\tscanBuffer();\n\n\tvoid initialize(ros::NodeHandle *nh);\n\n\tbool newScan(buff bu, PointCloudT::Ptr out, int &type);\n\tvoid updateBuffer(Eigen::Affine3f trans);\n\nprivate:\n\n\tvoid processBuffer(buff bu, PointCloudT::Ptr out, int &type);\n\tvoid findWalls(const buff bu, PointCloudT::Ptr out, std::vector<PointCloudT::Ptr> &walls);\n\tvoid findPillars(const buff bu, PointCloudT::Ptr out);\n\tvoid loadParams(void);\n\n};\n\n#endif\n" }, { "alpha_fraction": 0.42482268810272217, "alphanum_fraction": 0.6531915068626404, "avg_line_length": 21.74193572998047, "blob_id": "1a16df6f3af22a06cb49c9168fc14ca3046c9e19", "content_id": "63e84c3e7482b18284006f02aae7cf08eb543661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 62, "num_lines": 62, "path": 
"/stevens_pier_project/mapping/savedData/UnnarResults_Dec_10/svmprec.py", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pickle\nimport pylab\nimport random\nimport os.path\nimport csv\nfrom pylab import figure, show, rand\nfrom matplotlib.patches import Ellipse, Circle\nfrom scipy.stats import chi2\n\n# print \"tot: \", (226+141+86)/float(226+141+86+3+37+7+1+33+3)\n\n\n# pnpre = 252/float(252+4+10)\n# pnrec = 252/float(252+8+34)\n# pnacc = (252+140+3+1+84)/float(252+140+3+1+84 + 4+10+8+34)\n\n# pwpre = 140/float(140 + 8+3)\n# pwrec = 140/float(140+4+1)\n# pwacc = (140+252+10+85+35)/float((140+252+10+85+35)+4+8+3+1)\n\n# pppre = 85/float(85 + 34+1)\n# pprec = 85/float(85+10+3)\n# ppacc = (85+252+4+8+140)/float((85+252+4+8+140)+34+1+10+3)\n\n# print pnpre\n# print pnrec\n# print pnacc\n# print pwpre\n# print pwrec\n# print pwacc\n# print pppre\n# print pprec\n# print ppacc\n# print \"tot: \", (252+140+85)/float(252+140+85+4+10+3+8+34+1)\n\n\n# rnpre = 253/float(253+4+9)\n# rnrec = 253/float(253+3+35)\n# rnacc = (253+144+4+1+84)/float(253+144+4+1+84 + 4+9+3+35)\n\n# rwpre = 144/float(144 + 3+4)\n# rwrec = 144/float(144+4+1)\n# rwacc = (144+253+9+84+35)/float((144+253+9+84+35)+4+3+1+4)\n\n# rppre = 84/float(84 + 35+1)\n# rprec = 84/float(84+9+4)\n# rpacc = (84+253+4+3+144)/float((84+253+4+3+144)+9+4+35+1)\n\n# print rnpre\n# print rnrec\n# print rnacc\n# print rwpre\n# print rwrec\n# print rwacc\n# print rppre\n# print rprec\n# print rpacc\n\nprint \"tot: \", (253+144+84)/float(253+144+84+4+9+4+1+35+3)\n" }, { "alpha_fraction": 0.6001165509223938, "alphanum_fraction": 0.6298310160636902, "avg_line_length": 27.296703338623047, "blob_id": "ec966efc01004c2f79eb9fcadc97451da6788ab5", "content_id": "86bb1b1e858f801023d44f364716ff4b6999bcbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5149, "license_type": 
"no_license", "max_line_length": 100, "num_lines": 182, "path": "/stevens_pier_project/move_videoray/src/moveVideoray.py", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''\n\tNathaniel Goldfarb \n\t8/16/15\n\tI&E scholar\n\tVideoRay research group\n\t\n\tThis program moves the videoray model in rviz using \n\tdata from the /usbl_pose node\n\tbased on \"Using urdf with robot_state_publisher\" tutorial\n'''\n\n\n\n\nimport rospy\nimport roslib\nimport math\nimport tf\n#import outlier_filter\nfrom geometry_msgs.msg import Twist, Vector3, Pose, PoseStamped, TransformStamped\nfrom numpy import mean, std\nimport sys\nimport numpy as np\nimport random\nfrom tf.transformations import euler_from_quaternion\nimport kalman_filter\nimport md_filter\nimport numpy as np\n\n\n#//move the videoray using the data from the /pose_only node\ndef usbl_move(pos):\n\n\tbroadcaster = tf.TransformBroadcaster()\n\tif usbl_move.start:\n\t\tdt = 2\n\n\t\tloc = np.matrix([[0],#v_x\n\t\t\t\t\t\t [0],#v_y\n\t\t\t\t\t\t [pos.pose.position.x],#x\n\t\t\t\t\t\t [pos.pose.position.y]])#z\n\t\n\t\tA = np.matrix([[1, 0, 0, 0,],\n\t\t\t\t \t [0, 1, 0, 0,],\n\t\t\t\t [dt, 0, 1, 0,],\n\t\t\t\t [0, dt, 0, 1,]])\n\t\tB = np.matrix([0])\n\t\tC = np.eye(loc.shape[0])\n\t\tQ = np.eye(loc.shape[0])*0.5\n\t\tR = np.eye(loc.shape[0])*5000\n\t\tP = np.eye(loc.shape[0])\n\t\tU = np.matrix( [[0]])\n\t\tZ = np.matrix( [[0],[0],[0],[0]])\n\t\t\n\t\tusbl_move.kalman = kalman_filter.kalman_filter(A,B,C,Q,P,R,loc)\n\t\tusbl_move.md_filter = md_filter.md_filter(2, [1.75, 1.6], 10, [0, 1])\n\t\tusbl_move.start = 0\n\n\n\tif usbl_move.md_filter.update( [pos.pose.position.x,pos.pose.position.y] ):\n\t\t#update.ax.scatter(pos.position.x,pos.position.y,-1*current.position.z,color='b')\n\t\tcurrent.position.x = pos.pose.position.x\n\t\tcurrent.position.y = pos.pose.position.y\n\t\t#current.position.z = pos.pose.position.z\n\t\t#update('b')\n\t\tZ = np.matrix( 
[[0],[0],[pos.pose.position.x],[pos.pose.position.y] ])\n\t\tU = np.matrix( [[0]])\n\t\tusbl_move.kalman.move(U,Z)\n\t\tkalman_pos = usbl_move.kalman.getState()\n\t\tcurrent.position.y = kalman_pos[2]\n\t\tcurrent.position.x = kalman_pos[3]\n\t\n\tbroadcaster.sendTransform( (current.position.x,current.position.y,current.position.z), \n\t\t\t\t\t\t\t\t(current.orientation.x,current.orientation.y,current.orientation.z,current.orientation.w),\n\t\t\t\t\t\t\t\t\trospy.Time.now(), \"body\", \"odom\" )\n\t\n\n\n\ndef pose_move(pos):\n\n\t#pos.position.z is in kPa, has to be convereted to depth\n\t# P = P0 + pgz ----> pos.position.z = P0 + pg*z_real\n\t#z_real = -(1000*pos.position.z-101.325)/9.81 \n\tz_real = -pos.position.z \n\ttoDegree = 180/math.pi\n\tcurrent.position.z = z_real\n\tbroadcaster = tf.TransformBroadcaster()\n\t#set up the Kalman Filter\n\t#tf.transformations.quaternion_from_euler()\n\t(roll, pitch, yaw) = tf.transformations.euler_from_quaternion([-1*pos.orientation.y,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos.orientation.x,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t-1*pos.orientation.z,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos.orientation.w])\n\t\n\troll = roll*toDegree\n\tpitch = pitch*toDegree\n\tyaw = yaw*toDegree\n\ts = 'The value of roll is ' + repr(roll) + ', and pitch is ' + repr(pitch) + ', yaw = ' + repr(yaw)\n\tprint s\n\n\tif pose_move.start:\n\t\tdt = .02\t\n\t\tpos = np.matrix([[0], # v_x\n\t\t\t\t\t\t [0], # v_y\n\t\t\t\t\t\t [0], # v_z\n\t\t\t\t\t\t [roll], # x\n\t\t\t\t\t\t [pitch], # y\n\t\t\t\t\t\t [yaw]])\t # z\n\t\t\n\t\tA = np.matrix([[1, 0, 0, 0, 0, 0, ],\n\t\t\t\t\t [0, 1, 0, 0, 0, 0, ],\n\t\t\t\t\t [0, 0, 1, 0, 0, 0, ],\n\t\t\t\t\t [dt, 0, 0, 1, 0, 0, ],\n\t\t\t\t\t [0, dt, 0, 0, 1, 0, ],\n\t\t\t\t\t [0, 0, dt, 0, 0, 1, ]])\n\t\tB = np.matrix([0])\n\t\tC = np.eye(pos.shape[0])\n\t\tQ = np.eye(pos.shape[0]) * .5\n\t\tR = np.eye(pos.shape[0]) * 500\n\t\tP = np.eye(pos.shape[0])\n\t\tU = np.matrix([[0]])\n\t\tZ = np.matrix([[0], [0], [0], [0], [0], 
[0] ])\n\n\t\tpose_move.kalman = kalman_filter.kalman_filter(A,B,C,Q,P,R,pos)\n\t\t\n\t\tpose_move.start = 0\n\n\n\tZ = np.matrix([[0], [0], [0], [roll], [pitch], [yaw]])\n\tU = np.matrix([[0]])\n\tpose_move.kalman.move(U, Z)\n\tpos = pose_move.kalman.getState()\n\t\t\n\troll = pos[3]\n\tpitch = pos[4]\n\tyaw = pos[5]\n\n\t# quad = tf.transformations.quaternion_from_euler(roll/toDegree, pitch/toDegree, yaw/toDegree )\n\tquad = tf.transformations.quaternion_from_euler(0, 0, yaw/toDegree )\n\t\n\tcurrent.orientation.x = quad[0]\n\tcurrent.orientation.y = quad[1]\n\tcurrent.orientation.z = quad[2]\n\tcurrent.orientation.w = quad[3]\n\n\tbroadcaster.sendTransform( (current.position.x,current.position.y,current.position.z), \n\t\t\t\t\t\t\t\t(current.orientation.x,current.orientation.y,current.orientation.z,\n\t\t\t\t\t\t\t\tcurrent.orientation.w),\n\t\t\t\t\t\t\t\trospy.Time.now(), \"body\", \"odom\" )\n\n\n\nif __name__ == '__main__':\n\t#set up the node\n\trospy.init_node('moveVideoray', anonymous=True)\n\t#make a broadcaster foir the tf frame\n\tbroadcaster = tf.TransformBroadcaster()\n\t#make intilial values\n\tcurrent = Pose()\n\tcurrent.position.x = 0\n\tcurrent.position.y = 0\n\tcurrent.position.z = 0\n\tcurrent.orientation.x = 0\n\tcurrent.orientation.y = 0\n\tcurrent.orientation.z = 0\n\tcurrent.orientation.w = 0\n\t#send the tf frame\n\tbroadcaster.sendTransform( (current.position.x,current.position.y,current.position.z), \n\t\t\t\t\t\t\t\t(current.orientation.x,current.orientation.y,current.orientation.z,current.orientation.w),\n\t\t\t\t\t\t\t\t\t rospy.Time.now(), \"body\", \"odom\" )\n\n\t#listen for information\n\t\n\tusbl_move.start = 1\n\tpose_move.start = 1\n\t#pub = rospy.Publisher(\"newPose\", Pose)\n\trospy.Subscriber(\"/usbl_pose\", PoseStamped, usbl_move);\n\trospy.Subscriber(\"/pose_only\", Pose, pose_move);\n\trospy.spin()" }, { "alpha_fraction": 0.7433302402496338, "alphanum_fraction": 0.7442502379417419, "avg_line_length": 20.33333396911621, 
"blob_id": "adca2900e5dac7fc10068c65bf3383a321c57f3f", "content_id": "cca4cf3f4ebfd740d85c5d2dd475870165e72e87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 66, "num_lines": 51, "path": "/stevens_pier_project/mapping/include/scanBuffer/scanBuffer.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "/**\n *\tMaintaines a buffer of previous scans.\n * @author Unnar Axelsson\n */\n\n#ifndef SCANBUFFER_H\n#define SCANBUFFER_H\n\n#include <ros/ros.h>\n#include <pcl_ros/point_cloud.h>\n#include <pcl/point_cloud.h>\n\n#include <pcl/point_types.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/common/transforms.h>\n#include <tf/transform_listener.h>\n#include <tf/transform_broadcaster.h>\n\n\ntypedef pcl::PointXYZI PointT;\ntypedef pcl::PointCloud<PointT> PointCloudT;\n\nclass scanBuffer{\n\n\tstd::vector<PointCloudT::Ptr> clouds_;\n\ttf::TransformListener listener;\n\ttf::TransformBroadcaster br;\n\tstd::vector<int> cloudsCount_;\n\tint idxMax;\n\ttf::TransformListener listener_;\n\tint bufferSize_, scanSize_, maxIntensity_;\n\tfloat ecTolerance_, ecMinClusterSize_;\n\tros::Publisher *pubLines_;\n\npublic:\n\tscanBuffer(void);\n\n\tbool newScan(const PointCloudT::Ptr scan, PointCloudT::Ptr out);\n\tvoid updateBuffer(Eigen::Affine3f trans);\n\n\tvoid setPublisher(ros::Publisher *pubLines){\n\t\tpubLines_ = pubLines;\n\t}\n\nprivate:\n\tvoid processBuffer(PointCloudT::Ptr cloud, PointCloudT::Ptr out);\n\tvoid loadParams(void);\n\n};\n\n#endif" }, { "alpha_fraction": 0.5051664710044861, "alphanum_fraction": 0.5373134613037109, "avg_line_length": 19.279069900512695, "blob_id": "0706b5dda19e6b3d7ad474c3066d726658a95a50", "content_id": "9a72de3ca222a0346d1b4c97e74c2481e7795733", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 871, "license_type": "no_license", "max_line_length": 
75, "num_lines": 43, "path": "/stevens_pier_project/mapClasses/src/utils.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#include \"mapClasses/utils/utils.h\"\n\nnamespace utils{\n\n\tfloat dotVec(std::vector<float> a, std::vector<float> b){\n\t\treturn a[0]*b[0] + a[1]*b[1];\n\t}\n\n\tfloat lenVec(std::vector<float> a){\n\t\treturn std::sqrt(a[0]*a[0] + a[1]*a[1]);\n\t}\n\n\t// void calcMeanVar(const std::vector<float> vec, float &ave, float &var){\n\n\t// \tfloat cnt = 0.0f;\n\t// \tfloat sum = 0.0f;\n\t// \tfloat cnt2= 0.0f;\n\n\t// \tfor(size_t i=0; i < vec.size(); ++i){\n\t// \t\tcnt += vec.at(i);\n\t// \t\tsum += i*vec.at(i);\n\t// \t\tcnt2 += i*i*vec.at(i);\n\t// \t}\n\t// \tave = sum/cnt;\n\t// \tvar = cnt2/cnt - ave*ave;\n\t// }\n\n\tvoid calcMeanVar(const std::vector<float> vec, float &ave, float &var){\n\n\t\tfloat cnt = 0.0f;\n\t\tfloat sum = 0.0f;\n\t\tfloat cnt2= 0.0f;\n\n\t\tfor(size_t i=0; i < vec.size(); ++i){\n\t\t\tcnt += vec.at(i);\n\t\t\tsum += i*vec.at(i);\n\t\t\tcnt2 += i*i*vec.at(i);\n\t\t}\n\t\tave = sum/cnt;\n\t\tvar = cnt2/cnt - ave*ave;\n\t}\n\n}" }, { "alpha_fraction": 0.5537325739860535, "alphanum_fraction": 0.6164889335632324, "avg_line_length": 28.1434268951416, "blob_id": "59dc73c13f3e5c9515d86916896e5282a80cbcf6", "content_id": "b2a3a774e0e5d7fb288293037d704bd2dbb805c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7314, "license_type": "no_license", "max_line_length": 204, "num_lines": 251, "path": "/stevens_pier_project/mapping/py/pillarPlot.py", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pickle\nimport pylab\nimport random\nimport os.path\nimport csv\nfrom pylab import figure, show, rand\nfrom matplotlib.patches import Ellipse, Circle\nfrom scipy.stats import chi2\nfrom scipy.optimize import curve_fit\n# \nfile = 
'pillar00-new'\nfile2 = 'pillar-test'\nsaveName = 'hmmm'\nshowPath = False\nshowGrid = True\n\ntableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), \n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150), \n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), \n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), \n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)] \n \n# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts. \nfor i in range(len(tableau20)): \n r, g, b = tableau20[i] \n tableau20[i] = (r / 255., g / 255., b / 255.)\n\ndef rad2deg(rad):\n\treturn rad/math.pi*180.0\n\ndef confidenceEllipsFromPillar(pillar):\n\tpos = np.array(pillar).T\n\tcovar = np.cov(pos)\n\tw,v = np.linalg.eig(covar)\n\tif w[0] < w[1]:\n\t\tw[0], w[1] = w[1], w[0]\n\t\tv[0], v[1] = v[1], v[0]\n\talpha = -rad2deg(math.atan2(v[0][1], v[0][0]))\n\tsize = [2*math.sqrt(5.991*x) for x in w]\n\treturn [size[0], size[1], alpha]\n\ndef ellipseFromConfidence(xy, para):\n\tell = Ellipse(xy, para[0], para[1], para[2])\n\tell.set_clip_box(ax.bbox)\n\tell.set_alpha(0.2)\n\tell.set_facecolor(tableau20[0])\n\treturn ell\n\ndef rmsPillar(pillar):\n\tarr = np.array(pillar).T\n\tarr = arr - arr.mean(axis=1).reshape(2,1)\n\treturn math.sqrt(np.power(arr,2).sum(axis=0).mean())\n\ndef circleFromRMS(xy, rad):\n\tcir = Circle(xy, rad)\n\tcir.set_clip_box(ax.bbox)\n\tcir.set_alpha(0.2)\n\tcir.set_facecolor(tableau20[0])\n\treturn cir\n\ndef meanPillar(pillar):\n\treturn np.array(pillar).T.mean(axis=1)\n\n\t# return [sum(x)/float(len(x)) for x in zip(*pillar)]\n\ndef distToLine(A,B,C,x,y):\n\treturn abs(A*x+B*y+C)/math.sqrt(A**2+B**2)\n\n\n\n\ncurrent_directory = os.path.dirname(__file__)\nparent_directory = os.path.split(current_directory)[0]\nfile_path = os.path.join(parent_directory, 'savedData/')\nf = open(file_path + file + '.txt', 'rw')\n\ncsv_out = open(file_path + file + '.csv', 'wb')\ncsvf 
= csv.writer(csv_out)\npillars = []\nfeatures = []\nmode = 0\nfor line in f:\n\tif \"PILLARS\" in line:\n\t\tmode = 0\n\t\tcontinue\n\telif \"FEATURES\" in line:\n\t\tmode = 1\n\t\tcontinue\n\n\tif mode == 0:\n\t\tif \"Pillar\" in line:\n\t\t\tpillars.append([])\n\t\telse:\n\t\t\ttmpList = line.rstrip().split(',')\n\t\t\tprint tmpList\n\t\t\tpillars[-1].append([float(tmpList[0]), -float(tmpList[1])])\n\telif mode == 1:\n\t\tif \"Feature\" in line:\n\t\t\tfeatures.append([])\n\t\telse:\n\t\t\ttmpList = line.rstrip().split(',')\n\t\t\tprint tmpList\n\t\t\tfeatures[-1].append([float(tmpList[0]), -float(tmpList[1])])\n\t# print(\"a\", line)\n# print pillars\n\n\nstat = []\n\t\nfig = figure()\nax = fig.add_subplot(111, aspect='equal')\nfor i in range(len(pillars)):\n\tplt.scatter(*zip(*pillars[i]), color=tableau20[i+3%20], marker=\"o\", s=50)\n\t# plt.scatter(*zip(*features[i]), color=tableau20[i+3%20], marker=\"x\", s=20, alpha=0.3)\n\n\nmeanPillars = []\nfor pillar in pillars:\n\tl = meanPillar(pillar)\n\t# print l\n\tmeanPillars.append(l)\n\tstat.append([rmsPillar(pillar)])\n\tstat[-1].extend(confidenceEllipsFromPillar(pillar)[0:2])\n\tplt.scatter(l[0], l[1], s=40)\n\tax.add_artist(ellipseFromConfidence(l,confidenceEllipsFromPillar(pillar)))\n\tax.add_artist(circleFromRMS(l, rmsPillar(pillar)))\nplt.gca().set_aspect('equal', adjustable='box')\n\n# csvf.writerow([\"Pillar\" ,\"Hits\", \"RMS (m)\", \"Ellipse x(m)\", \"Ellipse y(m)\"])\nfor i in range(len(stat)):\n\tprint '{}{}{}'.format(\"pillar \", i+1,\":\"), '{}{}'.format(\"# \", len(pillars[i])), '{}{}'.format(\"rms: \", stat[i][0]), '{}{}'.format(\"ellipse: \", stat[i][1:3])\n\t# print '{}'.format(len(pillars[i])), '{}{}'.format(\",\", stat[i][0]), '{}{}{}{}'.format(\",\", stat[i][1], \",\", stat[i][2])\n\tcsvf.writerow([str(i+1) ,len(pillars[i]), stat[i][0], stat[i][1], stat[i][2]])\n\n# Find average values for statistics\nmeanStat = np.array(stat).T.mean(axis=1)\n\nrms = 0;\nellipse1 = 0\nellipse2 = 
0\npillarcount = 0\nfor i in range(0,9):\n\t# print stat[i][0], len(pillars[i])\n\tpillarcount += len(pillars[i])\n\trms += stat[i][0] * len(pillars[i])\n\tellipse1 += stat[i][1] * len(pillars[i])\n\tellipse2 += stat[i][2] * len(pillars[i])\n\n\nnumberPillars = sum([len(pillar) for pillar in pillars])/float(len(pillars))\n\nprint '{}'.format(\"Average: \"), '{}{}'.format(\"# \", numberPillars), '{}{}'.format(\"rms: \", rms/float(pillarcount)), '{}{}'.format(\"ellipse: \", [ellipse1/float(pillarcount), ellipse2/float(pillarcount)])\n# print '{}'.format(numberPillars), '{}{}'.format(\",\", numberPillars), '{}{}{}{}'.format(\",\", meanStat[1],\",\",meanStat[2])\ncsvf.writerow([\"Average\", numberPillars, rms/float(pillarcount), ellipse1/float(pillarcount), ellipse2/float(pillarcount)])\n\n\na = []\nb = []\nif showPath:\n\tprint \"hahahhahah\"\n\tprint file_path + file2 + \"-path.txt\"\n\tf = open(file_path + file2 + \"-path.txt\", 'r')\n\tfor line in f:\n\t\tc = line.split(',')\n\t\ta.append(float(c[0]))\n\t\tb.append(-float(c[1]))\n\t\tprint line\n\nplt.plot(a,b,color=tableau20[14])\n\nprint \"rms mean: \", rms/pillarcount\n# plt.title(file)\nax.set_xlabel('x(m)')\nax.set_ylabel('y(m)')\nax.set_xlim(-5, 23)\nax.set_ylim(-2, 4)\nax.set(aspect=1, adjustable='box-forced')\n# plt.savefig(file_path + file2 + '.png', bbox_inches='tight', dpi = 250)\n# plt.show()\n\n\n\nif showGrid:\n\tdef f(x, A, B): # this is your 'straight line' y=f(x)\n\t return A*x + B\n\n\txl = []\n\tyl = []\n\txh = []\n\tyh = []\n\n\t# Edit to make sure xl and yl are filled with the bottom row of pillars and that xh and yh with upper row\n\tfor i in range(0,len(meanPillars)):\n\t\tif i==1 or i==3 or i==5 or i==8:\n\t\t\txl.append(meanPillars[i][0])\n\t\t\tyl.append(meanPillars[i][1])\n\t\tif i==0 or i==4 or i==7:\n\t\t\txh.append(meanPillars[i][0])\n\t\t\tyh.append(meanPillars[i][1])\n\n\n\t# Draw grid through the pillars\n\t# Lower pillars\n\tA,B = np.polyfit(xl, yl, 
1)\n\tplt.plot([-5,23],[f(-5,A,B), f(23,A,B)], '--', color=tableau20[14], zorder=1000)\n\n\t# Higer pillars\n\tdist = 0\n\tfor i in range(0,len(xh)):\n\t\tdist += distToLine(A,-1,B,xh[i],yh[i])\n\tdist /= 3.0\n\t# print \"dist:\", dist\n\tdist = dist/math.cos(math.atan2(A,1))\n\tplt.plot([-5,23],[f(-5,A,B+dist), f(23,A,B+dist)], '--', color=tableau20[14], zorder=1000)\n\n\n\tspacings = []\n\t# horizontal lines, find best spacing\n\tfor i in range(0,len(xl)-1):\n\t\tspacings.append(math.sqrt((xl[i+1]-xl[i])**2 + (yl[i+1]-yl[i])**2))\n\n\tspacings = sum(spacings)/float(len(spacings))\n\tspacings = spacings/math.cos(math.atan2(-A,1))\n\tB = xl[0]+A*yl[0]\n\tplt.plot([f(-2,-A,B), f(4,-A,B)], [-2,4], '--', color=tableau20[14], zorder=1000)\n\tB = B+spacings\n\tplt.plot([f(-2,-A,B), f(4,-A,B)], [-2,4], '--', color=tableau20[14], zorder=1000)\n\tB = B+spacings\n\tplt.plot([f(-2,-A,B), f(4,-A,B)], [-2,4], '--', color=tableau20[14], zorder=1000)\n\tB = B+spacings\n\tplt.plot([f(-2,-A,B), f(4,-A,B)], [-2,4], '--', color=tableau20[14], zorder=1000)\n\n\tprint xl[0], yl[0]\n\n\tprint \"grid size:\", spacings, dist\n\n\n# fig = figure()\n# ax = fig.add_subplot(111, aspect='equal')\n# ax.set_xlabel('x(m)')\n# ax.set_ylabel('y(m)')\n# ax.set_xlim(-5, 23)\n# ax.set_ylim(-2, 4)\n# for i in range(0,len(meanPillars)):\n# \talph = i/10.0+0.05\n\t# if i==1 or i==3 or i==5 or i==8:\n\t\t# plt.scatter(meanPillars[i][0], meanPillars[i][1],color='b')\nplt.show()" }, { "alpha_fraction": 0.5224833488464355, "alphanum_fraction": 0.5390500426292419, "avg_line_length": 36.81269454956055, "blob_id": "eaafa4e86893090be7a4e5d68c3877a3d08be17a", "content_id": "052bc5b0d2d6ab421c05f6c6dbf37af68cb0e8a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 36338, "license_type": "no_license", "max_line_length": 169, "num_lines": 961, "path": "/stevens_pier_project/mapClasses/src/map.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": 
"UTF-8", "text": "#include \"mapClasses/map/map.h\"\n#include \"mapClasses/utils/utils.h\"\n\n#include <pcl/registration/icp.h>\n#include <pcl/filters/passthrough.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/filters/filter_indices.h>\n#include <eigen_conversions/eigen_msg.h>\n#include <geometry_msgs/TransformStamped.h>\n#include <geometry_msgs/Pose2D.h>\n#include <pcl/registration/transformation_estimation_point_to_plane.h>\n#include <math.h>\n#include <vector>\n\nusing namespace utils::params;\n\nvoid sonarMap::initialize(ros::NodeHandle *nh){\n map = PointCloudT::Ptr (new PointCloudT());\n mapWalls = PointCloudTNormal::Ptr (new PointCloudTNormal());\n nh_ = nh;\n setParameters();\n\n // Publish to outputTransform\n\t\t// In this case the topic is : /mapBuilder/trans_update\n\t\t// a message of type geometry_msgs::Transform\n pubTransform_ = nh_->advertise<geometry_msgs::Transform>(par_.outputTransform, 1);\n\n // Publish to outputMapWalls\n\t\t// In this case the topic is : /mapBuilder/map_walls\n\t\t// a message of type PointCloudTNormal\n pubMapWalls_ = nh_->advertise<PointCloudTNormal>(par_.outputMapWalls, 1);\n\n // Publish to outputMapWallsAligned\n\t\t// In this case the topic is : /mapBuilder/map_walls_aligned\n\t\t// a message of type PointCloudTNormal\n pubMapWallsAligned_ = nh_->advertise<PointCloudTNormal>(par_.outputMapWallsAligned, 1);\n \n\n aligned_tmp = PointCloudT::Ptr (new PointCloudT());\n aligned_tmp->header.frame_id=\"odom\";\n features = PointCloudT::Ptr (new PointCloudT());\n pillars = PointCloudT::Ptr (new PointCloudT());\n pillars->header.frame_id=\"odom\";\n features->header.frame_id=\"odom\";\n}\n\nbool sonarMap::newScan(PointCloudT::Ptr beam, std::vector<int> & TdataAss, std::vector<int> & erasedId,const int type){\n\n if(type == 1){\n return sonarMap::newWallScan(beam);\n } else if(type == 2){\n return sonarMap::newPillarScan(beam, TdataAss, erasedId);\n }\n return false;\n}\n\nbool 
sonarMap::newWallScan(PointCloudT::Ptr beam){\n // TODO add feature as param to functions.\n // Do this like this for now since feature is a global variable accessed by multiple functions.\n features->clear();\n *features = *beam;\n features->header.frame_id = \"odom\";\n\n pcl::PointCloud<pcl::Normal> normals;\n normals.resize(beam->width * beam->height);\n PointCloudTNormal::Ptr beamNormal (new PointCloudTNormal());\n pcl::concatenateFields(*beam, normals, *beamNormal);\n // Check to see if the map has been properly initialized.\n\n \n if(!sonarMap::intializeMapWall(beamNormal, mapWalls, 15)) return false;\n\n // create a kdtree over the wall map\n // find the beam neighbors on map\n pcl::KdTreeFLANN<PointTNormal> kdtree;\n kdtree.setInputCloud(mapWalls);\n\n std::vector<int> pidx;\n std::vector<float> psd;\n PointCloudTNormal::Ptr filtered_beamNormal (new PointCloudTNormal());\n for(size_t i = 0; i < beam->points.size(); ++i){\n if(kdtree.radiusSearch(beamNormal->points.at(i), 5.0, pidx, psd) > 0){\n // if(features->points.size() <= 2){\n if(pidx.size() > 0){\n filtered_beamNormal->points.push_back(beamNormal->points.at(i));\n }\n }\n }\n\n PointCloudTNormal::Ptr aligned (new PointCloudTNormal());\n\n typedef pcl::registration::TransformationEstimationPointToPlane<PointTNormal, PointTNormal> te;\n boost::shared_ptr<te> teSVDscale (new te);\n\n pcl::IterativeClosestPoint<PointTNormal, PointTNormal> icp;\n // icp.setTransformationEstimation(teSVDscale);\n icp.setInputSource(filtered_beamNormal);\n icp.setInputTarget(mapWalls);\n icp.setMaxCorrespondenceDistance (5.0);\n icp.setRANSACOutlierRejectionThreshold(0.5);\n icp.setRANSACIterations(100);\n icp.setMaximumIterations(1000);\n icp.setTransformationEpsilon(1e-3);;\n // icp.setMaximumIterations(500);\n // icp.setTransformationEpsilon (1e-8);\n // icp.setEuclideanFitnessEpsilon (1.0);\n\n //estimates the transformation and returns the transformed source (input) as output.\n icp.align(*aligned);\n\n 
if(!(icp.getFitnessScore() < 1.5 && icp.hasConverged())){\n return false;\n }\n\n\n Eigen::Affine3f transform;\n transform = icp.getFinalTransformation();\n pcl::transformPointCloud (*beamNormal, *aligned, transform);\n\n if(!sonarMap::publishWallTransform(transform)){\n std::cout << \"Aligned, but transformation to big.....\" << std::endl;\n return false;\n }\n\n for(int i = 0; i < aligned->points.size(); ++i)\n {\n if(kdtree.radiusSearch(aligned->points.at(i), 2.0, pidx, psd) == 0){\n mapWalls->points.push_back(aligned->points.at(i));\n }\n }\n\n pubMapWalls_.publish(mapWalls);\n pubMapWallsAligned_.publish(aligned);\n return true;\n}\n\n\n\n\n\nbool sonarMap::newPillarScan(PointCloudT::Ptr beam, std::vector<int> & TdataAss,std::vector<int> & erasedId){\n\n features->clear();\n *features = *beam;\n features->header.frame_id = \"odom\";\n\n /* Check to see if the map has been properly initialized */\n if(!sonarMap::intializeMapPillar(features, map, 3,1.0)) return false;\n\n\n/* Data fitering for aligning */\n /* Passthrough Filter, only align to map features with intensity higher than 2.0 */\n PointCloudT::Ptr filtered_map (new PointCloudT());\n sonarMap::passThroughFilter(map, filtered_map, 2.0);\n /* Create a kd-tree from the filtered map */\n pcl::KdTreeFLANN<PointT> kdtree;\n kdtree.setInputCloud(filtered_map);\n /* Radius search variables */\n std::vector<int> pidx ; // the resultant indices of the neighboring points \n std::vector<float> psd ; // the resultant squared distances to the neighboring points\n double radius = 5.0 ; // the radius of the sphere bounding all of p_q's neighbors \n\n /* Filter out points far away from the map and only use those who are close for aligning */\n PointCloudT::Ptr filtered_scan(new PointCloudT()) ; // scan point who are are close enough \n for(size_t i = 0; i < features->points.size(); ++i){\n if(kdtree.radiusSearch(features->points.at(i), radius, pidx, psd) > 0){\n if(pidx.size() > 0){\n 
filtered_scan->points.push_back(features->points.at(i));\n }\n }\n }\n/* .................................................................... */\n\n // ICP variables\n Eigen::Affine3f transform ; // Eigen transformation between filtered_map et filtered_scan \n PointCloudT aligned ; // filtered_scan transformed according to the transformation find by ICP \n\n if(filtered_scan->points.size() > 2){\n /* Align filtered_scan (source) to filtered_map (target)*/ \n if(icpAlign(filtered_map, filtered_scan, kdtree, aligned, transform)){\n\n /* Applied the transformation find by the ICP in all the feaure point*/\n pcl::transformPointCloud (*features, *aligned_tmp, transform);\n /* dataAssociation between the map and the aligned feature */\n sonarMap::dataAssociation(aligned_tmp,TdataAss,erasedId);\n /* piblication of the transformation to update the kalman */\n sonarMap::publishSonarTransform(transform);\n\n return true;\n }\n } else if(filtered_scan->points.size() > 1){\n if(sonarMap::matchTwoPoints(filtered_map, filtered_scan, kdtree, transform)){\n pcl::transformPointCloud (*features, *aligned_tmp, transform);\n if(std::abs(transform.matrix()(0,3)) > 1.5 || std::abs(transform.matrix()(1,3)) > 1.5){\n // std::cout << \"hahahahahahha\" << std::endl;\n return false;\n }\n ROS_ERROR(\"TWO POINTS\");\n sonarMap::dataAssociation(aligned_tmp,TdataAss, erasedId);\n sonarMap::publishSonarTransform(transform);\n return true;\n }\n } else {\n // ROS_ERROR(\"LESS THAN TWO POINTS\");\n }\n aligned_tmp->clear();\n return false;\n}\n\n\nvoid sonarMap::dataAssociation(PointCloudT::Ptr aligned, std::vector<int> & TdataAss, std::vector<int> & erasedId ){\n\n //* Create a kd-tree from map \n pcl::KdTreeFLANN<PointT> kdtree5;\n kdtree5.setInputCloud(map);\n //* K nearest search variables \n int K = 5; // the number of neighbors to search for \n std::vector<int> pidx(K); // the resultant indices of the K neighboring points\n std::vector<float> psq(K); // the resultant squared distances to the 
K neighboring points \n int pidxSize; // number of neighbors found\n\n std::vector<int> p1; // aligned index\n std::vector<int> p2; // map index\n std::vector<float> dist; // Indexes to points not associated\n std::vector<int> left; // Indexes to points not associated\n\n std::vector <int>::iterator it;\n int nPosition;\n int idx; // id of the closest point \n int idp2;\n\n\n\n\n\n bool end = false;\n int cptTmp = 0 ;\n for(size_t i = 0; i < aligned->points.size(); ++i){\n pidxSize = kdtree5.nearestKSearch(aligned->points.at(i), K, pidx, psq);\n if(pidxSize > 0)\n {\n // Find the closest definite feature within certain distance\n idx = 0;\n for (int j = 0; j < pidxSize; ++j)\n {\n if(pidx[j] < map->points.size() && map->points.at(pidx[j]).intensity >= 2.0 && psq[j] < 1.5){\n idx = j;\n break;\n }\n }\n\n if(psq[idx] < 1.5 && pidx[idx] < map->points.size()){\n // Close enough to a current feature: ASSOCIATED \n it = find(p2.begin(), p2.end(), pidx[idx]);\n if (it != p2.end()){\n nPosition = distance(p2.begin(), it);\n // left.push_back(p1.at(nPosition));\n if(psq[idx] < dist.at(nPosition)){\n p1.at(nPosition) = i;\n dist.at(nPosition) = psq[idx];\n }\n } else {\n p1.push_back(i);\n p2.push_back(pidx[idx]);\n dist.push_back(psq[idx]);\n }\n \n } else {\n // Probably a new feature\n left.push_back(i);\n }\n\n }\n }\n // for(size_t i = 0; i < aligned->points.size(); ++i){\n \n // pidxSize = kdtree5.nearestKSearch(aligned->points.at(i), K, pidx, psq);\n\n // do{\n\n // //* if neighbors finded \n // if(pidxSize > 0)\n // {\n // //* Find the closest definite feature within certain distance\n // idx = 0;\n // for (int j = 0; j < pidxSize; ++j)\n // {\n // //* keep the closest neighbors with an intensity less than 2.0 \n // if( map->points.at(pidx[j]).intensity >= 2.0 && psq[j] < 1.5 && pidx[j] < map->points.size() ){\n // idx = j;\n // break;\n // }\n // }\n\n \n \n\n // //* Close enough to a current feature: ASSOCIATED\n // if(psq[idx] < 1.5 && pidx[idx] < 
map->points.size() && cptTmp != pidxSize+1)\n // {\n\n // //* try to find the value of pidx[idx] in the range (p2.begin(), p2.end())\n // //* return an iterator is the value is finded and the last position of the range otherwise\n // it = find(p2.begin(), p2.end(), pidx[idx]);\n \n // //* index find : Feature already associated \n // if (it != p2.end() ){\n\n // nPosition = distance(p2.begin(), it);\n // // if new associate is closest than then the previous, the association is changed \n // if(psq[idx] < dist.at(nPosition)){\n // p1.at(nPosition) = i;\n // dist.at(nPosition) = psq[idx];\n // end = true;\n \n // } else {\n // cptTmp++;\n // }\n // } else {\n\n // //* Association aligned point i and map point pidx[idx] with the sqr distance psq[idx]\n // p1.push_back(i);\n // p2.push_back(pidx[idx]);\n // dist.push_back(psq[idx]);\n // end = true;\n // }\n\n // } else {\n // // Probably a new feature\n // left.push_back(i);\n // end = true;\n // }\n \n\n // }\n // }\n // while(end==false);\n // }\n\n std::vector<int> tmp (aligned->points.size(),-3);\n TdataAss.clear();\n TdataAss = tmp ;\n for (int i = 0; i < p1.size(); ++i)\n {\n TdataAss.at(p1[i]) = p2[i];\n }\n for (int i = 0; i < left.size(); ++i)\n {\n TdataAss.at(left[i]) = -1;\n }\n\n\n \n\n \n/* .................................................................... 
*/\n\n //* Change the intensity of the map point associated : increase certainty detected points\n sonarMap::changeIntensity(0.0, 4.0, 0.2, p2);\n\n //* Decrease certainty detected points \n //* Create dtree from aligned \n pcl::KdTreeFLANN<PointT> kdtree6;\n kdtree6.setInputCloud(aligned);\n std::vector<int> pointIdxRadiusSearch; // the resultant indices of the neighboring points \n std::vector<float> pointRadiusSquaredDistance; // the resultant squared distances to the neighboring points\n\n for(size_t i = 0; i < map->points.size(); ++i){\n try{\n if(map->points.at(i).intensity < 2.0){ \n if ( kdtree6.radiusSearch (map->points.at(i), 5.0, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0 ){\n map->points.at(i).intensity -= 0.1;\n }\n }\n }\n catch(int e){\n // ROS_WARN(\"OUT OF BOUNDS\");\n // std::cout << \"i: \" << i << \"size: \" << map->points.size() << std::endl;\n // ROS_WARN(\"OUT OF BOUNDS\");\n }\n }\n\n\n/* .................................................................... */\n\n/* Add new points */ \n/* detail : add left points : aligned point without map corresponding */\n // Add new points\n // std::cout << \"new points\" << std::endl;\n for(size_t i = 0; i < left.size(); ++i){\n aligned->points.at(left.at(i)).intensity = 1.0;\n map->points.push_back(aligned->points.at(left.at(i)));\n\n PointCloudT::Ptr tmp (new PointCloudT());\n tmp->points.push_back(aligned->points.at(left.at(i)));\n pillarPoints.push_back(tmp);\n\n PointCloudT::Ptr tmp2 (new PointCloudT());\n tmp2->points.push_back(features->points.at(left.at(i)));\n featurePoints.push_back(tmp2);\n\n featureCount.push_back(1);\n }\n/* .................................................................... 
*/\n\n\n/* update the map points according to theirs corresponding points */\n/* detail : update the time number where the feature have been seen */ \n for(size_t i = 0; i < p2.size(); ++i){\n\n if(p2[i] >= map->points.size() && map->points[p2[i]].intensity < 2.0){\n continue;\n }\n featureCount[p2[i]]++; \n pillarPoints[p2[i]]->points.push_back(aligned->points.at(p1[i]));\n featurePoints[p2[i]]->points.push_back(features->points.at(p1[i]));\n map->points.at(p2[i]).x = (map->points.at(p2[i]).x*(featureCount[p2[i]]-1)+aligned->points.at(p1[i]).x) / featureCount[p2[i]];\n map->points.at(p2[i]).y = (map->points.at(p2[i]).y*(featureCount[p2[i]]-1)+aligned->points.at(p1[i]).y) / featureCount[p2[i]];\n }\n // std::cout << \"finished updating featur\" << std::endl;\n/* .................................................................... */\n\n\n/* ..................................................................................................................................................... */\n\n\n // std::cout << \"pillars\" << std::endl;\n // add pillars estimated outlines\n tf::StampedTransform transform;\n try{\n listener_.lookupTransform(\"/odom\", \"/body\", ros::Time(0), transform);\n }\n catch (tf::TransformException ex){\n std::cout << \"exception: \" << ex.what() << std::endl;\n }\n float theta;\n PointT p;\n for(size_t i = 0; i < p1.size(); ++i){\n if(p2[i] < map->points.size() && map->points[p2[i]].intensity >= 2.0){\n theta = atan2(aligned->points.at(p1.at(i)).y - transform.getOrigin().y(), aligned->points.at(p1.at(i)).x - transform.getOrigin().x());\n p.x = aligned->points.at(p1.at(i)).x - cos(theta)*0.0;\n p.y = aligned->points.at(p1.at(i)).y - sin(theta)*0.0;\n pillars->points.push_back(p);\n }\n }\n sonarMap::pruneMap(erasedId);\n/* ..................................................................................................................................................... 
*/\n\n}\n\n\n\n\n\n\n\n\n\n\nbool sonarMap::intializeMapPillar(const PointCloudT::Ptr scan, PointCloudT::Ptr mapPtr, const int size, int sqr_distance){\n if(mapPtr->points.size() >= size) return true;\n\n // check if the distance between the new points and the map points : here less than 1.O \n bool toClose;\n for(size_t i = 0; i < scan->points.size(); ++i){\n toClose = false;\n for(size_t j = 0; j < mapPtr->points.size(); ++j){\n if(utils::Distance(mapPtr->points.at(j), scan->points.at(i)) < sqr_distance){\n toClose = true;\n }\n }\n // if the point is further than all the map points, we can add it on the map \n if(!toClose){ \n /* Add new point to map with big intensity */\n mapPtr->points.push_back(scan->points.at(i));\n mapPtr->points.back().intensity = 4.0;\n\n /* Add new point on the pillarPoins */\n PointCloudT::Ptr tmp (new PointCloudT());\n tmp->points.push_back(scan->points.at(i));\n pillarPoints.push_back(tmp);\n\n /* Add new point on the featurePoints */\n PointCloudT::Ptr tmp2 (new PointCloudT());\n *tmp2 += *tmp; \n featurePoints.push_back(tmp2);\n\n /* the new point has been seen 1 time for the moment */\n featureCount.push_back(1);\n }\n }\n mapPtr->header.frame_id = scan->header.frame_id;\n\n // Check to see if map is initialized now\n if(mapPtr->points.size() >= size){\n return true;\n } \n \n return false;\n}\n\n\n\n\n\n\n\n\n\n\n// add the new point on the map\nbool sonarMap::intializeMapWall(const PointCloudTNormal::Ptr scan, PointCloudTNormal::Ptr mapPtr, const int size){\n if(mapPtr->points.size() >= size) return true;\n\n // use for loops to check new points, ok due to small size.\n bool toClose;\n for(size_t i = 0; i < scan->points.size(); ++i){\n toClose = false;\n for(size_t j = 0; j < mapPtr->points.size(); ++j){\n if(utils::Distance(mapPtr->points.at(j), scan->points.at(i)) < 1.0){\n toClose = true;\n }\n }\n // if(!toClose && map->points.size() < 4){ // Add new point to map\n if(!toClose){ // Add new point to map\n 
mapPtr->points.push_back(scan->points.at(i));\n mapPtr->points.back().intensity = 4.0;\n }\n }\n mapPtr->header.frame_id = scan->header.frame_id;\n // Check to see if map is initialized now\n if(mapPtr->points.size() >= size){\n return true;\n }\n\n return false;\n}\n\nvoid sonarMap::passThroughFilter(const PointCloudT::Ptr scan, PointCloudT::Ptr filtered, const float lim_low){\n filtered->header = scan->header;\n for(size_t i = 0; i < scan->points.size(); ++i){\n if(scan->points.at(i).intensity >= lim_low)\n filtered->points.push_back(scan->points.at(i));\n }\n}\n\nbool sonarMap::icpAlign(const PointCloudT::Ptr map, const PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, PointCloudT &aligned, Eigen::Affine3f &trans){\n pcl::IterativeClosestPoint<PointT, PointT> icp;\n icp.setInputSource(scan);\n icp.setInputTarget(map);\n icp.setMaxCorrespondenceDistance (5.0);\n // icp.setRANSACOutlierRejectionThreshold (1.0);\n // icp.setMaximumIterations(500);\n // icp.setTransformationEpsilon (1e-8);\n icp.setEuclideanFitnessEpsilon (1.0);\n icp.align(aligned);\n\n // align point : successful\n if(icp.getFitnessScore() < 1.0 && icp.hasConverged()){\n trans = icp.getFinalTransformation();\n Eigen::Vector3f ea= trans.matrix().block<3,3>(0,0).eulerAngles(0, 1, 2);\n sonarMap::minimizeEuler(ea);\n if(std::abs(ea[2]) > M_PI/9){\n std::cout << \" ROTATION TO BIG\" << std::endl;\n return false;\n }\n\n // PointCloudT::Ptr tmp_aligned(&aligned);\n // double free or corruption error\n // Stupid, use for loop to copy to PointCloud::Ptr\n PointCloudT::Ptr tmp_aligned(new PointCloudT());\n for(size_t i = 0; i < aligned.points.size(); ++i){\n tmp_aligned->points.push_back(aligned.points[i]);\n }\n\n pcl::KdTreeFLANN<PointT> kdtree3;\n kdtree3.setInputCloud(tmp_aligned);\n\n // std::vector<int> pointIdxRadiusSearch;\n // std::vector<float> pointRadiusSquaredDistance;\n\n // std::vector<int> featureIdx(features->points.size());\n // int count = 0;\n // for(size_t i = 0; i < 
map->points.size(); ++i){\n // if ( kdtree3.radiusSearch (map->points.at(i), 3.0, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0 ){\n // if(featureIdx[pointIdxRadiusSearch[0]] == 0){\n // featureIdx[pointIdxRadiusSearch[0]] = 1;\n // count++;\n // }\n // }\n // }\n\n\n std::vector<int> pointIdxRadiusSearch;\n std::vector<float> pointRadiusSquaredDistance;\n std::vector<int> pIdxRS;\n std::vector<float> pRSD;\n\n std::vector<int> featureIdx(features->points.size());\n int count = 0;\n int featureNeighbours = 0;\n int mapNeighbours = 0;\n for(size_t i = 0; i < map->points.size(); ++i){\n // std::cout << \"i: \" << i << \" size: \" << map->points.size() << std::endl;\n featureNeighbours = kdtree3.radiusSearch (map->points.at(i), 2.0, pointIdxRadiusSearch, pointRadiusSquaredDistance);\n mapNeighbours = kdtree.radiusSearch (map->points.at(i), 4.0, pIdxRS, pRSD);\n if(featureNeighbours > 0){\n if(featureNeighbours-mapNeighbours > 0){\n featureIdx[pointIdxRadiusSearch[0]] = 1;\n count++;\n }\n else{\n for(size_t j = 0; j < featureNeighbours; ++j){\n // std::cout << \"hmmmmm: \" << j << \"max: \" << featureNeighbours << \"map\" << mapNeighbours << std::endl;\n featureIdx[pointIdxRadiusSearch[j]] = 1;\n }\n }\n }\n }\n\n\n if(count > 0){\n ROS_WARN(\"##############\");\n ROS_WARN(\"ALIGNING AGAIN\");\n ROS_WARN(\"##############\");\n // More than one detected feature has been aligned to the same map feature.\n // Need to remove and align again.\n PointCloudT::Ptr new_aligned (new PointCloudT());\n for(size_t i = 0; i < tmp_aligned->points.size(); ++i){\n if(featureIdx[i] == 1){\n new_aligned->points.push_back(tmp_aligned->points[i]);\n }\n }\n\n icp.setInputSource(new_aligned);\n icp.setInputTarget(map);\n icp.setMaxCorrespondenceDistance (3.0);\n // icp.setRANSACOutlierRejectionThreshold (1.0);\n // icp.setMaximumIterations(500);\n // icp.setTransformationEpsilon (1e-8);\n icp.setEuclideanFitnessEpsilon (1.0);\n icp.align(aligned);\n if(icp.getFitnessScore() < 1.0 
&& icp.hasConverged()){\n Eigen::Affine3f trans2;\n trans2 = icp.getFinalTransformation();\n trans = trans2 * trans;\n }\n }\n else{\n // std::cout << \"NOT aligned again\" << std::endl;\n }\n\n\n }\n\n // check that the transformation is sensible(does not have really small or really big values\n // Just put the values small and big enough\n for(int i = 0; i < trans.matrix().rows(); ++i){\n for(int j = 0; j < trans.matrix().cols(); ++j){\n if(trans.matrix()(i,j) != 0){\n if( std::abs(trans.matrix()(i,j)) < 1*std::pow(10,-20) || 1*std::pow(10,10) < std::abs(trans.matrix()(i,j)) ){\n // if( 10000000 < std::abs(trans.matrix()(i,j)) ){\n ROS_WARN(\"Nonsensical transformation\");\n // std::cout << trans.matrix() << std::endl;\n return false;\n }\n }\n }\n }\n return true;\n}\n\n\nvoid sonarMap::changeIntensity(float limLow, float limHigh, float change){\n for(size_t i = 0; i < map->points.size(); ++i){\n if(map->points.at(i).intensity >= limLow && map->points.at(i).intensity <= limHigh){\n map->points.at(i).intensity += change;\n }\n }\n}\n\nvoid sonarMap::changeIntensity(float limLow, float limHigh, float change, std::vector<int> idx){\n for(size_t i = 0; i < idx.size(); ++i){\n if(idx.at(i) >= map->points.size()){\n ROS_WARN(\"WHAT THE FUCKING FUCK\");\n break;\n }\n if(map->points.at(idx.at(i)).intensity >= limLow && map->points.at(idx.at(i)).intensity <= limHigh){\n map->points.at(idx.at(i)).intensity += change;\n }\n }\n}\n\nvoid sonarMap::pruneMap(std::vector<int> & erasedId ){\n int tmpi = map->points.size();\n int feat = 0;\n for(int i = 0; i < map->points.size(); ++i){\n if(map->points.at(i).intensity >= 2.0){\n feat++;\n }\n }\n pcl::PointIndices pi;\n pcl::PassThrough<PointT> pass(true);\n pass.setInputCloud(map);\n pass.setFilterFieldName(\"intensity\");\n pass.setFilterLimits(0.0, 30);\n //pass.setFilterLimitsNegative (true);\n pass.filter(*map);\n pass.getRemovedIndices(pi);\n\n int feat2 = 0;\n for(int i = 0; i < map->points.size(); ++i){\n 
if(map->points.at(i).intensity >= 2.0){\n feat2++;\n }\n }\n\n if(feat != feat2){\n ROS_WARN(\"############################################\");\n ROS_WARN(\"############################################\");\n ROS_WARN(\"FUCKING PASSTHROUGH\");\n ROS_WARN(\"############################################\");\n ROS_WARN(\"############################################\");\n }\n\n\n if(pi.indices.size() > 0){\n ROS_WARN(\"############################################\");\n std::cout << \"indices: \";\n }\n for(size_t i = pi.indices.size(); i-- > 0;){\n erasedId.push_back(pi.indices[i]);\n featureCount.erase(featureCount.begin() + pi.indices[i]);\n pillarPoints.erase(pillarPoints.begin() + pi.indices[i]);\n featurePoints.erase(featurePoints.begin() + pi.indices[i]);\n std::cout << pi.indices[i] << \", \";\n // featureCount.at(i) = -1;\n }\n if(pi.indices.size() > 0){\n std::cout << \"\" << std::endl;\n ROS_WARN(\"############################################\");\n }\n // featureCount.erase(remove(featureCount.begin(), featureCount.end(), -1), featureCount.end());\n}\n\nvoid sonarMap::minimizeEuler(Eigen::Vector3f &euler){\n if(euler[0] > M_PI/4){\n euler[0] = euler[0] - M_PI;\n euler[1] = -euler[1];\n euler[2] = euler[2] - M_PI;\n } else if(euler[0] < -M_PI/4){\n euler[0] = euler[0] + M_PI;\n euler[1] = -euler[1];\n euler[2] = euler[2] + M_PI;\n }\n if(euler[1] > M_PI/4){\n euler[0] = -euler[0];\n euler[1] = euler[1] - M_PI;\n euler[2] = -euler[2];\n } else if(euler[1] < -M_PI/4){\n euler[0] = -euler[0];\n euler[1] = euler[1] + M_PI;\n euler[2] = -euler[2];\n }\n if(euler[2] > M_PI)\n euler[2] = euler[2]-2*M_PI;\n if(euler[2] < -M_PI)\n euler[2] = euler[2]+2*M_PI;\n}\n\n\nbool sonarMap::matchTwoPoints(PointCloudT::Ptr map, PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, Eigen::Affine3f &trans){\n // ROS_ERROR(\"MatcheTwoPoints\");\n std::vector<int> pi1(2);\n std::vector<float> pd1(2);\n std::vector<int> pi2(2);\n std::vector<float> pd2(2);\n 
if(kdtree.nearestKSearch(scan->points.at(0), 2, pi1, pd1) > 0){\n if(kdtree.nearestKSearch(scan->points.at(1), 2, pi2, pd2) > 0){\n // get the distance between the two points from the scan\n float dstScannedP = utils::Distance(scan->points.at(0), scan->points.at(1));\n // Check to see of the closest points is the same for both scanned points.\n if(pi1[0] != pi2[0]){\n // The points are not closest to the same points, Check the distance between the found points\n float dstFoundP = utils::Distance(map->points.at(pi1[0]), map->points.at(pi2[0]));\n if(std::abs(dstFoundP - dstScannedP) < std::min(0.1*dstFoundP, 1.0)){\n // close to being the same distance between the points.\n // Find transformation between the points\n trans = sonarMap::transformationBetweenPairsOfPoints(map->points.at(pi1[0]), map->points.at(pi2[0]), scan->points.at(0), scan->points.at(1));\n return true;\n }\n }else{\n // Need to figure out if any other combination can be used (pi1[0],pi2[1]), (pi1[1],pi2[0])\n float d12 = utils::Distance(map->points.at(pi1[0]), map->points.at(pi2[1]));\n float d21 = utils::Distance(map->points.at(pi1[1]), map->points.at(pi2[0]));\n float combined1 = 0;\n float combined2 = 0;\n if(std::abs(d12 - dstScannedP) < 0.5){\n // Combination 12 is possible\n // Find distance between both Matches\n float p10 = utils::Distance(scan->points.at(0), map->points.at(pi1[0]));\n float p21 = utils::Distance(scan->points.at(1), map->points.at(pi2[1]));\n combined1 = p10 + p21 + std::abs(p10 - p21);\n\n if(std::abs(d21 - dstScannedP) < 0.5){\n // Combination 12 is possible\n // Find distance between both Matches\n float p11 = utils::Distance(scan->points.at(0), map->points.at(pi1[1]));\n float p20 = utils::Distance(scan->points.at(1), map->points.at(pi2[0]));\n combined2 = p11 + p20 + std::abs(p11 - p20);\n if(combined2 < combined1){\n trans = sonarMap::transformationBetweenPairsOfPoints(map->points.at(pi1[1]), map->points.at(pi2[0]), scan->points.at(0), scan->points.at(1));\n return 
true;\n }\n }\n\n trans = sonarMap::transformationBetweenPairsOfPoints(map->points.at(pi1[0]), map->points.at(pi2[1]), scan->points.at(0), scan->points.at(1));\n return true;\n }\n\n }\n }\n }\n return false;\n}\n\nEigen::Affine3f sonarMap::transformationBetweenPairsOfPoints(PointT &a1, PointT &a2, PointT &b1, PointT &b2){\n // get rotation and translation on xy plane\n\n // Find center of target\n float target_x = (a1.x + a2.x)/2;\n float target_y = (a1.y + a2.y)/2;\n\n // Find center of start\n float start_x = (b1.x + b2.x)/2;\n float start_y = (b1.y + b2.y)/2;\n\n // find ideal angle\n // float target_angle = std::atan2(a2.y-a1.y, a2.x-a1.x);\n // float start_angle = std::atan2(b2.y-b1.y, b2.x-b1.x);\n // float angleDiff = target_angle - start_angle;\n\n // Eigen::Affine3f transformT1 = Eigen::Affine3f::Identity();\n // transformT1.translation() << -start_x, -start_y, 0;\n // Eigen::Affine3f transformR = Eigen::Affine3f::Identity();\n // transformR.rotate(Eigen::AngleAxisf (angleDiff, Eigen::Vector3f::UnitZ()));\n Eigen::Affine3f transform = Eigen::Affine3f::Identity();\n transform.translation() << target_x-start_x, target_y-start_y, 0;\n\n return transform;\n // return transformT2*transformR*transformT1;\n}\n\nvoid sonarMap::publishSonarTransform(Eigen::Affine3f translation){\n\n tf::StampedTransform stampedTr;\n Eigen::Affine3d trans;\n trans.matrix() = translation.matrix().cast<double>();\n tf::transformEigenToTF(trans, stampedTr);\n\n geometry_msgs::Transform m;\n tf::transformEigenToMsg (trans, m);\n pubTransform_.publish(m);\n\n // tf::TransformBroadcaster br;\n // br.sendTransform(tf::StampedTransform(stampedTr, ros::Time::now(), \"/odom_sonar\", \"/odom\"));\n // sb.updateBuffer(translation);\n\n}\n\nbool sonarMap::publishWallTransform(Eigen::Affine3f translation){\n\n\n Eigen::Quaternionf q(translation.rotation());\n\n // Check that the rotation is not to exessive.\n Eigen::Vector3f ea = translation.rotation().eulerAngles(0, 1, 2);\n\n 
sonarMap::minimizeEuler(ea);\n // std::cout << \"Euler: \" << ea[0]/M_PI*180 << \" \" << ea[1]/M_PI*180 << \" \" << ea[2]/M_PI*180 << std::endl;\n if(std::abs(ea[2]) > M_PI/6) return false;\n if(translation.matrix()(0,3) > 3.0) return false;\n if(translation.matrix()(1,3) > 3.0) return false;\n if(translation.matrix()(2,3) > 3.0) return false;\n\n\n // // Get the current position and check translation\n // tf::StampedTransform transform;\n // try{\n // listener_.lookupTransform(\"/odom\", \"/body\", ros::Time(0), transform);\n // }\n // catch (tf::TransformException ex){\n // std::cout << \"exception: \" << ex.what() << std::endl;\n // }\n\n // Eigen::VectorXf v(4);\n // v(0) = transform.getOrigin().x();\n // v(1) = transform.getOrigin().y();\n // v(2) = transform.getOrigin().z();\n // v(3) = 1;\n // Eigen::VectorXf out(4);\n // out = translation.matrix()*v - v;\n // // std::cout << \"Translation: \" << out(0) << \" \" << out(1) << \" \" << out(2) << std::endl;\n\n // if(out.cwiseAbs().maxCoeff() > 5.0) return false;\n\n // // Lets find the new heading based on the translation and new heading\n // double roll, pitch, yaw;\n // tf::Matrix3x3(transform.getRotation()).getRPY(roll, pitch, yaw);\n // Eigen::Vector3f ea2;\n // ea2 << roll, pitch, yaw;\n // // std::cout << \"thetas \" << ea[2] << \" \" << ea2[2] << std::endl;\n // sonarMap::minimizeEuler(ea2);\n\n\n\n // geometry_msgs::Pose2D p2d;\n // p2d.x = out(0);\n // p2d.y = out(1);\n // p2d.theta = ea[2]-ea2[2];\n\n geometry_msgs::Transform m;\n m.rotation.x = q.x();\n m.rotation.y = q.y();\n m.rotation.z = q.z();\n m.rotation.w = q.w();\n\n m.translation.x = translation.matrix()(0,3);\n m.translation.y = translation.matrix()(1,3);\n m.translation.z = translation.matrix()(2,3);\n\n pubTransform_.publish(m);\n return true;\n\n}\n\nstd::vector<PointCloudT::Ptr> sonarMap::returnPillarPoints(void){\n std::vector<PointCloudT::Ptr> tmp;\n for(size_t i = 0; i < map->points.size(); ++i){\n if(map->points[i].intensity >= 
2.0){\n tmp.push_back(pillarPoints[i]);\n }\n }\n return tmp;\n}\n\nstd::vector<PointCloudT::Ptr> sonarMap::returnFeaturePoints(void){\n std::vector<PointCloudT::Ptr> tmp;\n for(size_t i = 0; i < map->points.size(); ++i){\n if(map->points[i].intensity >= 2.0){\n tmp.push_back(featurePoints[i]);\n }\n }\n return tmp;\n}\n\nvoid sonarMap::setParameters(){\n par_.outputTransform = loadParam<string>(\"outputTransform\", nh_);\n par_.outputMapWalls = loadParam<string>(\"outputMapWalls\", nh_);\n par_.outputMapWallsAligned = loadParam<string>(\"outputMapWallsAligned\", nh_);\n}\n" }, { "alpha_fraction": 0.7324561476707458, "alphanum_fraction": 0.7390350699424744, "avg_line_length": 64.28571319580078, "blob_id": "b32c06331472323cfdbd3c470abf4ad48bdf8751", "content_id": "a184fb7842bf3fc5416a3aa8d03511e2698833e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 456, "license_type": "no_license", "max_line_length": 123, "num_lines": 7, "path": "/stevens_pier_project/move_videoray/catkin_generated/package.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "set(_CATKIN_CURRENT_PACKAGE \"move_videoray\")\nset(move_videoray_MAINTAINER \"nathaniel <[email protected]>\")\nset(move_videoray_DEPRECATED \"\")\nset(move_videoray_VERSION \"0.0.0\")\nset(move_videoray_BUILD_DEPENDS \"geometry_msgs\" \"pcl_conversions\" \"pcl_ros\" \"roscpp\" \"rospy\" \"sensor_msgs\" \"std_msgs\" \"tf\")\nset(move_videoray_RUN_DEPENDS \"geometry_msgs\" \"pcl_conversions\" \"pcl_ros\" \"roscpp\" \"rospy\" \"sensor_msgs\" \"std_msgs\" \"tf\")\nset(move_videoray_BUILDTOOL_DEPENDS \"catkin\")" }, { "alpha_fraction": 0.8169013857841492, "alphanum_fraction": 0.8169013857841492, "avg_line_length": 34.5, "blob_id": "213b23a820271da37d8eb62714d283349f15168e", "content_id": "6f3df4ec187f01098be5337d4bbd10a31f6a1d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 
71, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "# smarc_data_tools\nTools for processing data sets and sensor protocols\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6693121790885925, "avg_line_length": 20.05555534362793, "blob_id": "c72816ec32a3a76d6e0bcdacfd2f72c53664a085", "content_id": "a2c9780adda2c028f4d69156fb020c752b0cc232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 378, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/stevens_pier_project/mapClasses/include/mapClasses/utils/utils.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#include \"utils.h\"\n\nnamespace utils{\n\n\tbool listenForTransform(std::string from, std::string to, tf::StampedTransform transform){\n\t\t\n\t\ttf::TransformListener listener;\n\t\ttry{\n\t\t\tlistener.lookupTransform(from, to, ros::Time(0), transform);\n\t\t\treturn true;\n\t\t}\n\t\tcatch (tf::TransformException ex){\n\t\t\tstd::cout << \"exception: \" << ex.what() << std::endl;\n\t\t\treturn false;\n\t\t}\n\t}\n\n}" }, { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.7448979616165161, "avg_line_length": 38.20000076293945, "blob_id": "6fa442a2af865c139802a22dc1e3f0f9d5dd34bc", "content_id": "f244cb631ed017dec077d3853ee2c80a28ab8317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 392, "license_type": "no_license", "max_line_length": 92, "num_lines": 10, "path": "/stevens_pier_project/mapClasses/CMakeFiles/mapclasses_generate_messages_py.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/mapclasses_generate_messages_py\"\n \"../../../devel/lib/python2.7/dist-packages/mapclasses/msg/_buff.py\"\n 
\"../../../devel/lib/python2.7/dist-packages/mapclasses/msg/__init__.py\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/mapclasses_generate_messages_py.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.6871351003646851, "alphanum_fraction": 0.6915643215179443, "avg_line_length": 28.742515563964844, "blob_id": "b4f2d689116c3e4c6c8077cb94785411899e4362", "content_id": "aa64985897825b1cc4445949e3ff0f91f4fd89f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4967, "license_type": "no_license", "max_line_length": 103, "num_lines": 167, "path": "/stevens_pier_project/mapping/src/beamClustering.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "//This program filters the map using algorithms in Probablist Robotics chapter 6\n#include <ros/ros.h>\n#include <vector>\n // PCL specific includes\n#include <pcl_ros/point_cloud.h>\n#include <pcl/common/transforms.h>\n#include <tf/transform_listener.h>\n#include <geometry_msgs/Twist.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <pcl/point_types.h>\n#include <pcl/PCLPointCloud2.h>\n#include <pcl_ros/transforms.h>\n#include <math.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/filters/radius_outlier_removal.h>\n#include <pcl/filters/passthrough.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/segmentation/extract_clusters.h>\n#include \"utils/utils.h\"\n\n// #define inputSonarScan\t\"/beamFilter\"\n#define inputSonarScan\t\"/scanFilter/scanFilter\"\n#define outputSonarScan\t\"beamClustering\"\n\ntypedef pcl::PointXYZI PointT;\ntypedef pcl::PointCloud<PointT> PointCloudT;\n\nusing namespace std;\nusing namespace Eigen;\nusing namespace utils::params;\n\nclass beamClustering{\npublic:\n ros::NodeHandle nh;\nprivate:\n\tros::Subscriber sub;\n\tros::Publisher pub;\n\tstd::vector<PointCloudT::Ptr> 
clouds;\n\tstd::vector<int> clouds_count;\n\n\ttf::TransformListener listener;\n\n\tint bufferSize, scanSize;\n\tfloat radiusSearch, radiusMinNeighbours;\n\tfloat ecTolerance, ecMinClusterSize;\n\npublic:\n\tbeamClustering(){\n\t\tnh = ros::NodeHandle(\"~\");\n\t\tsub = nh.subscribe(inputSonarScan, 1, &beamClustering::manageBuffer, this);\n\t\tpub = nh.advertise<PointCloudT >(outputSonarScan, 1);\n\n\t\t// Parameters\t\t//\n\t\tscanSize = loadParam<int>(\"scanSize\", nh);\n\t\tbufferSize = loadParam<int>(\"bufferSize\", nh);;\n\t\tradiusSearch = loadParam<double>(\"radiusSearch\", nh);\n\t\tradiusMinNeighbours = loadParam<double>(\"radiusMinNeighbours\", nh);\n\t\tecTolerance = loadParam<double>(\"ecTolerance\", nh);\n\t\tecMinClusterSize = loadParam<double>(\"ecMinClusterSize\", nh);\n\n\t\t// Initialize vector containing all cloud buffers and vector containing\n\t\t// how many scans each vector holds\n\t\tfor(size_t i = 0; i < bufferSize; ++i){\n\t\t\tPointCloudT::Ptr cloud_tmp (new PointCloudT());\n\t\t\tclouds.push_back(cloud_tmp);\n\t\t\tclouds_count.push_back(-1);\n\t\t}\n\t}\n\n\tprivate:\n\n\t// Recieves a single sonarscan in odom frame and places it in buffers.\n\tvoid manageBuffer(PointCloudT::Ptr cloud_tr){\n\t\tPointCloudT::Ptr cloud (new PointCloudT() );\n\t\t//std::cout << \"frame_id : \" << cloud_tr->header.frame_id << std::endl;\n\t\tcloud_tr->header.frame_id = \"/sonar\";\n\t pcl_ros::transformPointCloud(\"/odom\", *cloud_tr, *cloud, listener );\n\n// filing buffer\n\t\tfor(size_t i = 0; i < clouds.size(); ++i){\n\t\t\tif( clouds_count.at(i) >= 0 || i == 0){ // has been initalized\n\t\t\t\t*clouds.at(i) += *cloud;\n\t\t\t\tclouds_count.at(i)++;\n\t\t\t} else if( clouds_count.at(i-1) >= scanSize/bufferSize ){ // Initialize it\n\t\t\t\t*clouds.at(i) += *cloud;\n\t\t\t\tclouds_count.at(i)++;\n\t\t\t} else { // Not time to initalize buffer\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\t// If buffer is full, process the data.\n\t\t\tif(clouds_count.at(i) >= 
scanSize){\n\t\t\t\tprocessBuffer(clouds.at(i));\n\t\t\t\tclouds.at(i)->points.clear();\n\t\t\t\tclouds_count.at(i) = 0;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Takes in a buffer of sonarscans, removes outliers\n\t// and finds clusters and then the highest intensity within each cluster.\n\tvoid processBuffer(PointCloudT::Ptr cloud){\n\n\t\t// Start by removing noise/outliers using radius outlier removal.\n\t\tPointCloudT::Ptr cloud_fi (new PointCloudT());\n\n\t\tpcl::RadiusOutlierRemoval<PointT> outrem;\n\t // // build the filter\n\t // outrem.setInputCloud(cloud);\n\t // outrem.setRadiusSearch(radiusSearch);\n\t // outrem.setMinNeighborsInRadius(radiusMinNeighbours);\n\t // // apply filter\n\t // outrem.filter (*cloud_fi);\n\n\t\tcloud_fi = cloud;\n\n\t\t// Cluster the remaining data\n\t\tpcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);\n\t\ttree->setInputCloud (cloud_fi);\n\n\t\tstd::vector<pcl::PointIndices> cluster_indices;\n\t\tpcl::EuclideanClusterExtraction<PointT> ec;\n\t\tec.setClusterTolerance (ecTolerance); // 2cm\n\t\tec.setMinClusterSize (ecMinClusterSize);\n\t\tec.setMaxClusterSize (1000);\n\t\tec.setSearchMethod (tree);\n\t\tec.setInputCloud (cloud_fi);\n\t\tec.extract (cluster_indices);\n\n\t\tint j = 0, idx;\n\t\tPointT tmp_max;\n\t\tPointCloudT::Ptr cloud_cm (new PointCloudT());\n\t\tstd::vector<pcl::PointIndices>::const_iterator it;\n\t\tint i = 0;\n\t\tfor ( it = cluster_indices.begin (); it != cluster_indices.end (); ++it)\n\t\t{\n\t\t\tidx = 0;\n\t\t\ttmp_max.intensity = 0.0;\n\t\t\t// Find highest intensity point within each cluster.\n\t\t\tfor (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit){\n\t\t\t\tif(cloud_fi->points[*pit].intensity > tmp_max.intensity){\n\t\t\t\t\ttmp_max = cloud_fi->points[*pit];\n\t\t\t\t}\n\t\t\t}\n\t\t\tcloud_cm->points.push_back(tmp_max);\n\n\t\t}\n\t\tcloud_cm->header.frame_id = \"odom\";\n\t\tpub.publish(cloud_cm);\n\t}\n\n\n};\n\n\nint main (int argc, 
char** argv)\n{\n\tstd::cout << \"this\" << std::endl;\n\t// Initialize ROS\n\tros::init (argc, argv, \"beamClustering\");\n\n\tbeamClustering bc;\n\t// Spin\n\tros::spin ();\n\treturn(0);\n}\n" }, { "alpha_fraction": 0.6838799715042114, "alphanum_fraction": 0.7090020775794983, "avg_line_length": 34.775001525878906, "blob_id": "063f4d105bfe1ab016ebfc4315a686ecb57e3cf9", "content_id": "0e0d968c240129ddc3a5c6bbaf6b43c8f584017b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1433, "license_type": "no_license", "max_line_length": 114, "num_lines": 40, "path": "/stevens_pier_project/mapping/savedData/README.md", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "# SavedData\n This folder containes results of the filtering process code. \n You can find data collected after the scan matching slam \nand the graph-based slam. The pictures show the end map and \nrobot path but you can find more information in the Octave files. \n\n# Tree structure \n1. First level : name of the data set \nExample\n``` 2015-08-19-11-25-05 ```\n\n\n 2. Second level : pillar radius size \n Example\n ``` 02 ``` : Pillar radius of 0.2 m \n\n# File description \n* Data collected after the scan matching slam : \n``` pillar0X_path.txt ``` : Full 2D robot position (x,y).\n\n``` pillar0X.txt ``` : Map (i.e. 
landmark positiond) computed \nat each time of the scan matching.\n\n``` pillar02_P.png ``` : Picture of the result on [rviz](http://wiki.ros.org/rviz) where \nthe green line represent the robot path and the yellow points are \nthe map computed.\n\n* Data collected after the graph-based slam \n``` pillar02_OPTIM_B.g2o ``` : Back-end of the graph before the \noptimzation process\n\n``` pillar02_OPTIM_IG.g2o ``` : Back-end of the graph before the \noptimzation process and after the calculation of the best initial guess.\n\n``` pillar02_OPTIM_A.g2o ``` : Back-end of the graph after the \noptimization process \n\n``` pillar02_OPTIM_P.png ``` : Picture of the result on [Octave](http://octave.sourceforge.net/index.html) where \nthe green line represent the robot path and the yellow points are \nthe map computed.\n\n\n" }, { "alpha_fraction": 0.7760252356529236, "alphanum_fraction": 0.7760252356529236, "avg_line_length": 34.22222137451172, "blob_id": "b20da874cccef8707a008751738eafe22ae86b78", "content_id": "7b61a0da1902a23604ff59cb73172cba14a22719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 317, "license_type": "no_license", "max_line_length": 94, "num_lines": 9, "path": "/stevens_pier_project/mapClasses/CMakeFiles/mapclasses_generate_messages_lisp.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/mapclasses_generate_messages_lisp\"\n \"../../../devel/share/common-lisp/ros/mapclasses/msg/buff.lisp\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/mapclasses_generate_messages_lisp.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.7162551879882812, "alphanum_fraction": 0.7223469018936157, "avg_line_length": 33.27472686767578, "blob_id": "4818c6638a3e443db39cbd733d051e8d26db228d", "content_id": "dd02ca80f8c058bfcb8f71aeeeb9cc6f9c44d594", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3119, "license_type": "no_license", "max_line_length": 88, "num_lines": 91, "path": "/stevens_pier_project/mapClasses/include/mapClasses/scanFilter/scanFilter.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#ifndef SCANFILTER_H\n#define SCANFILTER_H\n\n#include <ros/ros.h>\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <mapClasses/utils/utils.h>\n#include <mapclasses/buff.h>\n\ntypedef pcl::PointXYZI PointT;\ntypedef pcl::PointCloud<PointT> PointCloudT;\n\nusing namespace std;\nusing namespace Eigen;\nusing namespace utils::params;\n\nstruct scanFilterParams{\n\tint delay; // number of sonar beams to ignore in the beginning\n\tfloat distLow; // minimum distance to object\n\tfloat distHigh; // maximum distance to object\n\tfloat intensityLow; // minimum intensity around object\n\tfloat intensityHigh; // minimum intensity of object\n\tfloat varLow; // minimum variance around object\n\tfloat varHigh; // maximum variance around object\n\tfloat peakDistance; // minimum distance between objects\n\tfloat pillarRadius; // radius of pillars\n\tstring inputSonarScan; // sonar topic straight from sonar\n\tstring outputSonarScan; // output topic\n\tstring outputBuff;\t\t// output buff\n\n\t// Initialize numbers as negative and strings as empty, that should never be the input.\n\tscanFilterParams():delay(-1),distLow(-1),distHigh(-1),intensityLow(-1),\n\t\tintensityHigh(-1),varLow(-1),varHigh(-1),peakDistance(-1),pillarRadius(-1),\n\t\tinputSonarScan(\"\"), outputSonarScan(\"\"){}\n};\n\nclass scanFilter{\n\n ros::NodeHandle* nh_;\n\t\tros::Subscriber sub;\n\t\tros::Publisher pub, pubPro, pubBuff_;\n\t\ttf::TransformListener listener;\n scanFilterParams par;\n int in;\n\npublic:\n\tscanFilter(ros::NodeHandle *nh){\n\t\t// nh = ros::NodeHandle(\"~\");\n\t\tnh_ = nh;\n\t\t//load parameters with 
mapping/parameters/scanFilter.yaml file\n\t\tsetParameters();\n\n\t\t// Suscribe to (mapping/parameters/scanFilter.yaml) inputSonarScan\n\t\t// In this case the topic is : /tritech_micron_node/sonarscan\n\t\t// param 1 : each message erase the next one\n\t\t// param scanFilter::subscribeCloud : function callback after each received msg\n\t\tsub = nh_->subscribe(par.inputSonarScan, 1, &scanFilter::subscribeCloud, this);\n\n\t\t// Publish to (mapping/parameters/scanFilter.yaml) outputSonarScan\n\t\t// In this case the topic is : scanFilter\n\t\t// a message of type PointCloud\n\t\t// param 1 : buffer size\n\t\tpub = nh_->advertise<pcl::PointCloud<PointT> >(par.outputSonarScan, 1);\n\n\t\t// Publish to (mapping/parameters/scanFilter.yaml) outputSonarScan\n\t\t// In this case the topic is : buff\n\t\t// a message of type mapclasses\n\t\t// param 1 : buffer size\n\t\tpubBuff_ = nh_->advertise<mapclasses::buff>(par.outputBuff, 1);\n\t\tin = 0;\n\t}\n\nprivate:\n\n\t// Takes in single sonar scan in form of a point cloud, filters it based on\n\t// distance and intensity and sends the maximum intensity point to a buffer\n\t// if it satisfies following criteria:\n\t// 1: Maximum intensity higher than a certain threshold.\n\t// 2: Has a certain number of high intensity neighboring points.\n\tvoid subscribeCloud(const pcl::PCLPointCloud2ConstPtr& inputCloud);\n\n\tvoid projectToPlane(PointCloudT::Ptr cloud);\n\n\tvoid processBeam(PointCloudT::Ptr cloud);\n\n\tvoid calcMeanVar(const std::vector<float> vec, float &ave, float &var);\n\n\tvoid setParameters();\n};\n\n#endif\n" }, { "alpha_fraction": 0.6266626119613647, "alphanum_fraction": 0.6363361477851868, "avg_line_length": 33.45833206176758, "blob_id": "7469b8dbe4be415adbf0cbec3a2a9edcab97dd22", "content_id": "7b64f52ce21b4ebd882c2ee90c51b75c14bd60b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9924, "license_type": "no_license", "max_line_length": 133, "num_lines": 
288, "path": "/stevens_pier_project/mapClasses/src/scanFilter.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "//This program filters the map using algorithms in Probablist Robotics chapter 6\n#include <vector>\n // PCL specific includes\n#include <pcl_ros/point_cloud.h>\n#include <pcl/common/transforms.h>\n#include <tf/transform_listener.h>\n#include <geometry_msgs/Twist.h>\n#include <pcl_conversions/pcl_conversions.h>\n#include <pcl/PCLPointCloud2.h>\n#include <pcl_ros/transforms.h>\n#include <math.h>\n#include \"pcl_ros/point_cloud.h\"\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/filters/voxel_grid.h>\n#include <pcl/filters/radius_outlier_removal.h>\n#include <pcl/filters/passthrough.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/segmentation/extract_clusters.h>\n#include <mapClasses/scanFilter/scanFilter.h>\n\n\n// Takes in single sonar scan in form of a point cloud, filters it based on\n// distance and intensity and sends the maximum intensity point to a buffer\n// if it satisfies following criteria:\n// 1: Maximum intensity higher than a certain threshold.\n// 2: Has a certain number of high intensity neighboring points.\n//\n// void scanFilter::subscribeCloud(PointCloudT::Ptr cloud)\nvoid scanFilter::subscribeCloud(const pcl::PCLPointCloud2ConstPtr& inputCloud)\n{\n\t// we ignore the first sonar scans\n\tif(in < par.delay){\n\t\tin++;\n\t\treturn;\n\t}\n\t// point cloud varibles\n\n\tPointCloudT::Ptr cloud (new PointCloudT()); // each point in a cloud is an intensity on one beam at the distance (x,y) of the sonar \n\n\n\t// // convert to pcl::PointCloud\n\tpcl::fromPCLPointCloud2(*inputCloud,*cloud);\n\n\t// pub.publish(cloud);\n\t// std::cout << \"hmmmmm\" << std::endl;\n\t// std::cout << \"topic : /tritech_micron_node/sonarscan : scanFilter\" << cloud->points.size() << std::endl; //797\n\tprocessBeam(cloud);\n\t// projectToPlane(cloud);\n}\n\nvoid scanFilter::projectToPlane(PointCloudT::Ptr 
cloud){\n\tfor(size_t i = 0; i < cloud->points.size(); ++i){\n\t\tcloud->points.at(i).z = 0;\n\t}\n\tprocessBeam(cloud);\n}\n\nvoid scanFilter::processBeam(PointCloudT::Ptr cloud){\n\t//std::cout << \" buffer \" << cloud->points.size();\n\n// param cloud_pu : will contain the local maximums selected\n\tPointCloudT::Ptr cloud_pu (new PointCloudT());\n\tcloud_pu->header.frame_id = cloud->header.frame_id; // Sonar\n\n\tfloat \tdist;\n\tbool\twithinRange = false;\n\tint \tmaxIdx = -1;\n\tstd::vector<int> vmaxIdx;\n\n\n\t// std::cout << \"dist 1: \" << utils::Distance(cloud->points[1])-utils::Distance(cloud->points[0]) << std::endl;\n\t// std::cout << \"dist 2: \" << utils::Distance(cloud->points[2])-utils::Distance(cloud->points[1]) << std::endl;\n\t// std::cout << \"dist 3: \" << utils::Distance(cloud->points[3])-utils::Distance(cloud->points[2]) << std::endl;\n\t// std::cout << \"dist 4: \" << utils::Distance(cloud->points[4])-utils::Distance(cloud->points[3]) << std::endl;\n\t// std::cout << \"dist 5: \" << utils::Distance(cloud->points[5])-utils::Distance(cloud->points[4]) << std::endl;\n\t// std::cout << \"dist 2: \" << utils::squaredDistance(cloud->points[1]) << std::endl;\n\t// std::cout << \"dist 3: \" << utils::squaredDistance(cloud->points[2]) << std::endl;\n\t// std::cout << \"dist 4: \" << utils::squaredDistance(cloud->points[4]) << std::endl;\n\t// std::cout << \"Intesity: \";\n\t// for(int i = 0; i < cloud->points.size(); ++i)\n\t// {\n\t// \tstd::cout << cloud->points[i].intensity << \", \";\n\t// }\n\t// std::cout << \" \" << std::endl;\n\n\t// first check if there is interference with the usbl system in this beam\n\t// Simple check if more than a certain number of points in a row is above a threshold discard beam\n\tint interference = 0;\n\tfor(size_t i = 0; i < cloud->points.size(); ++i){\n\t\tif(cloud->points[i].intensity > 130){\n\t\t\tinterference++;\n\t\t\t// return if we have found enough high intensity points, discard 
beam.\n\t\t\tif(interference > 20) return;\n\t\t} else {\n\t\t\tinterference = 0;\n\t\t}\n\t}\n\n\t// Walk through points\n\t// extract region of interest and extract the best local maximums\n\t//std::cout << \"extract region of interest\" << std::endl;\n\tint outOfRange = 0;\n\tfor(size_t i = 0; i < cloud->points.size(); ++i){\n\t\t// Check if point is within range\n\t\tdist = utils::squaredDistance(cloud->points.at(i));\n\t\tif(dist >= par.distLow && dist <= par.distHigh){\n\t\t\t// Check if Point is above threshold range.\n\t\t\tif(cloud->points.at(i).intensity > par.intensityHigh){\n\t\t\t\t// Check to see if we were within a high intensity area.\n\t\t\t\tif(outOfRange > 0) outOfRange=0;\n\t\t\t\tif(withinRange){\n\t\t\t\t\tif(cloud->points.at(i).intensity > cloud->points.at(maxIdx).intensity){\n\t\t\t\t\t\tmaxIdx = i;\n\t\t\t\t\t}\n\t\t\t\t} else { // Just entered a high intensity area.\n\t\t\t\t\tmaxIdx = i;\n\t\t\t\t\twithinRange = true;\n\t\t\t\t}\n\t\t\t} else if (withinRange && outOfRange < 5){\n\t\t\t\toutOfRange++;\n\n\t\t\t} else if (withinRange){ // Exited a high intensity area.\n\t\t\t\twithinRange = false;\n\t\t\t\toutOfRange = 0;\n\t\t\t\t// Check that the current max is far enough from the last max.\n\t\t\t\tif(vmaxIdx.size() > 0){\n\t\t\t\t\t// Far enough away from the last peak\n\t\t\t\t\tif(utils::squaredDistance(cloud->points.at(vmaxIdx.back()), cloud->points.at(maxIdx)) > par.peakDistance){\n\t\t\t\t\t\tvmaxIdx.push_back(maxIdx);\n\t\t\t\t\t} \n\t\t\t\t\t// Check if we should replace last peak.\n\t\t\t\t\telse if(cloud->points.at(maxIdx).intensity > cloud->points.at(vmaxIdx.back()).intensity) {\n\t\t\t\t\t\tvmaxIdx.back() = maxIdx;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvmaxIdx.push_back(maxIdx);\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\t// if(vmaxIdx.size() > 0){\n\t// \tstd::cout << \"count: \" << vmaxIdx.size() << std::endl;\n\t// }\n\n\t// Process each extracted maximum based on neighbouring beam points.\n\tint idx;\n\tint count;\n\tfloat 
mean=0.0f, var=0.0f;\n\tstd::vector<float> meanVar;\n\tfor(size_t j = 0; j < vmaxIdx.size(); ++j){\n\t\tcount = 0;\n\t\tidx = vmaxIdx.at(j);\n\t\tmeanVar.clear();\n\t\n\t\tfor(size_t i = idx - 10; i <= idx + 10; ++i){\n\t\t\tif(i < 0 || i >= cloud->points.size()) continue;\n\t\t\tmeanVar.push_back(cloud->points.at(i).intensity);\n\n\t\t\tif(cloud->points.at(i).intensity > par.intensityLow){\n\t\t\t\tcount++;\n\t\t\t}\n\t\t}\n\t\tif(count > meanVar.size()/4){\n\t\t// if(count > 5){\n\t\t\tcalcMeanVar(meanVar, mean, var);\n\t\t\t// std::cout << \"mean: \" << mean << \", var: \" << var << std::endl;\n\t\t\tif(var > par.varLow && var < par.varHigh){\n\t\t\t\tcloud_pu->points.push_back(cloud->points.at(vmaxIdx.at(j)));\n\t\t\t\t// std::cout << \"idx: \" << vmaxIdx.at(j) << \", mean: \" << mean << \", var: \" << var << \", \";\n\t\t\t\t// std::cout << \"intensity: \";\n\t\t\t\t// for(size_t i = idx - 10; i <= idx + 10; ++i){\n\t\t\t\t// \tif(i < 0 || i >= cloud->points.size()) continue;\n\t\t\t\t// \t// std::cout << cloud->points.at(i).intensity << \", \";\n\t\t\t\t// }\n\t\t\t\t// std::cout << \"\" << std::endl;\n\t\t\t}\n\t\t}\n\n\n\t}\n\n\n\t// std::cout << \"\" << std::endl;\n\t// std::cout << \" _ \" << cloud_pu->header.frame_id << std::endl ;\n\t// Extend found points avail from robot to estimate middle of pillar.\n\tmapclasses::buff bu;\n\tfloat x,y,l,theta;\n\tfor(size_t i = 0; i < cloud_pu->points.size(); ++i){\n\t\tx = cloud_pu->points.at(i).x;\n\t\ty = cloud_pu->points.at(i).y;\n\t\ttheta = atan2(y,x);\n\t\tl = sqrt(x*x+y*y);\n\t\tbu.dists.push_back(l);\n\t\tcloud_pu->points.at(i).x = cos(theta)*(l+par.pillarRadius);\n\t\tcloud_pu->points.at(i).y = sin(theta)*(l+par.pillarRadius);\n\t}\n\n\n\n\n\n\n\n\n\n\t// Time in wicht the transformation ( from buffer(parent) to sonar(child) ) is performed\n\tros::Time now = ros::Time::now(); \n\t// Cloud transformation from buffer to sonar : allow to transform a point expressed in sonar frame into buffer 
frame\n\tpcl_ros::transformPointCloud(\"/buffer\", *cloud_pu, *cloud_pu, listener);\n\tcloud_pu->header.frame_id=\"buffer\";\n\tfor(size_t i = 0; i < cloud_pu->points.size(); ++i){\n\t\tcloud_pu->points.at(i).z = 0;\n\t}\n\n\t//std::cout << \"HEURE \" << now.toSec() << \" \" << ros::Time(0).toSec() << \" : scanFilter \"<<std::endl ;\n\t// from odom(parent) to body(child) : allow to transform a point expressed in body frame into odom frame\n\ttf::StampedTransform tfOdomtoBody; \n\t// tranformation at a precise time \n\tlistener.lookupTransform(\"/odom\", \"/body\", ros::Time(0), tfOdomtoBody);\n\n\n\tdouble toDegree = 180 / M_PI;\n\ttf::Quaternion q = tfOdomtoBody.getRotation();\n\ttf::Vector3 v = tfOdomtoBody.getOrigin();\n\tdouble yaw, pitch, roll;\n\t// euler angles in radian \n\ttfOdomtoBody.getBasis().getRPY(roll, pitch, yaw);\n\n\t// define a new PointT for the 2d pose where the landmark have been seen\n\t// X,Y,Z -> X,Y, yaw\n\n\tPointT poseXYyaw ; \n\tposeXYyaw.x = v.getX(); \n\tposeXYyaw.y = v.getY();\n\tposeXYyaw.z = yaw /* * toDegree */ ; // yaw in radian !!!\n\n\t//std::cout << \" size befor :\" << poseXYyaw.z << \" : scanFilter \"<<std::endl;\n\tcloud_pu->points.push_back(poseXYyaw);\n\t//std::cout << \" size after :\" <<cloud_pu->points.back().z << \" : scanFilter \"<<std::endl;\n\n\t\n\t// convert cloud_pu to bu.cloud, resutat in bu.cloud\n\t// bu.cloud = cloud_pu;\n\tpcl::toROSMsg(*cloud_pu, bu.cloud);\n\n\t// Publish in topic pub : /scanFilterMA/scanFilter\n\tpub.publish(cloud_pu);\n\t //std::cout << \"cloud_pu : scanFilter\" << cloud_pu->points.size() << std::endl; //797\n\t// Publish in topic pubBuff_ : /scanFilterMA/buff\n\tpubBuff_.publish(bu);\n\n}\n\n\nvoid scanFilter::calcMeanVar(const std::vector<float> vec, float &ave, float &var){\n\n\tfloat cnt = 0.0f;\n\tfloat sum = 0.0f;\n\tfloat cnt2= 0.0f;\n\n\tfor(size_t i=0; i < vec.size(); ++i){\n\t\tcnt += vec.at(i);\n\t\tsum += i*vec.at(i);\n\t\tcnt2 += i*i*vec.at(i);\n\t}\n\tave = 
sum/cnt;\n\tvar = cnt2/cnt - ave*ave;\n}\n\n\nvoid scanFilter::setParameters(){\n\n\tpar.delay = loadParam<int>(\"delay\", nh_);\n\tpar.distLow = pow(loadParam<double>(\"distLow\",nh_), 2);\n\tpar.distHigh = pow(loadParam<double>(\"distHigh\",nh_), 2);\n\tpar.intensityLow = loadParam<double>(\"intensityLow\", nh_);\n\tpar.intensityHigh = loadParam<double>(\"intensityHigh\", nh_);\n\tpar.varLow = loadParam<double>(\"varLow\", nh_);\n\tpar.varHigh = loadParam<double>(\"varHigh\", nh_);\n\tpar.peakDistance = loadParam<double>(\"peakDistance\", nh_);\n\tpar.pillarRadius = 0.2 /* loadParam<double>(\"pillarRadius\", nh_)*/;\n\tpar.inputSonarScan = loadParam<string>(\"inputSonarScan\", nh_);\n\tpar.outputSonarScan = loadParam<string>(\"outputSonarScan\", nh_);\n\tpar.outputBuff = loadParam<string>(\"outputBuff\", nh_);\n\n}\n" }, { "alpha_fraction": 0.742514967918396, "alphanum_fraction": 0.742514967918396, "avg_line_length": 32.400001525878906, "blob_id": "3c08a43bbd2f650f5a15410973e4ef20ceec2715", "content_id": "603e9451f9c1d5f0d367bd6bb925563db2e3bfa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 334, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/stevens_pier_project/move_videoray/CMakeFiles/propFilter.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/propFilter.dir/src/propFilter.cpp.o\"\n \"../../../devel/lib/move_videoray/propFilter.pdb\"\n \"../../../devel/lib/move_videoray/propFilter\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/propFilter.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.6364716291427612, "alphanum_fraction": 0.6563706398010254, "avg_line_length": 36.831459045410156, "blob_id": "36f0b799b7a0225e5479a57fe6941ee39334842e", "content_id": "3ceb85314f478c798603df70e914d10f02f9ca72", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 3367, "license_type": "no_license", "max_line_length": 526, "num_lines": 89, "path": "/stevens_pier_project/mapping/CMakeLists.txt", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#set(CMAKE_CXX_FLAGS \"-std=c++0x ${CMAKE_CXX_FLAGS}\")\n#set(CMAKE_CXX_FLAGS \"-fPIC -std=c++0x -fpermissive\")\n\ncmake_minimum_required(VERSION 2.8.3)\nproject(mapping)\n\n\n ##---------------------------------------------------------------------------------- ## \n ##-------- useful for the dlib library --------------------------------------------- ##\n # Add the dlib library for svm classification\nfind_package(dlib 19.06 REQUIRED)\ninclude_directories(SYSTEM ${dlib_INCLUDE_DIRS})\n ## --------------------------------------------------------------------------------- ##\n\n## Find catkin macros and libraries\n## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)\n## is used, also find other catkin packages\nfind_package(catkin REQUIRED COMPONENTS\n geometry_msgs\n pcl_conversions\n pcl_ros\n roscpp\n sensor_msgs\n visualization_msgs\n std_msgs\n tf\n tf_conversions\n eigen_conversions\n mapclasses\n tf2_kdl\n)\n\ncatkin_package(\n DEPENDS\n CATKIN_DEPENDS roscpp\n INCLUDE_DIRS include\n LIBRARIES\n)\n\n #Enable support for C++11\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++11 -Wall\")\n SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib)\n LIST(APPEND CMAKE_MODULE_PATH \"${PROJECT_SOURCE_DIR}/cmake_modules\")\n# #ADD_SUBDIRECTORY(${PROJECT_SOURCE_DIR}/src)\n\n # find g2o lib\n find_package(G2O REQUIRED)\n IF(G2O_FOUND)\n include_directories(${G2O_INCLUDE_DIR})\n link_directories(${G2O_LIBRARY_DIRS})\n message(\"G2O lib is found:\" ${G2O_INCLUDE_DIR})\n ENDIF(G2O_FOUND)\n\n find_package(Eigen3 REQUIRED)\n find_package(CSparse REQUIRED)\n #find_package(Cholmod REQUIRED)\n include_directories(${CSPARSE_INCLUDE_DIR})\n 
include_directories(${EIGEN3_INCLUDE_DIR})\n #include_directories(${CHOLMOD_INCLUDE_DIR})\n\n ##---------------------------------------------------------------------------------- ##\n ##-------- useful for the G2O library ---------------------------------------------- ##\n\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n include\n)\n\n\n\nSET(G2O_LIBS g2o_cli g2o_ext_freeglut_minimal g2o_simulator g2o_solver_slam2d_linear g2o_types_icp g2o_types_slam2d g2o_core g2o_interface g2o_solver_csparse g2o_solver_structure_only g2o_types_sba g2o_types_slam3d g2o_csparse_extension g2o_opengl_helper g2o_solver_dense g2o_stuff g2o_types_sclam2d g2o_parser g2o_solver_pcg g2o_types_data g2o_types_sim3 cxsparse )\n\n ## --------------------------------------------------------------------------------- ##\n\n\nadd_library(lib_optim ${G2O_LIB_TYPE}\n include/optim/edge_se2_pointxy.cpp include/optim/se2.h include/optim/vertex_point_xy.h include/optim/edge_se2.cpp include/optim/edge_se2_pointxy.h include/optim/edge_se2.h include/optim/rand.h include/optim/vertex_point_xy.cpp include/optim/vertex_se2.h include/optim/vertex_se2.cpp include/optim/parameter_se2_offset.h include/optim/parameter_se2_offset.cpp include/optim/types_optim_slam2d.h include/optim/types_optim_slam2d.cpp include/optim/simulator.h include/optim/simulator.cpp include/optim/g2o_tutorial_slam2d_api.h\n)\n\nadd_library(optim include/optim/graphSlam.cpp )\n\ntarget_link_libraries(optim lib_optim ${G2O_LIBS})\n\n\nadd_executable(scanFilterMA src/scanFilter.cpp)\ntarget_link_libraries(scanFilterMA scanFilter ${catkin_LIBRARIES})\n\nadd_executable(mapBuilder src/mapBuilder.cpp)\ntarget_link_libraries(mapBuilder map scanBuffer optim ${dlib_LIBRARIES} ${catkin_LIBRARIES} )\n" }, { "alpha_fraction": 0.7601060271263123, "alphanum_fraction": 0.7680583000183105, "avg_line_length": 22.578125, "blob_id": "c2cfa3f608ce3b365557f63660e14f1992727d71", "content_id": "9fffbffb455993b04618bba082b578c2c0aae4b7", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1509, "license_type": "no_license", "max_line_length": 91, "num_lines": 64, "path": "/stevens_pier_project/mapClasses/CMakeLists.txt", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(mapclasses)\n\n#set(CMAKE_CXX_FLAGS \"-O4 -g -fPIC -std=c++0x -fpermissive\")\nset (CMAKE_POSITION_INDEPENDENT_CODE TRUE)\n\n# Add the dlib library for svm classification\n# include(/home/nacho/workspace/dlib-19.6/dlib/cmake)\nfind_package(dlib 19.06 REQUIRED)\nIF(DLIB_FOUND)\n include_directories(${DLIB_INCLUDE_DIR})\n link_directories(${DLIB_LIBRARY_DIRS})\n message(\"DLIB lib is found:\" ${DLIB_INCLUDE_DIR})\nENDIF(DLIB_FOUND)\n\nfind_package(catkin REQUIRED COMPONENTS\n\troscpp\n\tpcl_conversions\n \tpcl_ros\n \tsensor_msgs\n \tstd_msgs\n \tgeometry_msgs\n \tmessage_generation\n\ttf\n \ttf_conversions\n \teigen_conversions\n)\n\nadd_message_files(FILES\n\tbuff.msg\n)\n\ngenerate_messages(DEPENDENCIES\n\tstd_msgs\n\tsensor_msgs\n)\n\ncatkin_package(\n\tDEPENDS\n\tCATKIN_DEPENDS roscpp\n\tINCLUDE_DIRS include\n\tLIBRARIES map scanBuffer dlib\n)\n\nset(MAPPINGlibs\n sonarMap\n scanBuffer\n scanFilter\n utils\n)\n\ninclude_directories(include ${catkin_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS})\n\nadd_library(map src/map.cpp)\nadd_library(scanBuffer src/scanBuffer.cpp)\nadd_library(utils src/utils.cpp)\nadd_library(scanFilter src/scanFilter.cpp)\n\nadd_dependencies(scanFilter ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\nadd_dependencies(scanBuffer ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\n\ntarget_link_libraries(scanBuffer dlib ${catkin_LIBRARIES})\ntarget_link_libraries(map scanBuffer utils dlib ${catkin_LIBRARIES})\ntarget_link_libraries(scanFilter ${catkin_LIBRARIES})\n" }, { "alpha_fraction": 0.7325174808502197, "alphanum_fraction": 0.7377622127532959, "avg_line_length": 80.85713958740234, 
"blob_id": "4dd38f72ff731b5c904faa834adb7999c06e811f", "content_id": "5255609ca3543f7a1a5819d68628a9d29a993971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 572, "license_type": "no_license", "max_line_length": 199, "num_lines": 7, "path": "/stevens_pier_project/mapping/catkin_generated/package.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "set(_CATKIN_CURRENT_PACKAGE \"mapping\")\nset(mapping_MAINTAINER \"unnar <[email protected]>\")\nset(mapping_DEPRECATED \"\")\nset(mapping_VERSION \"0.0.0\")\nset(mapping_BUILD_DEPENDS \"geometry_msgs\" \"pcl_conversions\" \"pcl_ros\" \"roscpp\" \"rospy\" \"sensor_msgs\" \"visualization_msgs\" \"std_msgs\" \"nav_msgs\" \"tf\" \"tf_conversions\" \"eigen_conversions\" \"mapclasses\")\nset(mapping_RUN_DEPENDS \"geometry_msgs\" \"pcl_conversions\" \"pcl_ros\" \"roscpp\" \"rospy\" \"sensor_msgs\" \"visualization_msgs\" \"std_msgs\" \"nav_msgs\" \"tf\" \"tf_conversion\" \"eigen_conversion\" \"mapclasses\")\nset(mapping_BUILDTOOL_DEPENDS \"catkin\")" }, { "alpha_fraction": 0.7454545497894287, "alphanum_fraction": 0.7454545497894287, "avg_line_length": 32, "blob_id": "90ef39118029ce1bfac992d7a932638d1c46e336", "content_id": "b310a0dbefeaea8bdb000a6e30178be12cba0fc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 330, "license_type": "no_license", "max_line_length": 73, "num_lines": 10, "path": "/stevens_pier_project/mapping/CMakeFiles/scanFilterMA.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/scanFilterMA.dir/src/scanFilter.cpp.o\"\n \"../../../devel/lib/mapping/scanFilterMA.pdb\"\n \"../../../devel/lib/mapping/scanFilterMA\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/scanFilterMA.dir/cmake_clean_${lang}.cmake 
OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.725579559803009, "alphanum_fraction": 0.7315236926078796, "avg_line_length": 34.30070114135742, "blob_id": "80060a503f538a648febca1ccc401a5006bb2228", "content_id": "a463d105d5d94bd34eb85cfe8686b27fca7abec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5048, "license_type": "no_license", "max_line_length": 158, "num_lines": 143, "path": "/stevens_pier_project/mapping/include/map/map.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "/**\n *\tMaintaines a 2d feature map from a 3d sonar scanþ\n *\tEach feature is assumed to be infinetly high\n * @author Unnar Axelsson\n */\n\n#ifndef SONARMAP_H\n#define SONARMAP_H\n\n#include <ros/ros.h>\n#include <pcl_ros/point_cloud.h>\n#include <pcl/point_cloud.h>\n\n#include <pcl/point_types.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <tf/transform_listener.h>\n#include <tf_conversions/tf_eigen.h>\n#include <std_msgs/Bool.h>\n#include \"scanBuffer/scanBuffer.h\"\n\ntypedef pcl::PointXYZI PointT;\ntypedef pcl::PointCloud<PointT> PointCloudT;\n\nnamespace mapping{\n\nclass sonarMap{\n\nPointCloudT::Ptr map;\nstd::vector<PointCloudT::Ptr> pillarPoints;\nstd::vector<PointCloudT::Ptr> featurePoints;\nstd::vector<int> featureCount;\nPointCloudT::Ptr aligned_tmp;\nPointCloudT::Ptr features;\nPointCloudT::Ptr pillars;\nscanBuffer sb;\ntf::TransformListener listener_;\nros::Publisher *pubTransform_;\n\npublic:\n\tsonarMap(){\n\t\tmap = PointCloudT::Ptr (new PointCloudT());\n\t\taligned_tmp = PointCloudT::Ptr (new PointCloudT());\n\t\taligned_tmp->header.frame_id=\"odom\";\n\t\tfeatures = PointCloudT::Ptr (new PointCloudT());\n\t\tpillars = PointCloudT::Ptr (new PointCloudT());\n\t\tpillars->header.frame_id=\"odom\";\n\t\tfeatures->header.frame_id=\"odom\";\n\t}\n\n\tvoid setPublisher(ros::Publisher *pubTransform, ros::Publisher *pubTransformSB){\n\t\tpubTransform_ = 
pubTransform;\n\t\tsb.setPublisher(pubTransformSB);\n\t}\n\n\t/**\n\t * Takes in a new sonar scan, processes the data and calles \n\t * the appropriate functions to update the map.\n\t * @param scan PointCloud where each point represents a possible feature. \n\t */\n\tbool newScan(PointCloudT::Ptr scan);\n\n\t// Returns the map in the form of pointCLoud.\n\tPointCloudT::Ptr returnMap(void){ return map; }\n\t// Returns the last scan aligned to the current map. \n\tPointCloudT::Ptr returnAligned(void){ return aligned_tmp; }\n\t// Returen the buffer that has the highest number of scans.\n\tPointCloudT::Ptr returnCurrentBuffer(void){ return features; }\n\n\tPointCloudT::Ptr returnPillars(void){ return pillars; }\n\n\tstd::vector<PointCloudT::Ptr> returnPillarPoints(void);\n\tstd::vector<PointCloudT::Ptr> returnFeaturePoints(void);\n\n\tvoid updateBuffer(void){\n\t\t// sb.updateBuffer();\n\t}\nprivate:\n\t/**\n\t * Function is called to initialize the map or check if it has been initialized.\n\t * @param scan PointCloud where each point is a possible feature, \n\t * assumes all points have been projected to the xy-plane.\n\t * @return Returns true if map has been initialized, false otherwise.\n\t */\n\tbool intializeMap(const PointCloudT::Ptr scan);\n\n\t// Simple passthrough filter, bad implementation, to be TERMINATED.\n\tvoid passThroughFilter(const PointCloudT::Ptr scan, PointCloudT::Ptr filtered, float lim_low);\n\t\n\t/**\n\t * Uses icp from pcl to aligne pointcloud to the map we have.\n\t * @param map 2d feature map to be aligned to. 
\n\t * @param scan Feature scan to align.\n\t * @param aligned The aligned feature scan\n\t * @param trans 4by4 transformation matrix describing the transformation.\n\t * @return Return true if icp was successful, false otherwise.\n\t */\n\tbool icpAlign(const PointCloudT::Ptr map, const PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, PointCloudT &aligned, Eigen::Affine3f &trans);\n\t\n\t/**\n\t * Takes in feature scan that is aligned to the map and find associations between them and updates the map accordingly.\n\t * @param aligned Aligned feature scan in the form of a pointcloud.\n\t */\n\tvoid dataAssociation(PointCloudT::Ptr aligned);\n\n\t// Functions that change the intensity of pointclouds based on intensity criterias.\n\tvoid changeIntensity(float limLow, float limHigh, float change);\n\tvoid changeIntensity(float limLow, float limHigh, float change, std::vector<int> idx);\n\t\n\t// Removes obsolete map features, to be COMBINED WITH PASSTHROUGH FILTER.\n\tvoid pruneMap(void);\n\n\t/**\n\t * Takes in 3d rotation as euler angles and minimizes the angles.\n\t * @param euler vector with 3d rotation in form of euler angles.\n\t */\n\tvoid minimizeEuler(Eigen::Vector3f &euler);\n\t\n\t/**\n\t * Finds the transformation from a feature scan with two points to the map. 
\n\t * @param map 2d feature map to be aligned to.\n\t * @param scan Feature scan to align, only 2 points.\n\t * @param kdtree KD-tree created from the map.\n\t * @param trans The resulting transformation\n\t * @return Returns true if alignment was successful, false otherwise.\n\t */\n\tbool matchTwoPoints(PointCloudT::Ptr map, PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, Eigen::Affine3f &trans);\n\n\t/**\n\t * Takes in two pairs of points and finds the transformation between them.\n\t * @param a1 Point 1 from the map, connected to b1.\n\t * @param a2 Point 2 from the map, connected to b2.\n\t * @param b1 Point 1 from the scan, connected to a1.\n\t * @param b2 Point 2 from the scan, connected to a2.\n\t * @return transformation between the two pairs of points.\n\t */\n\tEigen::Affine3f transformationBetweenPairsOfPoints(PointT &a1, PointT &a2, PointT &b1, PointT &b2);\n\n\tvoid publishSonarTransform(Eigen::Affine3f translation);\n\n};\n\n}\n#endif" }, { "alpha_fraction": 0.742514967918396, "alphanum_fraction": 0.742514967918396, "avg_line_length": 32.400001525878906, "blob_id": "2c2da22ec7ef7048bd37eed2f0792d43e4b872eb", "content_id": "3902dc6aaf9931edfa69b2ffa40f2ba823a6815d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 334, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/stevens_pier_project/move_videoray/CMakeFiles/beamFilter.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/beamFilter.dir/src/beamFilter.cpp.o\"\n \"../../../devel/lib/move_videoray/beamFilter.pdb\"\n \"../../../devel/lib/move_videoray/beamFilter\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/beamFilter.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.5911910533905029, "alphanum_fraction": 0.5967742204666138, 
"avg_line_length": 22.72058868408203, "blob_id": "219bd45c345a33dd64a1b1bdaa8ebe2bbe8ad51b", "content_id": "2cc20ebb64af48d310350eec6853b0be57b13f9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1612, "license_type": "no_license", "max_line_length": 118, "num_lines": 68, "path": "/stevens_pier_project/mapping/include/utils/utils.hpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "namespace utils{\n\n\ttemplate<typename PointT>\n\tfloat Distance(PointT a, PointT b){\n\t\treturn std::sqrt(squaredDistance(a,b));\n\t}\n\n\ttemplate<typename PointT>\n\tfloat Distance(PointT a){\n\t\treturn std::sqrt(squaredDistance(a));\n\t}\n\n\ttemplate<typename PointT>\n\tfloat squaredDistance(PointT a, PointT b){\n\t\tfloat x = a.x-b.x;\n\t\tfloat y = a.y-b.y;\n\t\tfloat z = a.z-b.z;\n\t\treturn x*x+y*y+z*z;\n\t}\n\n\ttemplate<typename PointT>\n\tfloat squaredDistance(PointT a){\n\t\treturn a.x*a.x + a.y*a.y + a.z*a.z;\n\t}\n\n\ttemplate<typename PointCloudT>\n\tvoid printCloud(PointCloudT a){\n\t\tstd::cout << \" \" << std::endl;\n\t\tstd::cout << \"number of points: \" << a->points.size() << std::endl;\n\t\tfor(size_t i = 0; i < a->points.size(); ++i){\n\t\t\tstd::cout << \"x: \" << a->points.at(i).x << \" y: \" << a->points.at(i).y << \" z: \" << a->points.at(i).z << std::endl;\n\t\t}\n\t\tstd::cout << \" \" << std::endl;\n\t}\n\n\tbool poseTracker::newPose(tf::StampedTransform transform){\n\t\t\n\t\tEigen::Vector3f curr(transform.getOrigin().x(), transform.getOrigin().y(), transform.getOrigin().z());\n\t\tpose.push(curr);\n\t\tif(pose.size() < 50){\n\t\t\treturn true;\n\t\t}\n\n\t\tfloat squaredNorm = (pose.front() - curr).squaredNorm();\n\t\tpose.pop();\n\t\tbool tmp = squaredNorm > 0.2;\n\t\tstd::cout << tmp << \" Max: \" << squaredNorm << std::endl;\n\t\treturn squaredNorm > 0.2;\n\n\t}\n\n\t\n\tnamespace params{\n\t\t\n\t\ttemplate<typename T>\n\t\tT loadParam( std::string name, 
ros::NodeHandle &nh){\n\t\t\tT param;\n\t\t\tif (nh.hasParam( name )){\n\t\t\t nh.getParam( name, param);\n\t\t\t return param;\n\t\t\t} else {\n\t\t\t std::cout << \"Param \" << name << \" does not exist.\" << std::endl;\n\t\t\t exit(0);\n\t\t\t}\n\t }\n\n\t}\n}" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 31.200000762939453, "blob_id": "7770f26e7b869f575b9377d5c8c2659a070013d1", "content_id": "1c868d54d3f6a2cab2e8616158cd256726e3d779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 322, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/stevens_pier_project/mapping/CMakeFiles/mapBuilder.dir/cmake_clean.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/mapBuilder.dir/src/mapBuilder.cpp.o\"\n \"../../../devel/lib/mapping/mapBuilder.pdb\"\n \"../../../devel/lib/mapping/mapBuilder\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/mapBuilder.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n" }, { "alpha_fraction": 0.5431289672851562, "alphanum_fraction": 0.5754172801971436, "avg_line_length": 50.28799819946289, "blob_id": "e251c4cb0494e281b23b0772a3e24d42923ac69c", "content_id": "aaa277141b15bfdbb0b597db89c56777a1274e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6411, "license_type": "no_license", "max_line_length": 249, "num_lines": 125, "path": "/stevens_pier_project/mapping/include/optim/edge_se2.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "// g2o - General Graph Optimization\n// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. 
Burgard\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n// * Redistributions of source code must retain the above copyright notice,\n// this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above copyright\n// notice, this list of conditions and the following disclaimer in the\n// documentation and/or other materials provided with the distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n// IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\n// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef G2O_TUTORIAL_EDGE_SE2_H\n#define G2O_TUTORIAL_EDGE_SE2_H\n\n#include \"vertex_se2.h\"\n#include \"g2o_tutorial_slam2d_api.h\"\n#include \"g2o/core/base_binary_edge.h\"\n\nnamespace g2o {\n\n namespace tutorial {\n\n /**\n * \\brief 2D edge between two Vertex2, i.e., the odometry\n */\n class G2O_TUTORIAL_SLAM2D_API EdgeSE2 : public BaseBinaryEdge<3, SE2, VertexSE2, VertexSE2>\n {\n public:\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW;\n EdgeSE2();\n\n void computeError()\n {\n const VertexSE2* v1 = static_cast<const VertexSE2*>(_vertices[0]);\n const VertexSE2* v2 = static_cast<const VertexSE2*>(_vertices[1]);\n SE2 delta = 
_inverseMeasurement * (v1->estimate().inverse()*v2->estimate());\n // _error = delta.toVector();\n\n\n /* ..................................... */ \n // // x' = x * cos(theta) - y * sin(theta) )\n // double x2 = v2->estimate().toVector()(0)* std::cos(v2->estimate().toVector()(2)) - v2->estimate().toVector()(1) * std::sin(v2->estimate().toVector()(2)) ;\n // double x1 = v1->estimate().toVector()(0)* std::cos(v1->estimate().toVector()(2)) - v1->estimate().toVector()(1) * std::sin(v1->estimate().toVector()(2)) ;\n\n // // y' = y * ( sin(theta) + cos(theta) )\n // double y2 = v2->estimate().toVector()(1)* std::sin(v2->estimate().toVector()(2)) + v2->estimate().toVector()(0) * std::cos(v2->estimate().toVector()(2)) ;\n // double y1 = v1->estimate().toVector()(1)* std::sin(v1->estimate().toVector()(2)) + v1->estimate().toVector()(0) * std::cos(v1->estimate().toVector()(2)) ;\n\n // double theta2 = std::atan2(v2->estimate().toVector()(1) , v2->estimate().toVector()(0));\n // double theta1 = std::atan2(v1->estimate().toVector()(1) , v1->estimate().toVector()(0));\n\n // // deltax = x2' - x1' \n // double deltax = x2 - x1 ;\n // // deltay = y2' - y1' \n // double deltay = y2 - y1 ;\n // // deltaT = atan2( y2' / x2') - atan2( y1' / x1') \n // double deltaT = ( std::atan2( y2 , x2) - std::atan2( y1 , x1) );\n // double delta12 = normalize_theta( (theta2 - v2->estimate().toVector()(2)) - (theta1 - v1->estimate().toVector()(2)) ) ;\n\n // Eigen::Vector3d estimated(deltax,deltay,deltaT);\n \n \n\n // Eigen::Vector3d measurementEst(v2->estimate().toVector()(0) - v1->estimate().toVector()(0) , v2->estimate().toVector()(1) - v1->estimate().toVector()(1), normalize_theta( v2->estimate().toVector()(2) - v1->estimate().toVector()(2)) ) ;\n // SE2 delta(_measurement.toVector()(0) - estimated(0), _measurement.toVector()(1) - estimated(1), (_measurement.toVector()(2) - estimated(2)));\n /* ..................................... */ \n\n /* ..................................... 
*/ \n //Eigen::Vector3d measurementEst(v2->estimate().toVector()(0) - v1->estimate().toVector()(0) , v2->estimate().toVector()(1) - v1->estimate().toVector()(1), normalize_theta( v2->estimate().toVector()(2) - v1->estimate().toVector()(2)) ) ;\n //SE2 delta(_measurement.toVector()(0) - measurementEst(0), _measurement.toVector()(1) - measurementEst(1), normalize_theta(_measurement.toVector()(2) - measurementEst(2)) );\n /* ..................................... */ \n\n\n /* ..................................... */ \n /* odom calculation : 1: inverse of the first rotation , 2: translation , 3: seconde rotation */\n // // x' = ( x2 - x1 ) * cos(theta1) + (y2 - y1) * sin(theta1) )\n // double xp = ( v2->estimate().toVector()(0) - v1->estimate().toVector()(0) ) * std::cos(v1->estimate().toVector()(2)) + ( v2->estimate().toVector()(1) - v1->estimate().toVector()(1)) * std::sin(v1->estimate().toVector()(2)) ;\n \n // // y' = - ( x2 - x1 ) * sin(theta1) + (y2 - y1) * cos(theta1) )\n // double yp = - ( v2->estimate().toVector()(0) - v1->estimate().toVector()(0) ) * std::sin(v1->estimate().toVector()(2)) + ( v2->estimate().toVector()(1) - v1->estimate().toVector()(1)) * std::cos(v1->estimate().toVector()(2)) ;\n\n // // theta\" = normAngle(theta2 - theta1)\n // double thetap = normalize_theta(v1->estimate().toVector()(2) - v2->estimate().toVector()(2)) ;\n \n // Eigen::Vector3d estimated(xp, yp, thetap);\n // SE2 delta(_measurement.toVector()(0) - estimated(0), _measurement.toVector()(1) - estimated(1), normalize_theta(_measurement.toVector()(2) - estimated(2)));\n\n\n /* ..................................... 
*/ \n\n _error = delta.toVector();\n }\n \n void setMeasurement(const SE2& m){\n _measurement = m;\n _inverseMeasurement = m.inverse();\n }\n\n virtual bool read(std::istream& is);\n virtual bool write(std::ostream& os) const;\n\n protected:\n SE2 _inverseMeasurement;\n };\n\n }\n\n} // end namespace\n\n#endif\n" }, { "alpha_fraction": 0.6421923637390137, "alphanum_fraction": 0.656670093536377, "avg_line_length": 20.04347801208496, "blob_id": "02d8b6a02b572d8783eb73ccbe02bcd81355638c", "content_id": "6a83e0bad5f436d59aa4f689b80f485e73b494f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 64, "num_lines": 46, "path": "/stevens_pier_project/mapping/savedData/UnnarResults_Dec_10/round.py", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pickle\nimport pylab\nimport random\nimport os.path\nimport csv\nfrom pylab import figure, show, rand\nfrom matplotlib.patches import Ellipse, Circle\nfrom scipy.stats import chi2\n\ndef num(s):\n try:\n return int(s)\n except ValueError:\n \tpass\n try:\n return round(float(s),2)\n except ValueError:\n \treturn s\n # return int(s)\n\n\ncsv_out2 = open('pillar-average.csv', 'w')\ncsvf2 = csv.writer(csv_out2)\n\nfor i in range(0,6):\n\n\t# print(\"hmmmm\")\n\t# file_path = os.path.join(parent_directory, 'savedData/')\n\tcsv_out = open('pillar0'+str(i)+'-new-rounded.csv', 'w')\n\tcsvf = csv.writer(csv_out)\n\n\n\twith open('pillar0'+str(i)+'-new.csv', 'r') as csvfile:\n\t\tspamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\t\tfor row in spamreader:\n\t\t\ta=[]\n\t\t\tfor col in row:\n\t\t\t\ta.append(num(col))\n\t\t\t\t# print(num(col))\n\t\t\tcsvf.writerow(a)\n\ta[0] = i/10.0\n\tprint a\n\tcsvf2.writerow(a)" }, { "alpha_fraction": 0.7237896919250488, "alphanum_fraction": 0.728759229183197, "avg_line_length": 
35.27325439453125, "blob_id": "f647921e9f959bf6b7fe619f17cfd4808a3314a2", "content_id": "744d0ecaf6e75fa9fa922b714b3b0c1b174e52ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6239, "license_type": "no_license", "max_line_length": 158, "num_lines": 172, "path": "/stevens_pier_project/mapClasses/include/mapClasses/map/map.h", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "/**\n *\tMaintaines a 2d feature map from a 3d sonar scanþ\n *\tEach feature is assumed to be infinetly high\n * @author Unnar Axelsson\n */\n\n#ifndef SONARMAP_H\n#define SONARMAP_H\n\n#include <ros/ros.h>\n#include <pcl_ros/point_cloud.h>\n#include <pcl/point_cloud.h>\n\n#include <pcl/point_types.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <tf/transform_listener.h>\n#include <tf_conversions/tf_eigen.h>\n#include <std_msgs/Bool.h>\n\ntypedef pcl::PointXYZI PointT;\ntypedef pcl::PointXYZINormal PointTNormal;\ntypedef pcl::PointCloud<PointT> PointCloudT;\ntypedef pcl::PointCloud<PointTNormal> PointCloudTNormal;\n\nusing namespace std;\n\nstruct mapParams{\n\tstring outputTransform; // Output topic for sonar update\n\tstring outputMapWalls; // Output topic for map of walls\n\tstring outputMapWallsAligned; // Output topic for map of walls\n};\n\n\nclass sonarMap{\n\nros::NodeHandle* nh_;\nmapParams par_;\n\nPointCloudT::Ptr map;\nPointCloudTNormal::Ptr mapWalls;\nstd::vector<PointCloudT::Ptr> pillarPoints;\nstd::vector<PointCloudT::Ptr> featurePoints;\nstd::vector<int> featureCount;\nPointCloudT::Ptr aligned_tmp;\nPointCloudT::Ptr features; //initialize() -> creation , newWallScan() -> clear , newPillarScan() -> clear, dataAssociation(), icpAlign()\nPointCloudT::Ptr pillars;\ntf::TransformListener listener_;\nros::Publisher pubTransform_;\nros::Publisher pubMapWalls_;\nros::Publisher pubMapWallsAligned_;\n\n\npublic:\n\n\tsonarMap(){}\n\n\tsonarMap(ros::NodeHandle 
*nh){\n\t\tsonarMap::initialize(nh);\n\t}\n\n\tvoid initialize(ros::NodeHandle *nh);\n\n\t/**\n\t * Takes in a new sonar scan, processes the data and calles \n\t * the appropriate functions to update the map.\n\t * @param scan PointCloud where each point represents a possible feature. \n\t */\n\tbool newScan(PointCloudT::Ptr scan,std::vector<int> & TdataAss,std::vector<int> & erasedId, const int type);\n\n\t// Returns the map in the form of pointCLoud.\n\tPointCloudT::Ptr returnMap(void){ return map; }\n\t// Returns the last scan aligned to the current map. \n\tPointCloudT::Ptr returnAligned(void){ return aligned_tmp; }\n\t// Returen the buffer that has the highest number of scans.\n\t// PointCloudT::Ptr returnCurrentBuffer(void){ return features; }\n\n\tPointCloudT::Ptr returnPillars(void){ return pillars; }\n\n\tstd::vector<PointCloudT::Ptr> returnPillarPoints(void);\n\tstd::vector<PointCloudT::Ptr> returnFeaturePoints(void);\n\t\nprivate:\n\n\tbool newPillarScan(PointCloudT::Ptr beam,std::vector<int> & TdataAss, std::vector<int> & erasedId);\n\n\tbool newWallScan(PointCloudT::Ptr beam);\n\n\n\t/**\n\t * \\brief \t\t\tFunction is called to initialize the map or check if it has been initialized.\n\t * \\detail \t\t\tassume the PointCloud have been projected to the odom frame\n\t *\n\t * \\param[in] \t\tscan \t\t\tPointCloud where each point is a possible feature\n\t * \\param[in\\out] \tmapPtr \t\tPointCloud where each point is point in the map\n\t * \\param[in] \t\tsize\t \t\tminimal map size intended \n\t * \\param[in] \t\tsize\t \t\tminimal size between two different points in the map \n\t * \n\t * \\return \tReturns true if map has been initialized, false otherwise.\n\t */\n\tbool intializeMapPillar(const PointCloudT::Ptr scan, PointCloudT::Ptr mapPtr, const int size, int sqr_distance);\n\n\n\tbool intializeMapWall(const PointCloudTNormal::Ptr scan, PointCloudTNormal::Ptr mapPtr, const int size);\n\n\n\t/**\n\t * \\brief \t\t\tFunction is called to filter the 
input data according to the threshold\n\t * \\detail \t\t\tonly the data with an intensity higher than the threshold are return \n\t *\n\t * \\param[in] \t\t input \t\t\t\t\t\tPointCloud : input data \n\t * \\param[out] \t filtered_output \t\tPointCloud : ouput data \n\t * \\param[in] \t lim_low\t \t\t\t\tthreshold : low limite \n\t */\n\tvoid passThroughFilter(const PointCloudT::Ptr input, PointCloudT::Ptr filtered_output, float lim_low);\n\t\n\t/**\n\t * Uses icp from pcl to aligne pointcloud to the map we have.\n\t * @param map 2d feature map to be aligned to. \n\t * @param scan Feature scan to align.\n\t * @param aligned The aligned feature scan\n\t * @param trans 4by4 transformation matrix describing the transformation.\n\t * @return Return true if icp was successful, false otherwise.\n\t */\n\tbool icpAlign(const PointCloudT::Ptr map, const PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, PointCloudT &aligned, Eigen::Affine3f &trans);\n\t\n\t/**\n\t * Takes in feature scan that is aligned to the map and find associations between them and updates the map accordingly.\n\t * @param aligned Aligned feature scan in the form of a pointcloud.\n\t */\n\tvoid dataAssociation(PointCloudT::Ptr aligned, std::vector<int> & TdataAss, std::vector<int> & erasedId );\n\n\t// Functions that change the intensity of pointclouds based on intensity criterias.\n\tvoid changeIntensity(float limLow, float limHigh, float change);\n\tvoid changeIntensity(float limLow, float limHigh, float change, std::vector<int> idx);\n\t\n\t// Removes obsolete map features, to be COMBINED WITH PASSTHROUGH FILTER.\n\tvoid pruneMap(std::vector<int> & erasedId);\n\n\t/**\n\t * Takes in 3d rotation as euler angles and minimizes the angles.\n\t * @param euler vector with 3d rotation in form of euler angles.\n\t */\n\tvoid minimizeEuler(Eigen::Vector3f &euler);\n\t\n\t/**\n\t * Finds the transformation from a feature scan with two points to the map. 
\n\t * @param map 2d feature map to be aligned to.\n\t * @param scan Feature scan to align, only 2 points.\n\t * @param kdtree KD-tree created from the map.\n\t * @param trans The resulting transformation\n\t * @return Returns true if alignment was successful, false otherwise.\n\t */\n\tbool matchTwoPoints(PointCloudT::Ptr map, PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, Eigen::Affine3f &trans);\n\n\t/**\n\t * Takes in two pairs of points and finds the transformation between them.\n\t * @param a1 Point 1 from the map, connected to b1.\n\t * @param a2 Point 2 from the map, connected to b2.\n\t * @param b1 Point 1 from the scan, connected to a1.\n\t * @param b2 Point 2 from the scan, connected to a2.\n\t * @return transformation between the two pairs of points.\n\t */\n\tEigen::Affine3f transformationBetweenPairsOfPoints(PointT &a1, PointT &a2, PointT &b1, PointT &b2);\n\n\tvoid publishSonarTransform(Eigen::Affine3f translation);\n\tbool publishWallTransform(Eigen::Affine3f translation);\n\n\tvoid setParameters();\n\n};\n\n#endif" }, { "alpha_fraction": 0.5930635333061218, "alphanum_fraction": 0.6042047142982483, "avg_line_length": 30.33530616760254, "blob_id": "bc2eeaf4916d38777bce2e8265a9c2dd8826888d", "content_id": "c22efd878cb5bb4eb4c9efb6eb1dacd01c65e15c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15887, "license_type": "no_license", "max_line_length": 205, "num_lines": 507, "path": "/stevens_pier_project/mapClasses/src/scanBuffer.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#include \"mapClasses/scanBuffer/scanBuffer.h\"\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/filters/project_inliers.h>\n// #include <pcl/sample_consensus/ransac.h>\n#include <pcl/segmentation/sac_segmentation.h>\n#include <pcl/segmentation/extract_clusters.h>\n#include <pcl/sample_consensus/method_types.h>\n#include 
<pcl/sample_consensus/model_types.h>\n#include <pcl/common/common.h>\n#include <visualization_msgs/Marker.h>\n// #include <pcl/common/transforms.h>\n#include <pcl_ros/transforms.h>\n#include <algorithm>\n#include <mapClasses/utils/utils.h>\n#include <mapclasses/buff.h>\n// #include <ros/package.h>\n\nusing namespace utils::params;\n\nscanBuffer::scanBuffer(){}\n\nvoid scanBuffer::initialize(ros::NodeHandle *nh){\n\tnh_ = nh;\n\t nbrBeamReceived = 0;\n\tloadParams();\n\n\n\tisFirstPose = true ;\n\n\t// Publish to outputLines\n\t// In this case the topic is : /mapBuilder/lines_update\n\t// a message of type visualization_msgs::Marker\n\tpubLines_ = nh_->advertise<visualization_msgs::Marker>(par_.outputLines, 1);\n\n\t// Publish to outputBuffUpdate\n\t// In this case the topic is : /mapBuilder/buff_update\n\t// a message of type PointCloudT\n\tpubBuffUpdate_ = nh_->advertise<PointCloudT>(par_.outputBuffUpdate, 1);\n\n\t// Publish to outputBuff\n\t// In this case the topic is : /mapBuilder/buff\n\t// a message of type mapclasses::buff\n\tpubBuff_ = nh_->advertise<mapclasses::buff>(par_.outputBuff, 1);\n\n\n\tfor(size_t i = 0; i < par_.bufferSize; ++i){\n\t\tPointCloudT::Ptr cloud_tmp (new PointCloudT());\n\t\tcloud_tmp->header.frame_id=\"buffer\";\n\t\tclouds_.push_back(cloud_tmp);\n\t\tcloudsCount_.push_back(-1);\n\t}\n\tcountLastProcessed = 0;\n\n\t// ICP \n\tpoly_trainer.set_kernel(poly_kernel(0.6, 1, 2));\n\ttrainer.set_trainer(poly_trainer);\n\tstring pathPackage = ros::package::getPath(\"mapclasses\");\n\tdeserialize((pathPackage + \"/include/svm/df.dat\").c_str()) >> df;\n\tdeserialize((pathPackage + \"/include/svm/norm.dat\").c_str()) >> normalizer;\n\tdeserialize((pathPackage + \"/include/svm/df_1_3.dat\").c_str()) >> df_1_3;\n\n}\n\n\nbool scanBuffer::newScan(buff bu, PointCloudT::Ptr out, int &type){\n\n\tfloat dist, min;\n\tint idx;\n\tbuff tmpBuff;\n\n\t\n\t \tif (nbrBeamReceived >= 360 ){\n \t\t\tnbrBeamReceived = 0 ;\n \t\t}\n \t\telse {\n 
\t\t\tnbrBeamReceived ++ ;\n \t\t}\n \t\n \t//std::cout << \" new scan : \" << nbrBeamReceived << \" : scanBuffer\" << std::endl ;\n\n\t// find matching, search the smallest distance\n \tfor(size_t i = 0; i < bu.buffer->points.size(); ++i){\n\n \t\tidx = -1;\n \t\tmin = 1.5;\n \t\tfor(size_t j = 0; j < buff_.size(); ++j){\n \t\t\tdist = utils::Distance(buff_[j].buffer->points.back(), bu.buffer->points[i]);\n \t\t\tif( dist < min){\n \t\t\t\tidx = j;\n \t\t\t\tmin = dist;\n \t\t\t}\n \t\t}\n\n \t\t// if we find a match between new beam and the oldest :\n \t\t// we add the hits within the similar buffer\n \t\t// sinon we ceate an other buffer \n \t\tif(idx >= 0){ \n\t\t\t//std::cout << \"for i = \" << i << \" find matching with j= \"<< idx << std::endl;\n \t\t\tbuff_[idx].buffer->points.push_back(bu.buffer->points[i]);\n\t\t\tbuff_[idx].dist.push_back(bu.dist[i]);\n\t\t\tbuff_[idx].countLast = 0;\n \t\t}\n \t\telse{\n \t\t\t// No matching buffer has been found\n \t\t\tbuff bufftmp;\n \t\t\tbufftmp.buffer->points.push_back(bu.buffer->points[i]);\n \t\t\tbufftmp.buffer->header.frame_id = \"buffer\";\n \t\t\tbufftmp.dist.push_back(bu.dist[i]);\n\t\t\t// at the end of buff_ : the no matching features\n\t\t\t//std::cout << \"for i = \" << i << \" don't find matching\" <<std::endl;\n\t\t\t//std::cout << \" buff_ size, before pushback = \"<< buff_.size() << std::endl;\n\t\t\tbuff_.push_back(bufftmp);\n\t\t\t//std::cout << \" buff_ size, after pushback = \" << buff_.size() << std::endl;\n \t\t}\n\t}\n\tfor(size_t j = 0; j < buff_.size(); ++j){\n\t\tbuff_[j].countStart++;\n\t\tbuff_[j].countLast++;\n\t}\n\t//std::cout << \" buff_ size = \" << buff_.size() << \" : scanBuffer\" << std::endl ;\n\t\n\n\t// need to check what parts of the buffer we need to remove\n\tfor(size_t i = buff_.size(); i-- > 0;){\n\t std::cout << buff_[i].buffer->points.size() << std::endl;\n\t // Too old data\n\t\tif(buff_[i].countStart >= 120){\n\t\t\tbuff_.erase(buff_.begin() + i);\n\t\t\t //std::cout 
<< \"erasing \" << std::endl;\n\t\t} else if (buff_[i].countLast > 7 && buff_[i].buffer->points.size() < 3){\n\t\t\tbuff_.erase(buff_.begin() + i);\n\t\t\t //std::cout << \"erasing\" << std::endl;\n\t\t}\n\t}\n\n\t// for each cluster on buff_ \n\t// if the cluster have been detected more than 5times and it contain more thant 3 hits \n\t// the cluster is added to the tmpbuffer\n\tfor(size_t j = 0; j < buff_.size(); ++j){\n\t\tif(buff_[j].countLast > 5 && buff_[j].buffer->points.size() >= 3){ // Probably not going to add a new point to it\n\t\t\t// buff_[j].buffer->header.frame_id = \"odom\";\n\t\t\t// pubBuff_.publish(buff_[j].buffer);\n\t\t\t// processBuffer(buff_[j], out, type);\n\t\t\t// buff_[j].countStart = 150;\n\t\t\t// std::cout << \"hmmmm\" << std::endl;\n\t\t\t*tmpBuff.buffer += *buff_[j].buffer;\n\t\t\ttmpBuff.dist.insert(tmpBuff.dist.end(), buff_[j].dist.begin(), buff_[j].dist.end());\n\t\t}\n\t}\n\n\tbool isFull = false;\n\tif(tmpBuff.buffer->points.size() > 0 && countLastProcessed > 8){\n\t\t//std::cout << \" new scan : taille du beffer tmpBuff : \" << tmpBuff.buffer->points.size()<< \" and \" << tmpBuff.buffer->header.frame_id << \" : scanBuffer.cpp \" << std::endl ; \n\t\tprocessBuffer(tmpBuff, out, type);\n\t\tif(type > 0) isFull = true;\n\t\tout->header.frame_id = \"buffer\";\n\t\tpcl_ros::transformPointCloud(\"/odom\", *out, *out, listener_);\n\n\t\t// ros::Time now = ros::Time::now() ;\n\t\t// tf::StampedTransform transform;\n\t\t// if (now > ros::Time(1439997923.0) && now < ros::Time(1439997930.0)){\n\t\t// \tlistener_.lookupTransform(\"/odom\", \"/body\",ros::Time(0), transform);\n\t\t \t\n\t\t// \ttf::Quaternion q = transform.getRotation();\n\t\t// \ttf::Vector3 v = transform.getOrigin();\n\t\t \t\n\t\t// \tdouble x = q.getX() ;\n\t // \t\tdouble y = q.getY() ;\n\t // \t\tdouble z = q.getZ() ;\n\t // \t\tdouble w = q.getW() ;\n\t // \t\tdouble toDegree = 180 / M_PI;\n\t // \t\tdouble yaw, pitch, roll;\n\t // \ttransform.getBasis().getRPY(roll, 
pitch, yaw);\n\n\t\t// \tofstream myfile;\n\t\t// \tstring file = \"/home/clarisse/Documents/srcTestOctave/lookupTransform.txt\";\n\n\t\t// \tmyfile.open(file.c_str(), std::ofstream::out | std::ofstream::app);\n\t\t// \tmyfile << \"\\n\" << \"body/odom \" << v.getX() << \" \" << v.getY() << \" \" << v.getZ() << \"\\n\";\n\t\t// \tmyfile << x << \" \" << y << \" \" << z << \" \" << w << \"\\n\";;\n\t\t// \tmyfile << \"deg : \"<< roll << \" \" << pitch<< \" \" << yaw << \"\\n\" ;\n\t\t// \tmyfile << \"rad : \"<< roll * toDegree << \" \" << pitch * toDegree << \" \" << yaw * toDegree << \"\\n\" ;\n\t\t// \tmyfile << ros::Time(0) << \"\\n\" ;\n\t\t\t\n\t\t// \tmyfile.close();\n\t\t// }\n\n\t\tcountLastProcessed = 0;\n\t\ttmpBuff.buffer->header.frame_id=\"buffer\";\n\t\tpubBuff_.publish(tmpBuff.buffer);\n\t} else {\n\t\tcountLastProcessed++;\n\t}\n\treturn isFull;\n\n}\n\n\nvoid scanBuffer::processBuffer(buff bu, PointCloudT::Ptr out, int &type){\n\n\t// Check if there are any points in the input cloud\n\tif(bu.buffer->points.size() < 5){\n\t\tstd::cout << \"Not enough points in buffer.... 
Nothing to do.\" << std::endl;\n\t\treturn;\n\t}\n\tstd::vector<PointCloudT::Ptr> pvec;\n\tscanBuffer::findWalls(bu, out, pvec);\n\tif(out->points.size() > 0){\n\t\tstd::cout << \"Found wall\" << std::endl;\n\t\ttype = 1;\n\t} else {\n\t\tscanBuffer::findPillars(bu, out);\n\t\tif(out->points.size() > 0){\n\t\t\tstd::cout << \"Found pillar\" << std::endl;\n\t\t\ttype = 2;\n\t\t} else {\n\t\t\tstd::cout << \"Found nothing\" << std::endl;\n\t\t\ttype = 0;\n\t\t}\n\t}\n\n}\n\n\nvoid scanBuffer::findPillars(const buff bu, PointCloudT::Ptr out){\n\t// Cluster the data\n\tpcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);\n\ttree->setInputCloud (bu.buffer);\n\n\tstd::vector<pcl::PointIndices> cluster_indices;\n\tpcl::EuclideanClusterExtraction<PointT> ec;\n\tec.setClusterTolerance(par_.ecTolerance); // 2cm\n\tec.setMinClusterSize(par_.ecMinClusterSize);\n\tec.setMaxClusterSize(1000);\n\tec.setSearchMethod(tree);\n\tec.setInputCloud(bu.buffer);\n\tec.extract(cluster_indices);\n\n\tPointCloudT::Ptr tmpcloud (new PointCloudT());\n\tstd::vector<float> tmpdists;\n\n\n\tstd::vector<PointT> vPoint;\n\tstd::vector<double> vProb;\n\tint j = 0, idx;\n\tPointT tmp_max;\n\tstd::vector<pcl::PointIndices>::const_iterator it;\n\tint i = 0;\n\tint cProb = 0;\n\tfor(it = cluster_indices.begin(); it != cluster_indices.end(); ++it)\n\t{\n\t\ttmpcloud->clear();\n\t\tidx = 0;\n\t\ttmp_max.intensity = 0.0;\n\n\n\t\tsample_type_short intensities;\n\t\tstd::vector<double> dists(20);\n\n\t\tfor(int k = 0; k < intensities.size(); ++k){\n\t\t\tintensities(k) = 0;\n\t\t\tdists[k] = 0;\n\t\t}\n\t\tif(it->indices.size() >= intensities.size()){\n\t\t\tfloat rem = (it->indices.size()-(intensities.size()))/2.0;\n\t\t\tfor(long k = 0; k < intensities.size(); ++k){\n\t\t\t\tintensities(k) = bu.buffer->points[it->indices[k]+std::ceil(rem)].intensity;\n\t\t\t\tdists[k] = bu.dist[it->indices[k]+std::ceil(rem)];\n\t\t\t}\n\t\t} else {\n\t\t\tfloat pad = 
(intensities.size()-it->indices.size())/2.0;\n\t\t\tfor(long k = 0; k < it->indices.size(); ++k){\n\t\t\t\tintensities(k+std::ceil(pad)) = bu.buffer->points[it->indices[k]].intensity;\n\t\t\t\tdists[k+std::ceil(pad)] = bu.dist[it->indices[k]];\n\t\t\t}\n\t\t}\n\n\t\tdouble result = *std::max_element(dists.begin(), dists.end());\n\n\t\tintensities = normalizer(intensities);\n\n\t\t// std::vector<double> dists(bu.dist.size());\n\t\tsample_type st;\n\t\tfor(int k = 0; k < dists.size(); ++k)\n\t\t{\n\t\t\tdists[k] = dists[k] / result;\n\t\t}\n\n\t\tfor(int k = 0; k < 20; ++k)\n\t\t{\n\t\t\tst(k) = intensities(k);\n\t\t\tst(k+20) = dists[k];\n\t\t}\n\n// ***** SVM !!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ***** //\n\t\t// std::cout << \"SVM: \" << df(st) << std::endl;\n\t\tif(df(st) == 2) continue;\n\t\tif(df(st) == 3){\n\t\t\tcProb++;\n\t\t}\n\t\t// std::cout << \"prob: \" << df_1_3(st) << std::endl;\n\t\t// if(df_1_3(st) < 0.3) continue;\n\n\n\t\tvProb.push_back(df_1_3(st));\n\t\t// std::cout << \"st: \" << std::endl;\n\t\t// for(int k = 0; k < st.size()/2.0; ++k)\n\t\t// {\n\t\t// \tstd::cout << st(k) << \", \";\n\t\t// }\n\t\t// std::cout << \" \" << std::endl;\n\t\t// for(int k = 0; k < st.size()/2.0; ++k)\n\t\t// {\n\t\t// \tstd::cout << st(k+20) << \", \" ;\n\t\t// }\n\t\t// std::cout << \" \" << std::endl;\n\n\n\n\t\t// std::cout << \"Intensity: \";\n\t\t// Find highest intensity point within each cluster.\n\t\tfor(std::vector<int>::const_iterator pit = it->indices.begin(); pit != it->indices.end(); ++pit){\n\t\t\t// std::cout << bu.buffer->points[*pit].intensity << \" \";\n\t\t\tif(bu.buffer->points[*pit].intensity > tmp_max.intensity){\n\t\t\t\ttmp_max = bu.buffer->points[*pit];\n\t\t\t}\n\t\t\ttmpcloud->points.push_back(bu.buffer->points[*pit]);\n\t\t\ttmpdists.push_back(bu.dist[*pit]);\n\t\t\t// out->points.push_back(cloud->points[*pit]);\n\t\t}\n\t\t//std::cout << tmpcloud->header.frame_id << \" tmpcloud \"<< tmpcloud->points.size() << \" : scanBuffer.cpp \" << 
std::endl ;\n\t\t// std::cout << \"\" << std::endl;\n\t\t// if(tmp_max.intensity > maxIntensity_){\n\t\tvPoint.push_back(tmp_max);\n\t\t// out->points.push_back(tmp_max);\n\t\ttmpcloud->header.frame_id=\"buffer\";\n\t\tmapclasses::buff buf;\n\t\t//std::cout << \" try to pucblish on buf ; \" << tmpcloud->points[1].x << \" ; \"<< tmpcloud->points[1].y <<\" : scanBuffer\" << std::endl;\n\t\tpcl::toROSMsg(*tmpcloud, buf.cloud);\n\t\tbuf.dists = tmpdists;\n\t\tpubBuff_.publish(buf);\n\t\tpubBuffUpdate_.publish(tmpcloud);\n\t\t// geometry_msgs::Point p;\n\t\t// p.x = cloud->points[it->indices.front()].x;\n\t\t// p.y = cloud->points[it->indices.front()].y;\n\t\t// p.z = cloud->points[it->indices.front()].z;\n\t\t// line_list.points.push_back(p);\n\t\t// p.x = cloud->points[it->indices.back()].x;\n\t\t// p.y = cloud->points[it->indices.back()].y;\n\t\t// p.z = cloud->points[it->indices.back()].z;\n\t\t// line_list.points.push_back(p);\n\t\t// }\n\t}\n\t// int cProb = 0;\n\tdouble lProb = 0.5;\n\tif(cProb >= 2){\n\t\tlProb = 0.3;\n\t}\n\t// std::cout << \"cprob: \" << cProb << \", lProp: \" << lProb << std::endl;\n\tfor(int i = 0; i < vProb.size(); ++i)\n\t{\n\n\t\tif(vProb[i] > lProb){\n\t\t\tout->points.push_back(vPoint[i]);\n\t\t}\n\t}\n\n\t// pubLines_.publish(line_list);\n}\n\nvoid scanBuffer::findWalls(const buff bu, PointCloudT::Ptr out, std::vector<PointCloudT::Ptr> &walls){\n\n\tPointCloudT::Ptr cloud_f (new PointCloudT ());\n\tPointCloudT::Ptr cloud_tmp (new PointCloudT ());\n\tstd::vector<float> tmpdist = bu.dist;\n\n\t// Create the segmentation object\n\tpcl::SACSegmentation<PointT> seg;\n\t// Optional\n\tseg.setOptimizeCoefficients (true);\n\t// Mandatory\n\tseg.setModelType (pcl::SACMODEL_LINE);\n\tseg.setMethodType (pcl::SAC_RANSAC);\n\tseg.setDistanceThreshold (0.5);\n\tseg.setMaxIterations (100);\n\n\tpcl::ProjectInliers<PointT> proj;\n\tproj.setModelType (pcl::SACMODEL_LINE);\n\n\t*cloud_tmp = *bu.buffer;\n\tint firstcount = 0;\n\tint i=0, nr_points = 
cloud_tmp->points.size ();\n\twhile (i < 5 && cloud_tmp->points.size() > 0 && cloud_tmp->points.size() > 0.05 * nr_points)\n\t{\n\t\tpcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);\n\t\tpcl::PointIndices::Ptr inliers (new pcl::PointIndices);\n\t\tPointCloudT::Ptr cloud_line (new PointCloudT ());\n\t\tseg.setInputCloud (cloud_tmp);\n\t\tseg.segment (*inliers, *coefficients);\n\n\t\tif(inliers->indices.size() < 15){\n\t\t\t++i;\n\t\t\tbreak;\n\t\t}\n\t\tstd::vector<int> new_indices;\n\t\tint count = 0;\n\t\tint inlSize = inliers->indices.size();\n\t\tfloat maxDist = inlSize/30.0 * 2.0;\n\t\tfor(size_t j = 0; j < inlSize-1; ++j){\n\t\t\tif(utils::Distance(cloud_tmp->points[inliers->indices[j]], cloud_tmp->points[inliers->indices[j+1]]) < maxDist){\n\t\t\t\tcount++;\n\t\t\t} else {\n\t\t\t\tif(count < 15){\n\t\t\t\t\tcount = 0;\n\t\t\t\t} else {\n\t\t\t\t\tfor(size_t k = j-count; k <= j; ++k){\n\t\t\t\t\t\tnew_indices.push_back(inliers->indices[k]);\n\t\t\t\t\t}\n\t\t\t\t\tcount = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif(count >= 15){\n\t\t\tfor(size_t k = inlSize-1-count; k <= inlSize-1; ++k){\n\t\t\t\tnew_indices.push_back(inliers->indices[k]);\n\t\t\t}\n\t\t}\n\n\n\t\tif(new_indices.size() >= 15){\n\t\t\tinliers->indices = new_indices;\n\n\t\t mapclasses::buff buf;\n\n\t\t\t// Extract the planar inliers from the input cloud\n\t\t pcl::ExtractIndices<PointT> extract;\n\t\t extract.setInputCloud (cloud_tmp);\n\t\t extract.setIndices (inliers);\n\t\t extract.setNegative (false);\n\t\t extract.filter (*cloud_line);\n\n\t\t // Remove the planar inliers, extract the rest\n\t\t extract.setNegative (true);\n\t\t extract.filter (*cloud_f);\n\t\t cloud_tmp.swap (cloud_f);\n\n\t\t for(size_t i = inliers->indices.size(); i-- > 0;){\n \t\tbuf.dists.push_back(tmpdist[inliers->indices[i]]);\n \t\ttmpdist.erase(tmpdist.begin() + inliers->indices[i]);\n \t\t}\n\n\t\t\t// proj.setInputCloud (cloud_line);\n\t\t\t// proj.setModelCoefficients 
(coefficients);\n\t\t\t// proj.filter (*cloud_line);\n\n\t\t walls.push_back(cloud_line);\n\t\t cloud_line->header.frame_id=\"buffer\";\n\n\t\t\tpcl::toROSMsg(*cloud_line, buf.cloud);\n\t\t pubBuff_.publish(buf);\n\t\t pubBuffUpdate_.publish(cloud_line);\n\t\t *out += *cloud_line;\n\t\t}\n\t\ti++;\n\t}\n\n\tvisualization_msgs::Marker line_list;\n\tline_list.header.frame_id = \"buffer\";\n\tline_list.header.stamp = ros::Time::now();\n\tline_list.ns = \"lines\";\n\tline_list.type = visualization_msgs::Marker::LINE_LIST;\n\tline_list.scale.x = 0.2;\n\tline_list.color.b = 1.0;\n\tline_list.color.a = 1.0;\n\n\tgeometry_msgs::Point p;\n\tfor(size_t i = 0; i < walls.size(); ++i){\n\t\tp.x = walls[i]->points.front().x;\n\t\tp.y = walls[i]->points.front().y;\n\t\tp.z = walls[i]->points.front().z;\n\t\tline_list.points.push_back(p);\n\t\tp.x = walls[i]->points.back().x;\n\t\tp.y = walls[i]->points.back().y;\n\t\tp.z = walls[i]->points.back().z;\n\t\tline_list.points.push_back(p);\n\t}\n\tpubLines_.publish(line_list);\n}\n\n\n\nvoid scanBuffer::updateBuffer(Eigen::Affine3f trans){\n\tfor(size_t i = 0; i < clouds_.size(); ++i){\n\t\tpcl::transformPointCloud (*clouds_.at(i), *clouds_.at(i), trans);\n\t}\n}\n\nvoid scanBuffer::loadParams(void){\n\tpar_.scanSize = loadParam<int>(\"scanSize\", nh_);\n\tpar_.bufferSize = loadParam<int>(\"bufferSize\", nh_);\n\tpar_.ecTolerance = loadParam<double>(\"ecTolerance\", nh_);\n\tpar_.ecMinClusterSize = loadParam<double>(\"ecMinClusterSize\", nh_);\n\tpar_.outputLines = loadParam<string>(\"outputLines\",nh_);\n\tpar_.outputBuffUpdate = loadParam<string>(\"outputBuffUpdate\",nh_);\n\tpar_.outputBuff = loadParam<string>(\"outputBuff\",nh_);\n}\n" }, { "alpha_fraction": 0.512957751750946, "alphanum_fraction": 0.5322334170341492, "avg_line_length": 38.07389831542969, "blob_id": "f18ca39d9104d8cad3721e7337f747255f01063c", "content_id": "35a7187b34290ff11dc31a7297adc7744cd0859d", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C++", "length_bytes": 24850, "license_type": "no_license", "max_line_length": 169, "num_lines": 636, "path": "/stevens_pier_project/mapping/include/map/map.cpp", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "#include \"map/map.h\"\n#include \"utils/utils.h\"\n\n#include <pcl/registration/icp.h>\n#include <pcl/filters/passthrough.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/filters/filter_indices.h>\n// #include <pcl/common/transforms.h>\n#include <eigen_conversions/eigen_msg.h>\n#include <geometry_msgs/TransformStamped.h>\n#include <math.h>\n#include <vector>\n\nnamespace mapping{\n\nbool sonarMap::newScan(PointCloudT::Ptr beam){\n \n // PointCloudT::Ptr scan (new PointCloudT());\n features->clear();\n features->header.frame_id = \"odom\";\n if(!sb.newScan(beam, features)){\n // Can return as there is no full buffer to process.\n return false;\n }\n // return true;\n\n // Check to see if the map has been properly initialized.\n if(!sonarMap::intializeMap(features)) return false;\n \n // Passthrough Filter, only align to map features with intensity higher than 1.8\n PointCloudT::Ptr filtered_map (new PointCloudT());\n sonarMap::passThroughFilter(map, filtered_map, 2.0);\n\n // create a kd-tree from the filtered map\n std::vector<int> pidx;\n std::vector<float> psd;\n PointCloudT::Ptr filtered_scan (new PointCloudT());\n pcl::KdTreeFLANN<PointT> kdtree;\n kdtree.setInputCloud(filtered_map);\n\n\n // Filter out points far away from the map and only use those who are close for aligning\n std::vector<int> map_int(map->points.size());\n for(size_t i = 0; i < features->points.size(); ++i){\n if(kdtree.radiusSearch(features->points.at(i), 5.0, pidx, psd) > 0){\n // if(features->points.size() <= 2){\n if(pidx.size() > 0){\n filtered_scan->points.push_back(features->points.at(i));\n }\n // } else {\n // if(pidx.size() > 0 && (map_int[pidx[0]] == 0 || psd[0] < map_int[pidx[0]] == 0)){\n // 
filtered_scan->points.push_back(features->points.at(i));\n // map_int[pidx[0]] = psd[0];\n // }\n // }\n }\n }\n\n\n // align scan to map\n PointCloudT aligned;\n if(filtered_scan->points.size() > 2){\n Eigen::Affine3f transform;\n if(icpAlign(filtered_map, filtered_scan, kdtree, aligned, transform)){\n // ROS_ERROR(\"THREE OR MORE POINTS\");\n // Data association\n pcl::transformPointCloud (*features, *aligned_tmp, transform);\n sonarMap::dataAssociation(aligned_tmp);\n // for(int i = 0; i < aligned_tmp->points.size(); ++i){\n // std::cout << \"aligned i: \" << aligned_tmp->points[i].x << \", \" << aligned_tmp->points[i].y << std::endl;\n // }\n // for(int i = 0; i < features->points.size(); ++i){\n // std::cout << \"features i: \" << features->points[i].x << \", \" << features->points[i].y << std::endl;\n // }\n // std::cout << transform.matrix() << std::endl;\n sonarMap::publishSonarTransform(transform);\n return true;\n }else{\n }\n } else if(filtered_scan->points.size() > 1){\n Eigen::Affine3f transform;\n if(sonarMap::matchTwoPoints(filtered_map, filtered_scan, kdtree, transform)){\n // ROS_ERROR(\"Matched two points\");\n pcl::transformPointCloud (*features, *aligned_tmp, transform);\n if(std::abs(transform.matrix()(0,3)) > 1.5 || std::abs(transform.matrix()(1,3)) > 1.5){\n // std::cout << \"hahahahahahha\" << std::endl;\n return false;\n }\n ROS_ERROR(\"TWO POINTS\");\n sonarMap::dataAssociation(aligned_tmp);\n sonarMap::publishSonarTransform(transform);\n return true;\n }\n } else {\n // ROS_ERROR(\"LESS THAN TWO POINTS\");\n }\n aligned_tmp->clear();\n return false;\n}\n\n\nvoid sonarMap::dataAssociation(PointCloudT::Ptr aligned){\n // std::cout << \"associate\" << std::endl;\n pcl::KdTreeFLANN<PointT> kdtree5;\n kdtree5.setInputCloud(map);\n\n int K = 5;\n std::vector<int> pidx(K);\n std::vector<float> psq(K);\n\n std::vector<int> p1; // aligned index\n std::vector<int> p2; // map index\n std::vector<float> dist; // Indexes to points not associated\n 
std::vector<int> left; // Indexes to points not associated\n\n std::vector <int>::iterator it;\n int nPosition, pidxSize, idx;\n // map detected features to features on map. make sure mapping is unique.\n // std::cout << \"associated mapping\" << std::endl;\n for(size_t i = 0; i < aligned->points.size(); ++i){\n pidxSize = kdtree5.nearestKSearch(aligned->points.at(i), K, pidx, psq);\n if(pidxSize > 0)\n {\n // Find the closest definite feature within certain distance\n idx = 0;\n for (int j = 0; j < pidxSize; ++j)\n {\n if(pidx[j] < map->points.size() && map->points.at(pidx[j]).intensity >= 2.0 && psq[j] < 1.5){\n idx = j;\n break;\n }\n }\n\n if(psq[idx] < 1.5 && pidx[idx] < map->points.size()){\n // Close enough to a current feature: ASSOCIATED \n it = find(p2.begin(), p2.end(), pidx[idx]);\n if (it != p2.end()){\n nPosition = distance(p2.begin(), it);\n // left.push_back(p1.at(nPosition));\n if(psq[idx] < dist.at(nPosition)){\n p1.at(nPosition) = i;\n dist.at(nPosition) = psq[idx];\n }\n } else {\n p1.push_back(i);\n p2.push_back(pidx[idx]);\n dist.push_back(psq[idx]);\n }\n \n } else {\n // Probably a new feature\n left.push_back(i);\n }\n\n }\n }\n\n // std::cout << \"modify map\" << std::endl;\n // increase uncertain detected points. 
\n sonarMap::changeIntensity(0.0, 4.0, 0.2, p2);\n\n // Decrease uncertain points\n pcl::KdTreeFLANN<PointT> kdtree6;\n kdtree6.setInputCloud(aligned);\n std::vector<int> pointIdxRadiusSearch;\n std::vector<float> pointRadiusSquaredDistance;\n\n for(size_t i = 0; i < map->points.size(); ++i){\n try{\n if(map->points.at(i).intensity < 2.0){\n if ( kdtree6.radiusSearch (map->points.at(i), 5.0, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0 ){\n map->points.at(i).intensity -= 0.1;\n }\n }\n }\n catch(int e){\n // ROS_WARN(\"OUT OF BOUNDS\");\n // std::cout << \"i: \" << i << \"size: \" << map->points.size() << std::endl;\n // ROS_WARN(\"OUT OF BOUNDS\");\n }\n }\n // sonarMap::changeIntensity(-2.0, 2.0, -0.1);\n \n\n // Add new points\n // std::cout << \"new points\" << std::endl;\n for(size_t i = 0; i < left.size(); ++i){\n aligned->points.at(left.at(i)).intensity = 1.0;\n map->points.push_back(aligned->points.at(left.at(i)));\n PointCloudT::Ptr tmp (new PointCloudT());\n tmp->points.push_back(aligned->points.at(left.at(i)));\n pillarPoints.push_back(tmp);\n PointCloudT::Ptr tmp2 (new PointCloudT());\n // std::cout << \"add new feature\" << std::endl;\n tmp2->points.push_back(features->points.at(left.at(i)));\n featurePoints.push_back(tmp2);\n featureCount.push_back(1);\n // std::cout << \"finished adding new feature\" << std::endl;\n }\n\n\n \n // std::cout << \"heheh\" << std::endl;\n // std::cout << \"update featur\" << std::endl;\n // std::cout << \"aligned size: \" << aligned->points.size();\n // std::cout << \" feature size: \" << features->points.size();\n // std::cout << \" map size: \" << map->points.size() << std::endl;\n for(size_t i = 0; i < p2.size(); ++i){\n if(p2[i] >= map->points.size() && map->points[p2[i]].intensity < 2.0){\n continue;\n }\n featureCount[p2[i]]++;\n // std::cout << \"p1[i]: \" << p1[i] << \" p2[i]: \" << p2[i] << std::endl;\n pillarPoints[p2[i]]->points.push_back(aligned->points.at(p1[i]));\n 
featurePoints[p2[i]]->points.push_back(features->points.at(p1[i]));\n map->points.at(p2[i]).x = (map->points.at(p2[i]).x*(featureCount[p2[i]]-1)+aligned->points.at(p1[i]).x) / featureCount[p2[i]];\n map->points.at(p2[i]).y = (map->points.at(p2[i]).y*(featureCount[p2[i]]-1)+aligned->points.at(p1[i]).y) / featureCount[p2[i]];\n }\n // std::cout << \"finished updating featur\" << std::endl;\n\n // std::cout << \"pillars\" << std::endl;\n // add pillars estimated outlines\n tf::StampedTransform transform;\n try{\n listener_.lookupTransform(\"/odom\", \"/body\", ros::Time(0), transform);\n }\n catch (tf::TransformException ex){\n std::cout << \"exception: \" << ex.what() << std::endl;\n }\n float theta;\n // PointCloudT::Ptr tmp (new PointCloudT());\n PointT p;\n // tmp->width = p1.size();\n // tmp->height = 1;\n // tmp->points.resize (tmp->width * tmp->height);\n for(size_t i = 0; i < p1.size(); ++i){\n if(p2[i] < map->points.size() && map->points[p2[i]].intensity >= 2.0){\n theta = atan2(aligned->points.at(p1.at(i)).y - transform.getOrigin().y(), aligned->points.at(p1.at(i)).x - transform.getOrigin().x());\n p.x = aligned->points.at(p1.at(i)).x - cos(theta)*0.0;\n p.y = aligned->points.at(p1.at(i)).y - sin(theta)*0.0;\n pillars->points.push_back(p);\n }\n }\n // std::cout << \"associated\" << std::endl;\n \n // std::cout << \"prune\" << std::endl;\n // Prune Map\n sonarMap::pruneMap();\n}\n\nbool sonarMap::intializeMap(PointCloudT::Ptr scan){\n if(map->points.size() >= 3) return true;\n\n // use for loops to check new points, ok due to small size.\n bool toClose;\n for(size_t i = 0; i < scan->points.size(); ++i){\n toClose = false;\n for(size_t j = 0; j < map->points.size(); ++j){\n if(utils::Distance(map->points.at(j), scan->points.at(i)) < 1.0){\n toClose = true;\n }\n }\n // if(!toClose && map->points.size() < 4){ // Add new point to map\n if(!toClose){ // Add new point to map\n map->points.push_back(scan->points.at(i));\n map->points.back().intensity = 4.0;\n 
PointCloudT::Ptr tmp (new PointCloudT());\n tmp->points.push_back(scan->points.at(i));\n pillarPoints.push_back(tmp);\n PointCloudT::Ptr tmp2 (new PointCloudT());\n *tmp2 += *tmp; \n featurePoints.push_back(tmp2);\n featureCount.push_back(1);\n }\n }\n map->header.frame_id = scan->header.frame_id;\n // Check to see if map is initialized now\n if(map->points.size() >= 3){\n return true;\n } \n \n return false;\n}\n\nvoid sonarMap::passThroughFilter(const PointCloudT::Ptr scan, PointCloudT::Ptr filtered, const float lim_low){\n filtered->header = scan->header;\n for(size_t i = 0; i < scan->points.size(); ++i){\n if(scan->points.at(i).intensity >= lim_low)\n filtered->points.push_back(scan->points.at(i));\n }\n}\n\nbool sonarMap::icpAlign(const PointCloudT::Ptr map, const PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, PointCloudT &aligned, Eigen::Affine3f &trans){\n pcl::IterativeClosestPoint<PointT, PointT> icp;\n icp.setInputSource(scan);\n icp.setInputTarget(map);\n icp.setMaxCorrespondenceDistance (10.0);\n // icp.setRANSACOutlierRejectionThreshold (1.0);\n // icp.setMaximumIterations(500);\n // icp.setTransformationEpsilon (1e-8);\n icp.setEuclideanFitnessEpsilon (1.0);\n icp.align(aligned);\n\n if(icp.getFitnessScore() < 1.0 && icp.hasConverged()){\n trans = icp.getFinalTransformation();\n Eigen::Vector3f ea= trans.matrix().block<3,3>(0,0).eulerAngles(0, 1, 2);\n sonarMap::minimizeEuler(ea);\n if(std::abs(ea[2]) > M_PI/9){\n std::cout << \" ROTATION TO BIG\" << std::endl;\n return false;\n }\n \n // PointCloudT::Ptr tmp_aligned(&aligned);\n // double free or corruption error\n // Stupid, use for loop to copy to PointCloud::Ptr\n PointCloudT::Ptr tmp_aligned(new PointCloudT());\n for(size_t i = 0; i < aligned.points.size(); ++i){\n tmp_aligned->points.push_back(aligned.points[i]);\n }\n \n pcl::KdTreeFLANN<PointT> kdtree3;\n kdtree3.setInputCloud(tmp_aligned);\n\n // std::vector<int> pointIdxRadiusSearch;\n // std::vector<float> 
pointRadiusSquaredDistance;\n\n // std::vector<int> featureIdx(features->points.size());\n // int count = 0;\n // for(size_t i = 0; i < map->points.size(); ++i){\n // if ( kdtree3.radiusSearch (map->points.at(i), 3.0, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0 ){\n // if(featureIdx[pointIdxRadiusSearch[0]] == 0){\n // featureIdx[pointIdxRadiusSearch[0]] = 1;\n // count++;\n // }\n // }\n // }\n\n\n std::vector<int> pointIdxRadiusSearch;\n std::vector<float> pointRadiusSquaredDistance;\n std::vector<int> pIdxRS;\n std::vector<float> pRSD;\n\n std::vector<int> featureIdx(features->points.size());\n int count = 0;\n int featureNeighbours = 0;\n int mapNeighbours = 0;\n for(size_t i = 0; i < map->points.size(); ++i){\n // std::cout << \"i: \" << i << \" size: \" << map->points.size() << std::endl;\n featureNeighbours = kdtree3.radiusSearch (map->points.at(i), 2.0, pointIdxRadiusSearch, pointRadiusSquaredDistance);\n mapNeighbours = kdtree.radiusSearch (map->points.at(i), 4.0, pIdxRS, pRSD);\n if(featureNeighbours > 0){\n if(featureNeighbours-mapNeighbours > 0){\n featureIdx[pointIdxRadiusSearch[0]] = 1;\n count++;\n } \n else{\n for(size_t j = 0; j < featureNeighbours; ++j){\n // std::cout << \"hmmmmm: \" << j << \"max: \" << featureNeighbours << \"map\" << mapNeighbours << std::endl;\n featureIdx[pointIdxRadiusSearch[j]] = 1;\n }\n }\n }\n }\n\n\n if(count > 0){\n ROS_WARN(\"##############\");\n ROS_WARN(\"ALIGNING AGAIN\");\n ROS_WARN(\"##############\");\n // More than one detected feature has ben aligned to the same map feature. \n // Need to remove and align again. 
\n PointCloudT::Ptr new_aligned (new PointCloudT());\n for(size_t i = 0; i < tmp_aligned->points.size(); ++i){\n if(featureIdx[i] == 1){\n new_aligned->points.push_back(tmp_aligned->points[i]);\n }\n }\n\n icp.setInputSource(new_aligned);\n icp.setInputTarget(map);\n icp.setMaxCorrespondenceDistance (3.0);\n // icp.setRANSACOutlierRejectionThreshold (1.0);\n // icp.setMaximumIterations(500);\n // icp.setTransformationEpsilon (1e-8);\n icp.setEuclideanFitnessEpsilon (1.0);\n icp.align(aligned);\n if(icp.getFitnessScore() < 1.0 && icp.hasConverged()){\n Eigen::Affine3f trans2;\n trans2 = icp.getFinalTransformation();\n trans = trans2 * trans;\n }\n }\n else{\n // std::cout << \"NOT aligned again\" << std::endl;\n }\n\n\n }\n\n // check that the transformation is sensible(does not have really small or really big values\n // Just put the values small and big enough\n for(int i = 0; i < trans.matrix().rows(); ++i){\n for(int j = 0; j < trans.matrix().cols(); ++j){\n if(trans.matrix()(i,j) != 0){\n if( std::abs(trans.matrix()(i,j)) < 1*std::pow(10,-20) || 1*std::pow(10,10) < std::abs(trans.matrix()(i,j)) ){\n // if( 10000000 < std::abs(trans.matrix()(i,j)) ){\n ROS_WARN(\"Nonsensical transformation\");\n // std::cout << trans.matrix() << std::endl;\n return false;\n }\n }\n }\n }\n return true;\n}\n\n\nvoid sonarMap::changeIntensity(float limLow, float limHigh, float change){\n for(size_t i = 0; i < map->points.size(); ++i){\n if(map->points.at(i).intensity >= limLow && map->points.at(i).intensity <= limHigh){\n map->points.at(i).intensity += change;\n }\n }\n}\n\nvoid sonarMap::changeIntensity(float limLow, float limHigh, float change, std::vector<int> idx){\n for(size_t i = 0; i < idx.size(); ++i){\n if(idx.at(i) >= map->points.size()){\n ROS_WARN(\"WHAT THE FUCKING FUCK\");\n break;\n }\n if(map->points.at(idx.at(i)).intensity >= limLow && map->points.at(idx.at(i)).intensity <= limHigh){\n map->points.at(idx.at(i)).intensity += change;\n }\n }\n}\n\nvoid 
sonarMap::pruneMap(void){\n int tmpi = map->points.size();\n int feat = 0;\n for(int i = 0; i < map->points.size(); ++i){\n if(map->points.at(i).intensity >= 2.0){\n feat++;\n }\n }\n pcl::PointIndices pi;\n pcl::PassThrough<PointT> pass(true);\n pass.setInputCloud(map);\n pass.setFilterFieldName(\"intensity\");\n pass.setFilterLimits(0.0, 30);\n //pass.setFilterLimitsNegative (true);\n pass.filter(*map);\n pass.getRemovedIndices(pi);\n // for(size_t i = 0; i < pi.indices.size(); ++i){\n \n int feat2 = 0;\n for(int i = 0; i < map->points.size(); ++i){\n if(map->points.at(i).intensity >= 2.0){\n feat2++;\n }\n }\n\n if(feat != feat2){\n ROS_WARN(\"############################################\");\n ROS_WARN(\"############################################\");\n ROS_WARN(\"FUCKING PASSTHROUGH\");\n ROS_WARN(\"############################################\");\n ROS_WARN(\"############################################\");\n }\n\n\n if(pi.indices.size() > 0){\n ROS_WARN(\"############################################\");\n std::cout << \"indices: \";\n }\n for(size_t i = pi.indices.size(); i-- > 0;){\n featureCount.erase(featureCount.begin() + pi.indices[i]);\n pillarPoints.erase(pillarPoints.begin() + pi.indices[i]);\n featurePoints.erase(featurePoints.begin() + pi.indices[i]);\n std::cout << pi.indices[i] << \", \";\n // featureCount.at(i) = -1;\n }\n if(pi.indices.size() > 0){\n std::cout << \"\" << std::endl;\n ROS_WARN(\"############################################\");\n }\n // featureCount.erase(remove(featureCount.begin(), featureCount.end(), -1), featureCount.end());\n}\n\nvoid sonarMap::minimizeEuler(Eigen::Vector3f &euler){\n if(euler[0] > M_PI/4){\n euler[0] = euler[0] - M_PI;\n euler[1] = -euler[1];\n euler[2] = euler[2] - M_PI;\n } else if(euler[0] < -M_PI/4){\n euler[0] = euler[0] + M_PI;\n euler[1] = -euler[1];\n euler[2] = euler[2] + M_PI;\n }\n if(euler[1] > M_PI/4){\n euler[0] = -euler[0];\n euler[1] = euler[1] - M_PI;\n euler[2] = -euler[2];\n } else 
if(euler[1] < -M_PI/4){\n euler[0] = -euler[0];\n euler[1] = euler[1] + M_PI;\n euler[2] = -euler[2];\n }\n if(euler[2] > M_PI)\n euler[2] = euler[2]-2*M_PI;\n if(euler[2] < -M_PI)\n euler[2] = euler[2]+2*M_PI;\n}\n\n\nbool sonarMap::matchTwoPoints(PointCloudT::Ptr map, PointCloudT::Ptr scan, const pcl::KdTreeFLANN<PointT> &kdtree, Eigen::Affine3f &trans){\n // ROS_ERROR(\"MatcheTwoPoints\");\n std::vector<int> pi1(2);\n std::vector<float> pd1(2);\n std::vector<int> pi2(2);\n std::vector<float> pd2(2);\n if(kdtree.nearestKSearch(scan->points.at(0), 2, pi1, pd1) > 0){\n if(kdtree.nearestKSearch(scan->points.at(1), 2, pi2, pd2) > 0){\n // get the distance between the two points from the scan\n float dstScannedP = utils::Distance(scan->points.at(0), scan->points.at(1));\n // Check to see of the closest points is the same for both scanned points.\n if(pi1[0] != pi2[0]){\n // The points are not closest to the same points, Check the distance between the found points\n float dstFoundP = utils::Distance(map->points.at(pi1[0]), map->points.at(pi2[0]));\n if(std::abs(dstFoundP - dstScannedP) < std::min(0.1*dstFoundP, 1.0)){\n // close to being the same distance between the points.\n // Find transformation between the points\n trans = sonarMap::transformationBetweenPairsOfPoints(map->points.at(pi1[0]), map->points.at(pi2[0]), scan->points.at(0), scan->points.at(1));\n return true;\n }\n }else{\n // Need to figure out if any other combination can be used (pi1[0],pi2[1]), (pi1[1],pi2[0])\n float d12 = utils::Distance(map->points.at(pi1[0]), map->points.at(pi2[1]));\n float d21 = utils::Distance(map->points.at(pi1[1]), map->points.at(pi2[0]));\n float combined1 = 0;\n float combined2 = 0;\n if(std::abs(d12 - dstScannedP) < 0.5){\n // Combination 12 is possible\n // Find distance between both Matches \n float p10 = utils::Distance(scan->points.at(0), map->points.at(pi1[0]));\n float p21 = utils::Distance(scan->points.at(1), map->points.at(pi2[1]));\n combined1 = p10 + p21 + 
std::abs(p10 - p21);\n \n if(std::abs(d21 - dstScannedP) < 0.5){\n // Combination 12 is possible\n // Find distance between both Matches \n float p11 = utils::Distance(scan->points.at(0), map->points.at(pi1[1]));\n float p20 = utils::Distance(scan->points.at(1), map->points.at(pi2[0]));\n combined2 = p11 + p20 + std::abs(p11 - p20);\n if(combined2 < combined1){\n trans = sonarMap::transformationBetweenPairsOfPoints(map->points.at(pi1[1]), map->points.at(pi2[0]), scan->points.at(0), scan->points.at(1));\n return true;\n }\n }\n\n trans = sonarMap::transformationBetweenPairsOfPoints(map->points.at(pi1[0]), map->points.at(pi2[1]), scan->points.at(0), scan->points.at(1));\n return true;\n }\n\n }\n }\n }\n return false;\n}\n\nEigen::Affine3f sonarMap::transformationBetweenPairsOfPoints(PointT &a1, PointT &a2, PointT &b1, PointT &b2){\n // get rotation and translation on xy plane\n \n // Find center of target\n float target_x = (a1.x + a2.x)/2;\n float target_y = (a1.y + a2.y)/2;\n\n // Find center of start\n float start_x = (b1.x + b2.x)/2;\n float start_y = (b1.y + b2.y)/2;\n \n // find ideal angle\n // float target_angle = std::atan2(a2.y-a1.y, a2.x-a1.x);\n // float start_angle = std::atan2(b2.y-b1.y, b2.x-b1.x);\n // float angleDiff = target_angle - start_angle;\n\n // Eigen::Affine3f transformT1 = Eigen::Affine3f::Identity();\n // transformT1.translation() << -start_x, -start_y, 0; \n // Eigen::Affine3f transformR = Eigen::Affine3f::Identity();\n // transformR.rotate(Eigen::AngleAxisf (angleDiff, Eigen::Vector3f::UnitZ()));\n Eigen::Affine3f transform = Eigen::Affine3f::Identity();\n transform.translation() << target_x-start_x, target_y-start_y, 0; \n \n return transform;\n // return transformT2*transformR*transformT1;\n}\n\nvoid sonarMap::publishSonarTransform(Eigen::Affine3f translation){\n \n tf::StampedTransform stampedTr;\n Eigen::Affine3d trans;\n trans.matrix() = translation.matrix().cast<double>();\n tf::transformEigenToTF(trans, stampedTr);\n\n 
geometry_msgs::Transform m;\n tf::transformEigenToMsg (trans, m);\n pubTransform_->publish(m);\n\n // tf::TransformBroadcaster br;\n // br.sendTransform(tf::StampedTransform(stampedTr, ros::Time::now(), \"/odom_sonar\", \"/odom\"));\n // sb.updateBuffer(translation);\n\n}\n\nstd::vector<PointCloudT::Ptr> sonarMap::returnPillarPoints(void){\n std::vector<PointCloudT::Ptr> tmp;\n for(size_t i = 0; i < map->points.size(); ++i){\n if(map->points[i].intensity >= 2.0){\n tmp.push_back(pillarPoints[i]);\n }\n }\n return tmp;\n}\n\nstd::vector<PointCloudT::Ptr> sonarMap::returnFeaturePoints(void){\n std::vector<PointCloudT::Ptr> tmp;\n for(size_t i = 0; i < map->points.size(); ++i){\n if(map->points[i].intensity >= 2.0){\n tmp.push_back(featurePoints[i]);\n }\n }\n return tmp;\n}\n\n}" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 38, "blob_id": "386def4af333cf99b20399ce9660c9cac3ba68e5", "content_id": "e921f8d62d387fe82601ceacaa3b87fe9467b28f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 78, "license_type": "no_license", "max_line_length": 44, "num_lines": 2, "path": "/stevens_pier_project/mapClasses/catkin_generated/installspace/mapclasses-msg-extras.cmake", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "set(mapclasses_MESSAGE_FILES \"msg/buff.msg\")\nset(mapclasses_SERVICE_FILES \"\")\n" }, { "alpha_fraction": 0.6432983875274658, "alphanum_fraction": 0.6532366275787354, "avg_line_length": 28.547618865966797, "blob_id": "b886e1e4c28b6a18a251b005ee178d42d85adce8", "content_id": "29a29f27cefb0e5744d97d878e37369cbba5a102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3723, "license_type": "no_license", "max_line_length": 99, "num_lines": 126, "path": "/stevens_pier_project/mapClasses/include/mapClasses/scanBuffer/scanBuffer.cpp", "repo_name": "ignaciotb/smarc_data_tools", 
"src_encoding": "UTF-8", "text": "// inutile !!!! voir mapclasses/src/scanBuffer.cpp\n#include \"scanBuffer.h\"\n#include <pcl/kdtree/kdtree_flann.h>\n#include <pcl/filters/extract_indices.h>\n#include <pcl/segmentation/extract_clusters.h>\n#include <visualization_msgs/Marker.h>\n// #include <pcl/common/transforms.h>\n#include <pcl_ros/transforms.h>\n#include <algorithm>\n\nscanBuffer::scanBuffer(void){\n\n\tloadParams();\n\tfor(size_t i = 0; i < bufferSize_; ++i){\n\t\tPointCloudT::Ptr cloud_tmp (new PointCloudT());\n\t\tcloud_tmp->header.frame_id=\"buffer\";\n\t\tclouds_.push_back(cloud_tmp);\n\t\tcloudsCount_.push_back(-1);\n\t}\n}\n\nbool scanBuffer::newScan(const PointCloudT::Ptr scan, PointCloudT::Ptr out){\n\n\n\t// pcl_ros::transformPointCloud(\"/odom\", *scan, *scan, listener);\n\n\tidxMax = 0;\n\tbool isFull = false;\n for(size_t i = 0; i < clouds_.size(); ++i){\n\t\tif( cloudsCount_.at(i) >= 0 || i == 0){ // has been initalized\n\t\t\t*clouds_.at(i) += *scan;\n\t\t\tcloudsCount_.at(i)++;\n\t\t} else if( cloudsCount_.at(i-1) >= scanSize_/bufferSize_ ){ // Initialize it\n\t\t\t*clouds_.at(i) += *scan;\n\t\t\tcloudsCount_.at(i)++;\n\t\t} else { // Not time to initalize buffer\n\t\t\tbreak;\n\t\t}\n\n\n\t\t// If buffer is full, process the data.\n\t\tif(cloudsCount_.at(i) >= scanSize_){\n\t\t\tprocessBuffer(clouds_.at(i), out);\n\t\t\tclouds_.at(i)->points.clear();\n\t\t\tcloudsCount_.at(i) = 0;\n\t\t\tif(++idxMax >= bufferSize_) idxMax = 0;\n\t\t\tisFull = true;\n\t\t\tout->header.frame_id = \"buffer\";\n\t\t\tpcl_ros::transformPointCloud(\"/odom\", *out, *out, listener);\n\t\t}\n\t}\n\treturn isFull;\n}\n\n\nvoid scanBuffer::processBuffer(PointCloudT::Ptr cloud, PointCloudT::Ptr out){\n\n\tvisualization_msgs::Marker line_list;\n\tline_list.header.frame_id = \"buffer\";\n\tline_list.header.stamp = ros::Time::now();\n\tline_list.ns = \"lines\";\n\tline_list.type = visualization_msgs::Marker::LINE_LIST;\n\tline_list.scale.x = 0.1;\n\tline_list.color.b = 
1.0;\n\tline_list.color.a = 1.0;\n\n\t// Cluster the data\n\tpcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT>);\n\ttree->setInputCloud (cloud);\n\n\tstd::vector<pcl::PointIndices> cluster_indices;\n\tpcl::EuclideanClusterExtraction<PointT> ec;\n\tec.setClusterTolerance(ecTolerance_); // 2cm\n\tec.setMinClusterSize(ecMinClusterSize_);\n\tec.setMaxClusterSize(1000);\n\tec.setSearchMethod(tree);\n\tec.setInputCloud(cloud);\n\tec.extract(cluster_indices);\n\n\tint j = 0, idx;\n\tPointT tmp_max;\n\tstd::vector<pcl::PointIndices>::const_iterator it;\n\tint i = 0;\n\tfor(it = cluster_indices.begin(); it != cluster_indices.end(); ++it)\n\t{\n\t\tidx = 0;\n\t\ttmp_max.intensity = 0.0;\n\t\tstd::cout << \"Intensity: \";\n\t\t// Find highest intensity point within each cluster.\n\t\tfor(std::vector<int>::const_iterator pit = it->indices.begin(); pit != it->indices.end(); ++pit){\n\t\t\tstd::cout << cloud->points[*pit].intensity << \" \";\n\t\t\tif(cloud->points[*pit].intensity > tmp_max.intensity){\n\t\t\t\ttmp_max = cloud->points[*pit];\n\t\t\t}\n\t\t\t// out->points.push_back(cloud->points[*pit]);\n\t\t}\n\t\tstd::cout << \"\" << std::endl;\n\t\t// if(tmp_max.intensity > maxIntensity_){\n\t\tout->points.push_back(tmp_max);\n\t\tgeometry_msgs::Point p;\n\t\tp.x = cloud->points[it->indices.front()].x;\n\t\tp.y = cloud->points[it->indices.front()].y;\n\t\tp.z = cloud->points[it->indices.front()].z;\n\t\tline_list.points.push_back(p);\n\t\tp.x = cloud->points[it->indices.back()].x;\n\t\tp.y = cloud->points[it->indices.back()].y;\n\t\tp.z = cloud->points[it->indices.back()].z;\n\t\tline_list.points.push_back(p);\n\t\t// }\n\t}\n\tpubLines_->publish(line_list);\n}\n\nvoid scanBuffer::updateBuffer(Eigen::Affine3f trans){\n\tfor(size_t i = 0; i < clouds_.size(); ++i){\n\t\tpcl::transformPointCloud (*clouds_.at(i), *clouds_.at(i), trans);\n\t}\n}\n\nvoid scanBuffer::loadParams(void){\n\tscanSize_ = 120;\n\tbufferSize_ = 10;\n\tecTolerance_ = 
0.8;\n\tecMinClusterSize_ = 3;\n\tmaxIntensity_ = 90;\n}\n" }, { "alpha_fraction": 0.7910447716712952, "alphanum_fraction": 0.7940298318862915, "avg_line_length": 36.22222137451172, "blob_id": "000c9bcedce7212bbff6b9dfc39ea2e46a7579cf", "content_id": "a045a6fd97321f7c25bff91906d0a9e63e584bf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 335, "license_type": "no_license", "max_line_length": 112, "num_lines": 9, "path": "/stevens_pier_project/OctaveFile/README.md~", "repo_name": "ignaciotb/smarc_data_tools", "src_encoding": "UTF-8", "text": "# Presentation\nThis folder contained all the program to display the scan matching slam and graph-based slam.\n# Main programs\n* scan matchin slam results : \n>scanMatchingMap.m\n* graph-based slam results\n>g2oGraphDisplay.m\n\nAt the beginning of these files you can found a *TODO* rubric to customise the code and display your own results\n" } ]
41
wd18535470628/Crawlerjd
https://github.com/wd18535470628/Crawlerjd
ee6e643748ab830ea8323a0694aa4b4d8320725a
d05e4cb25a7168fce322b6af24e6d742e7e1ce85
b1c192468477b4893fd6f7a66f1e6bdee0516590
refs/heads/master
2020-03-20T13:03:48.797397
2018-06-15T06:09:38
2018-06-15T06:09:38
137,447,582
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.4735487699508667, "alphanum_fraction": 0.48112666606903076, "avg_line_length": 59.81739044189453, "blob_id": "3f070b2b1e66bc383a8e509a91896cc0850d0b97", "content_id": "c82c37ff5c9726cdb83533fcb87df7ed3a3cda1f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7218, "license_type": "permissive", "max_line_length": 908, "num_lines": 115, "path": "/auctionJdCrawler/mysql.py", "repo_name": "wd18535470628/Crawlerjd", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport MySQLdb\nimport time\n\nclass Mysql:\n # 数据库初始化\n def __init__(self):\n try:\n self.db = MySQLdb.connect('localhost', 'root', 'root', 'jd_crawler',3306)\n self.cur = self.db.cursor()\n except MySQLdb.Error, e:\n print self.getCurrentTime(), \"连接数据库错误,原因%d: %s\" % (e.args[0], e.args[1])\n def getCurrentTime(self):\n return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))\n def dateFormat(self,msec):\n date = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(msec/1000))\n return date\n def insertData(self,my_dict,dataTime):\n try:\n self.db.set_character_set('utf8')\n for col in my_dict.keys():\n if type(col) == unicode:\n col = col.decode(\"utf-8\").encode(\"utf-8\")\n if col == 'end':\n my_dict[col] = self.dateFormat(my_dict[col])\n if col == 'start':\n my_dict[col] = self.dateFormat(my_dict[col])\n if col == 'itemUrl':\n my_dict[col] = my_dict[col][2:]\n sqlCheck = 'SELECT count(*) FROM jd_auction_id where auction_id='+ str(my_dict['id'])\n self.cur.execute(sqlCheck)\n if self.cur._rows[0][0] == 0:\n #缓存表\n sqlAche = \"INSERT INTO jd_auction_id (auction_id) value (\"+ str(my_dict['id']) +\")\"\n sqlInfo = \"insert into %s (%s) VALUES (%s)\" % (\"jd_auction_info\", 
\"id,start_time,end_time,paimai_status,paimai_times,current_price,auction_type,court_id,bid_count,start_price,assess_price,title,corporate_agent,phone,data_time,main_status,service,buyerName,level,remark,position,enrollment,onlookers\",\n my_dict['id']+\",'\"+my_dict['start_time'] +\"','\"\n + my_dict['end_time']+\"',\"+ my_dict['paimai_status']\n +\",\"+my_dict['paimai_times']+\",\"+my_dict['current_price']\n +\",\"+my_dict[\"auction_type\"]+\",\"+my_dict['court_id']\n +\",\"+my_dict['bid_count']+\",'\"+my_dict['start_price']\n +\"','\"+my_dict['assess_price']+\"','\"+my_dict['title']\n +\"','\"+my_dict['corporate_agent']+\"','\"+my_dict['phone']\n +\"','\"+str(dataTime)+\"','1','\"+my_dict['service']\n +\"','\"+my_dict['buyerName']+\"','\"+my_dict['level']\n +\"','\"+my_dict['remark']+\"','\"+my_dict['position']+\"','\"+str(my_dict['enrollment'])+\"','\"+str(my_dict['onlookers'])+\"'\")\n print sqlInfo\n try:\n self.cur.execute(sqlAche)\n result = self.cur.execute(sqlInfo)\n insert_id = self.db.insert_id()\n self.db.commit()\n # 判断是否执行成功\n if result:\n return insert_id\n else:\n return 0\n except MySQLdb.Error, e:\n # 发生错误时回滚\n self.db.rollback()\n # 主键唯一,无法插入\n if \"key 'PRIMARY'\" in e.args[1]:\n print self.getCurrentTime(), \"数据已存在,未插入数据\"\n else:\n print self.getCurrentTime(), \"插入数据失败,原因 %d: %s\" % (e.args[0], e.args[1])\n else:\n sqlUpdate = 'update %s set '%(\"jd_auction_info\")\n sqlUpdate = sqlUpdate + 'start_time=\"' + my_dict['start_time'] + '\",end_time=\"'+my_dict['end_time']+'\",paimai_status='+ my_dict['paimai_status'] + ',paimai_times='+my_dict['paimai_times']+',current_price='+ my_dict['current_price']+',auction_type='+my_dict['auction_type'] + ',court_id='+my_dict['court_id']+ 
',bid_count='+my_dict['bid_count']+',start_price='+my_dict['start_price']+',assess_price='+my_dict['assess_price']+',title=\"'+my_dict['title']+'\",corporate_agent=\"'+my_dict['corporate_agent']+'\",phone=\"'+my_dict['phone']+'\",data_time=\"'+str(dataTime)+'\",main_status=\"2'+'\",service=\"'+str(my_dict['service'])+''+'\",buyerName=\"'+str(my_dict['buyerName'])+''+'\",level=\"'+str(my_dict['level'])+''+'\",remark=\"'+str(my_dict['remark'])+'\",position=\"'+str(my_dict['position'])+'\",enrollment=\"'+str(my_dict['enrollment'])+'\",onlookers=\"'+str(my_dict['onlookers'])+'\" where id= '+my_dict['id']\n print sqlUpdate\n result = self.cur.execute(sqlUpdate)\n insert_id = self.db.insert_id()\n self.db.commit()\n # 判断是否执行成功\n if result:\n return insert_id\n else:\n return 0\n except MySQLdb.Error, e:\n print self.getCurrentTime(), \"数据库错误,原因%d: %s\" % (e.args[0], e.args[1])\n def getKindList(self):\n try:\n self.db.set_character_set('utf8')\n sqlGetData = \"select id,auction_type from auction_kind \"\n self.cur.execute(sqlGetData)\n return list(self.cur._rows)\n except MySQLdb.Error, e:\n print self.getCurrentTime(), \"数据库错误,原因%d: %s\" % (e.args[0], e.args[1])\n def getCourtList(self):\n try:\n self.db.set_character_set('utf8')\n #sqlGetData = \"select id,court_id,court_province,court_city,court_name from court_info\"\n sqlGetData = \"select id,court_id,court_province,court_city,court_name from court_info where id>='2' and id<='1830'\"\n #sqlGetData = \"select id,court_id,court_province,court_city,court_name from court_info where id='294'\"\n self.cur.execute(sqlGetData)\n return list(self.cur._rows)\n except MySQLdb.Error, e:\n print self.getCurrentTime(), \"数据库错误,原因%d: %s\" % (e.args[0], e.args[1])\n def getStatusList(self):\n try:\n self.db.set_character_set('utf8')\n sqlGetData = \"select id,auction_status from auction_status where id!=7\"\n self.cur.execute(sqlGetData)\n return list(self.cur._rows)\n except MySQLdb.Error, e:\n print self.getCurrentTime(), 
\"数据库错误,原因%d: %s\" % (e.args[0], e.args[1])\n def getCheckData(self,dict):\n try:\n self.db.set_character_set('utf8')\n sqlGetData = 'SELECT count(*) FROM jd_auction_info where id='+ str(dict['id'])\n self.cur.execute(sqlGetData)\n if self.cur._rows[0][0] == 0:\n return 0\n else:\n return 1\n except MySQLdb.Error, e:\n print self.getCurrentTime(), \"数据库错误,原因%d: %s\" % (e.args[0], e.args[1])\n" }, { "alpha_fraction": 0.8709677457809448, "alphanum_fraction": 0.8709677457809448, "avg_line_length": 14.5, "blob_id": "1ba3d52408c1e381a778e22c23476abbec813bf7", "content_id": "86387016587724bf04a61d326ce21246b012e13c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "permissive", "max_line_length": 18, "num_lines": 2, "path": "/README.md", "repo_name": "wd18535470628/Crawlerjd", "src_encoding": "UTF-8", "text": "# Crawlerjd\npython爬虫抓取京东司法拍卖数据\n" } ]
2
bluepanee/dem-generator
https://github.com/bluepanee/dem-generator
48561f66edbd59f5da8ee5ce32f097d1ee5c3dbd
af484adc4a9a5db6b931b0cbd33d594eb78ec43e
f7ff28c2976b45a87b99860261973098099eeb00
refs/heads/master
2022-12-15T05:38:19.057058
2020-09-25T06:42:49
2020-09-25T06:42:49
298,343,236
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.837837815284729, "avg_line_length": 36, "blob_id": "3940c4f1ddd8bfbf6cbe8839f4a0f592c573d24d", "content_id": "dbb327c7029b28a93b94061e18094ab5b7d8bbf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/dem_generator/__init__.py", "repo_name": "bluepanee/dem-generator", "src_encoding": "UTF-8", "text": "from .dem_generator import Generator\n" }, { "alpha_fraction": 0.6873857378959656, "alphanum_fraction": 0.6983546614646912, "avg_line_length": 17.233333587646484, "blob_id": "61f9365c3e329cb503cb1223e99fd4775b630596", "content_id": "06b3a7c07f9c7e1e5feba54082ef6f1720209473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 547, "license_type": "no_license", "max_line_length": 79, "num_lines": 30, "path": "/README.md", "repo_name": "bluepanee/dem-generator", "src_encoding": "UTF-8", "text": "# dem-generator: Shitcode Edition\nA simple shitcode-lib for creating demotivators\n\n**Install:**\n```\ngit clone https://github.com/bluepanee/dem-generator\n```\n```\ncd dem-generator\n```\n```\npython setup.py install\n```\n\n**Usage:**\n```python\nimport dem_generator\nfrom PIL import Image\n\nimg = Image.open(\"test.jpg\")\ndem = dem_generator.Generator()\n\ntop_text = \"Top text\"\nbottom_text = \"Bottom text\"\ncopyright_ = \"@welcomeza\"\nmin_size, max_size = 720, 720\n\nresult = dem.create(img, top_text, bottom_text, copyright_, min_size, max_size)\nresult.show()\n```\n" }, { "alpha_fraction": 0.6107594966888428, "alphanum_fraction": 0.6170886158943176, "avg_line_length": 21.571428298950195, "blob_id": "0b17f9f29de3832b486ee71cdb3d8956c1d00b16", "content_id": "fd1b464f9f37a22100c67edb0b42735a935b9a41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, 
"license_type": "no_license", "max_line_length": 48, "num_lines": 14, "path": "/setup.py", "repo_name": "bluepanee/dem-generator", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(\n name='dem-generator',\n version='1.0',\n packages=['dem_generator'],\n package_data={'dem_generator': ['fonts/*']},\n include_package_data=True,\n url='https://github.com/bluepanee',\n license='',\n author='bluepanee',\n author_email='',\n description=''\n)\n" }, { "alpha_fraction": 0.6964856386184692, "alphanum_fraction": 0.7156549692153931, "avg_line_length": 23.076923370361328, "blob_id": "0c4edfb95793d686a9fd8bbaf29eb54f0b2cd340", "content_id": "e5f161b32657465d8e7a2eac532aa67f8ac88dec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/tests/example.py", "repo_name": "bluepanee/dem-generator", "src_encoding": "UTF-8", "text": "import dem_generator\nfrom PIL import Image\n\nimg = Image.open(\"tests/test.jpg\")\ndem = dem_generator.Generator()\n\ntop_text = \"Top text\"\nbottom_text = \"Bottom text\"\ncopyright_ = \"@welcomeza\"\nmin_size, max_size = 720, 720\n\nresult = dem.create(img, top_text, bottom_text, copyright_, min_size, max_size)\nresult.show()\n" }, { "alpha_fraction": 0.47493818402290344, "alphanum_fraction": 0.4899977445602417, "avg_line_length": 39.81651306152344, "blob_id": "4715656e4c401c4cbbbbe2cb3af6c6302c3d2abf", "content_id": "a69a63e72f4700a77bc33c21f2d710937110b447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4449, "license_type": "no_license", "max_line_length": 115, "num_lines": 109, "path": "/dem_generator/dem_generator.py", "repo_name": "bluepanee/dem-generator", "src_encoding": "UTF-8", "text": "# ---------------------------------------------------- #\n# !!! SHITCODE ALERT !!! 
#\n# ---------------------------------------------------- #\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport textwrap\nimport os\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\nclass Generator:\n def __init__(self):\n self.img = None\n\n def create(self, img: Image, top_text: str, bottom_text=\"\", copyright_=\"\", max_size: int = 1280,\n min_size: int = 720) -> Image:\n self.img = img.convert(\"RGBA\")\n self.__resize_image(max_size, min_size)\n h, w = self.img.size\n\n if w > h:\n border_width = round(w * 8 / 100)\n scale = w * 1 / 100\n else:\n border_width = round(h * 8 / 100)\n scale = h * 1 / 100\n\n top_font, top_multiline_text = self.__wrap_text(f\"{dir_path}/fonts/times-new-roman.ttf\",\n round(border_width * 1.5), top_text)\n\n bottom_font, bottom_multiline_text = self.__wrap_text(f\"{dir_path}/fonts/tahoma.ttf\",\n round(border_width * 0.6), bottom_text)\n\n copyright_font = ImageFont.truetype(f\"{dir_path}/fonts/fulbo.otf\", round(scale))\n\n copyright_h, copyright_w = copyright_font.getsize(copyright_)\n top_h, top_w = top_font.getsize_multiline(top_multiline_text)\n\n bottom_h, bottom_w = bottom_font.getsize_multiline(bottom_multiline_text)\n\n background = Image.new(\"RGB\", (border_width * 3 + h, w + top_w + bottom_w + border_width * 3))\n background_h, background_w = background.size\n\n background.paste(self.img, (round((background_h - h) / 2), border_width), self.img)\n\n d = ImageDraw.Draw(background)\n\n d.rectangle(((background_h - h) / 2 - (scale * 1.1), border_width - (scale * 1.1), # Image outline\n (background_h + h) / 2 + (scale * 1.1), (w + border_width) + (scale * 1.1)),\n width=round(scale * 42 / 100))\n\n d.rectangle(((background_h - h) / 2 - copyright_w / 4, (w + border_width), # Copyright background\n (background_h - h) / 2 + copyright_h + copyright_w / 4,\n (w + border_width + copyright_w)), fill=0)\n\n d.text(((background_h - h) / 2, (w + border_width) + copyright_w / 4), # Copyright text\n text=copyright_,\n 
font=copyright_font)\n\n d.multiline_text(((background_h - top_h) / 2, w + round(border_width * 1.7)), # Top text\n text=top_multiline_text,\n font=top_font,\n align=\"center\")\n d.multiline_text(((background_h - bottom_h) / 2, w + round(border_width * 2.1) + top_w), # Bottom text\n text=bottom_multiline_text,\n font=bottom_font,\n align=\"center\")\n\n return background\n\n \"\"\"Proportional resize an image by min & max size\"\"\"\n def __resize_image(self, max_size, min_size):\n h, w = self.img.size\n\n if h > max_size or w > max_size:\n if h > w:\n new_w = round(w / (h / max_size))\n self.img = self.img.resize((max_size, new_w), )\n else:\n new_h = round(h / (w / max_size))\n self.img = self.img.resize((new_h, max_size))\n\n elif h < min_size and w < min_size:\n if h > w:\n new_w = round(w * (min_size / h))\n self.img = self.img.resize((min_size, new_w))\n else:\n new_h = round(h * (min_size / w))\n self.img = self.img.resize((new_h, min_size))\n\n def __wrap_text(self, font_name: str, font_size: int, text: str):\n font = ImageFont.truetype(font_name, font_size)\n text_height = font.getsize_multiline(text)[0]\n font_size = font.size\n x = 0\n\n while text_height > self.img.size[0]:\n font_size -= round(font_size * 0.02)\n font = ImageFont.truetype(font_name, font_size)\n text_height = font.getsize_multiline(text)[0]\n\n if x >= font_size:\n y = text_height / self.img.size[0]\n text = textwrap.fill(text, round(len(text) / y))\n return font, text\n x += 1\n\n return font, text\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.71875, "avg_line_length": 15.5, "blob_id": "32c8c8a354df98ba7aeacae42259ddfaabeb1ccd", "content_id": "599a99e926b12e96d2c600a79e750f235e928e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 32, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/requirements.txt", "repo_name": "bluepanee/dem-generator", "src_encoding": "UTF-8", "text": 
"Pillow~=7.2.0\nsetuptools~=50.3.0" } ]
6
todokku/IP-address-auth-system
https://github.com/todokku/IP-address-auth-system
ec7a3506234616b8aa26a50adb0e10fb1e992f5f
61962ef1df0f28b4595227784e629526b08226fc
0ba4b70da920c3d953455cf5c5213f942fa2aaff
refs/heads/master
2022-09-30T01:22:03.629920
2020-06-03T19:28:23
2020-06-03T19:28:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5930599570274353, "alphanum_fraction": 0.5962145328521729, "avg_line_length": 31.36842155456543, "blob_id": "13ec7f0b368f08cfec72e87012ff44c5af5dcffa", "content_id": "d1e4974cbdbc8ef697c4e2064fbeb131768ae3e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 113, "num_lines": 19, "path": "/Auth.py", "repo_name": "todokku/IP-address-auth-system", "src_encoding": "UTF-8", "text": "from colorama import Fore, init, Style\r\nimport requests\r\nimport ctypes\r\nimport os\r\n\r\ndef Auth():\r\n IP = requests.get('http://api.ipify.org/').text\r\n r = requests.get('https://pastebin.com/raw/...').text #Raw Pastebin URL where the IP addresses will be stored\r\n if IP in r:\r\n pass\r\n else:\r\n os.system('cls')\r\n ctypes.windll.kernel32.SetConsoleTitleW('Unauthorized')\r\n print(Fore.RED + '\\n -- ' + Fore.WHITE + Style.BRIGHT + 'IP address unauthorized!')\r\n print(Style.RESET_ALL + Fore.RED + ' -- ' + Fore.WHITE + Style.BRIGHT + 'Your IP: ' + IP)\r\n input()\r\n quit()\r\n\r\nAuth()\r\n" } ]
1
GustavoAT/back-end-challenge-2021
https://github.com/GustavoAT/back-end-challenge-2021
fdf6e46d786ce78d54ec0656075b80945c2eac71
846cce0beec4e60e9cb1476198bb74f4aa316b0f
fdbb820fb97936f68ce3a3a6a3226c3c89f95765
refs/heads/master
2023-07-09T13:06:33.971298
2021-08-15T22:19:27
2021-08-15T22:19:27
387,282,214
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5656160712242126, "alphanum_fraction": 0.6137535572052002, "avg_line_length": 30.178571701049805, "blob_id": "a1d2c2c31049f5ce138223b88ef9b4da5dd3c4a6", "content_id": "6c2a73ae3a3c498cfb176132010e096113d171fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1747, "license_type": "no_license", "max_line_length": 83, "num_lines": 56, "path": "/tests/test_endpoints.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from typing import List\nfrom fastapi.testclient import TestClient\nfrom app.main import app\n\n\n\n\nclient = TestClient(app)\n\ndef test_main():\n '''Test root of API'''\n response = client.get('/')\n assert response.status_code == 200\n assert response.json()['message'] == 'REST Back-end Challenge 20201209 Running'\n \n\ndef test_get_users():\n '''Test the /users endpoint'''\n response = client.get('/users/')\n assert response.status_code == 200\n assert type(response.json()) == list\n\n\ndef test_get_user():\n '''Test the /users/:user_id endpoint'''\n response = client.get('/users/')\n response_list = response.json()\n if len(response_list) > 0:\n user_id = response_list[0]['id']\n response = client.get(f'/users/{user_id}')\n assert response.status_code == 200\n\n\ndef test_create_update_delete_user():\n '''Test creation, updating and deletion of an user'''\n user_data = {\n \"gender\": \"male\",\n \"name_first\": \"J\",\n \"name_last\": \"D\",\n \"email\": \"[email protected]\",\n \"login_uuid\": \"20c83553-1551-4e76-2234-4181ea561139\",\n \"login_username\": \"goodusername\",\n \"login_password\": \"mypassisnotsafe\",\n \"dob_date\": \"2000-04-01T15:52:08\",\n \"imported_t\": \"2021-08-06T21:20:00\"\n }\n response = client.post('/users/', json=user_data)\n assert response.status_code == 200\n user_id = response.json()['id']\n user_data['name_last'] = 'Doe'\n response = client.put(f'/users/{user_id}', json=user_data)\n assert 
response.status_code == 200\n assert response.json()['name_last'] == 'Doe'\n response = client.delete(f'/users/{user_id}')\n assert response.status_code == 200\n assert response.json()['message'] == f'Usuário de id {user_id} excluído'" }, { "alpha_fraction": 0.6300092935562134, "alphanum_fraction": 0.6314072608947754, "avg_line_length": 31.029850006103516, "blob_id": "fc92fe1fb7260fa2b80e1f3f205a4313181a0357", "content_id": "8b78439b2a4a4bb9e1d89ecd0bfe1ed4501f16a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2146, "license_type": "no_license", "max_line_length": 77, "num_lines": 67, "path": "/app/persistence/userDAO.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from sqlalchemy.orm import Session\nfrom sqlalchemy import update\nfrom . import models, pdmodels\nfrom .security.hash import create_salt, get_hashed_password\n\n\ndef get_user(db: Session, user_id: int):\n return db.query(models.User).filter(models.User.id == user_id).first()\n\n\ndef get_user_by_unique_data(db: Session, user: pdmodels.UserCreate):\n query_user = db.query(models.User).filter(\n models.User.email == user.email,\n models.User.login_username == user.login_username,\n models.User.login_uuid == user.login_uuid\n )\n db_user = query_user.first()\n return db_user\n\n\ndef get_users(db: Session, skip: int = 0, limit: int = 20):\n return db.query(models.User).slice(skip, limit).all()\n\n\ndef create_user(db: Session, user: pdmodels.UserCreate):\n if user.login_salt is None or user.login_salt == '':\n user.login_salt = create_salt()\n user.login_password = get_hashed_password(user.login_password,\n user.login_salt)\n db_user = models.User(**user.dict())\n db.add(db_user)\n db.commit()\n db.refresh(db_user)\n return db_user\n\n\ndef update_user(db: Session, user_id: int, user: pdmodels.UserBase):\n update_statement = update(models.User).where(models.User.id == user_id).\\\n 
values(**user.dict()).execution_options(synchronize_session='fetch')\n db.execute(update_statement)\n db.commit()\n return get_user(db, user_id)\n\n\ndef upsert_user(db: Session, user: pdmodels.UserCreate):\n if user.login_salt is None or user.login_salt == '':\n user.login_salt = create_salt()\n user.login_password = get_hashed_password(user.login_password,\n user.login_salt)\n db_user = get_user_by_unique_data(db, user)\n if db_user:\n update_user(db, db_user.id, user)\n else:\n db_user = models.User(**user.dict())\n db.add(db_user)\n db.commit()\n db.refresh(db_user)\n return db_user\n\n\ndef delete_user(db: Session, user_id: int):\n db_user = get_user(db, user_id)\n if db_user:\n db.delete(db_user)\n db.commit()\n return True\n return False\n" }, { "alpha_fraction": 0.6955380439758301, "alphanum_fraction": 0.7401574850082397, "avg_line_length": 21.47058868408203, "blob_id": "3a5d4232a6d0d607e2f9229580fbc621ea2da026", "content_id": "4d9b3d0a09bd85c18e0de312b3046a101b2cbef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 381, "license_type": "no_license", "max_line_length": 71, "num_lines": 17, "path": "/Dockerfile", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "FROM python:3.9.6\n\nCOPY requirements.txt ./requirements.txt\n\nRUN pip install -r requirements.txt\n\nRUN apt-get update && apt-get install -yqq --no-install-recommends cron\n\nCOPY app/ /back-end-challenge-2021/app\n\nCOPY cron/ /cron\n\nRUN crontab /cron/cronfile && chmod u+x /cron/apigetuser.sh\n\nWORKDIR /back-end-challenge-2021\n\nCMD cron && uvicorn app.main:app --host 0.0.0.0 --port 80" }, { "alpha_fraction": 0.6932772994041443, "alphanum_fraction": 0.6932772994041443, "avg_line_length": 33, "blob_id": "52b3d83ca1112d948e9fecc1f7ff193c9aa89bce", "content_id": "1b6729db73edd98c7aff841f626fbbb5fa49de55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", 
"length_bytes": 242, "license_type": "no_license", "max_line_length": 57, "num_lines": 7, "path": "/cron/apigetuser.sh", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "#Save next page from randomuser and output\noutput=$(python -m app.apiclient.randomuser savenextpage)\nif [[ $output == *'Página'*'salva'* ]]; then\n echo $(date): $output\nelse\n echo $(date): \"Erro na execução do módulo randomuser\"\nfi\n" }, { "alpha_fraction": 0.7665995955467224, "alphanum_fraction": 0.7665995955467224, "avg_line_length": 30.0625, "blob_id": "27e6bde883aab6a2cd818e9677b37377d17a7e73", "content_id": "d2694665baf1f6d5d6622b0bc704434820a272a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/app/persistence/database.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom ..settings import SQLALCHEMY_DATABASE_URL\n\n\nif 'sqlite://' in SQLALCHEMY_DATABASE_URL:\n engine = create_engine(\n SQLALCHEMY_DATABASE_URL, connect_args={\"check_same_thread\": False}\n )\nelse:\n engine = create_engine(SQLALCHEMY_DATABASE_URL)\n\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine,)\n\nBase = declarative_base()\n" }, { "alpha_fraction": 0.5963506102561951, "alphanum_fraction": 0.6016371250152588, "avg_line_length": 28.918367385864258, "blob_id": "49c1adb7314388ed4d3df83cbde450edff21381d", "content_id": "e5cdb38a7be8b6f8cdca4af502bc07cf8422af81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5870, "license_type": "no_license", "max_line_length": 79, "num_lines": 196, "path": "/app/apiclient/randomuser.py", "repo_name": "GustavoAT/back-end-challenge-2021", 
"src_encoding": "UTF-8", "text": "import sys\nimport requests\nfrom pydantic import parse_obj_as\nfrom ..persistence.models import Pag\nfrom ..persistence.pdmodels import UserCreate\nfrom ..persistence.database import SessionLocal\nfrom ..persistence.userDAO import upsert_user\nfrom ..persistence.pagDAO import get_first_pag, create_pag\nfrom datetime import datetime\n\n\nRANDOMUSER_URL = 'https://randomuser.me/api/'\nRANDOMUSER_NESTED_LEVELS = 2\n\n\ndef get_users(n_users: int = 10, seed: str = None, page: int = None):\n \"\"\"Get users from randomuser API\n\n Args:\n n_users (int, optional): Number of results. Defaults to 10.\n seed (str, optional): Seed to find the same users again. Defaults to\n None.\n page (int, optional): Page of results. Defaults to None.\n\n Returns:\n Response\n \"\"\"\n params = {'nat': 'BR,CA,ES,GB,NZ,US'}\n if seed:\n params['seed'] = seed\n if page:\n params['page'] = page\n if n_users > 1:\n params['results'] = n_users\n result = requests.get(RANDOMUSER_URL, params=params)\n return result\n\n\ndef save_random_users(n_users: int = 10):\n \"\"\"Save new random users on the database\n\n Get a number of users from randomuser API and save them on the\n database.\n\n Args:\n n_users (int, optional): Number os users to get. Defaults to 10.\n \"\"\"\n result = get_users(n_users)\n if result.status_code == 200:\n save_users(result)\n\n\ndef save_random_users_paginated():\n \"\"\"Save random users on the database folowing a pagination rule\n\n The pagination rule should be previously saved on the database.\n The actual page is retrieved and next page is requested to\n randomuser API. 
Case all pages from pagination rule is done,\n next page would be 1.\n\n Returns:\n int: Page number saved\n \"\"\"\n with SessionLocal() as db:\n pagination = get_first_pag(db)\n total = pagination.actual_page * pagination.step\n step = pagination.step\n if total >= pagination.total_records:\n page = 1\n else:\n page = pagination.actual_page + 1\n total = page * step\n difference = total - pagination.total_records\n if difference > 0:\n step -= difference\n result = get_users(step, pagination.seed, page)\n if result.status_code == 200:\n pagination.actual_page = page\n pagination.date = datetime.now()\n db.commit()\n save_users(result)\n return page\n return None\n\n\ndef save_users(request_result):\n \"\"\"Save users on the database\n\n Args:\n request_result (Response): response object from randomuser API\n \"\"\"\n users_list = request_result.json()['results']\n users_list = [\n flatten_many_levels(d, RANDOMUSER_NESTED_LEVELS) for d in users_list\n ]\n users_list = [add_imported_time(d) for d in users_list]\n users = parse_obj_as(list[UserCreate], users_list)\n with SessionLocal() as db:\n for user in users:\n upsert_user(db, user)\n\n\ndef flatten_one_level(target: dict):\n \"\"\"Make second level from nested dictionary equal to the first level\n\n Flatten a dictionary by find inner dictionaries and unpack it.\n The inner keys are concated to outer key.\n\n Args:\n target (dict): dictionary to be flatten\n\n Returns:\n dict: one level flatten dict\n \"\"\"\n flat_dict = {}\n for key in target:\n if type(target[key]) == dict:\n for key2, value in target[key].items():\n flat_dict[key + '_' + key2] = value\n else:\n flat_dict[key] = target[key]\n return flat_dict\n\n\ndef flatten_many_levels(target: dict, levels: int = 2):\n \"\"\"Make nested levels on a dictionary equal to the first\n\n Args:\n target (dict): dictionary to be flatten\n levels (int, optional): nested levels on the dictionary. 
Defaults to 2.\n\n Returns:\n dict: n levels flatten dict\n \"\"\"\n flat_dict = target\n for _ in range(levels):\n flat_dict = flatten_one_level(flat_dict)\n return flat_dict\n\n\ndef add_imported_time(target: dict):\n '''Add imported time to a dict'''\n target['imported_t'] = datetime.now()\n return target\n\n\ndef init_pagination(step: int, total: int):\n \"\"\"Create a pagination rule on the database\n\n Args:\n step (int): Records per page\n total (int): Total records to request\n \"\"\"\n pagination = Pag()\n pagination.actual_page = 0\n pagination.step = step\n pagination.total_records = total\n pagination.date = datetime.now()\n rec = get_users(1)\n pagination.seed = rec.json()['info']['seed']\n with SessionLocal() as db:\n create_pag(db, pagination)\n\n\nif __name__ == '__main__':\n command = sys.argv[1]\n if command == 'initpag':\n if len(sys.argv) > 3:\n try:\n step = int(sys.argv[2])\n total = int(sys.argv[3])\n init_pagination(step, total)\n print(\n f'Paginação configurada para {total} ' +\n f'usuarios de {step} em {step}'\n )\n except TypeError:\n print('Valores de passo e total devem ser inteiros positivos')\n else:\n print('Argumentos ausentes para esse comando')\n if command == 'save':\n try:\n number_users = int(sys.argv[2])\n save_random_users(number_users)\n print(f'{number_users} usuarios inseridos ou atualizados no banco')\n except TypeError:\n print('Insira um valor inteiro positivo como argumento')\n if command == 'savenextpage':\n r = save_random_users_paginated()\n if r:\n print(f'Página {r} salva')\n else:\n print(\n 'Erro no salvamento, não foi possível ' +\n 'obter usuários do randomuser'\n )\n" }, { "alpha_fraction": 0.6570069193840027, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 31.56338119506836, "blob_id": "1247f9430c4c97793012a96ef8ed987cac5df039", "content_id": "84d82f8f29e30d7aa30abd42248ab5fcbb38cf19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2312, 
"license_type": "no_license", "max_line_length": 68, "num_lines": 71, "path": "/app/persistence/models.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "import sys\nfrom sqlalchemy import Column, Integer, String, Enum, Float\nfrom sqlalchemy import DateTime, SmallInteger\nfrom .database import Base, engine\nfrom .enumerators import GendersEnum, StatusEnum\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True, index=True)\n gender = Column(Enum(GendersEnum))\n name_title = Column(String(4))\n name_first = Column(String(30), nullable=False)\n name_last = Column(String(30), nullable=False)\n location_street = Column(String(40))\n location_city = Column(String(80))\n location_state = Column(String(40))\n location_postcode = Column(String(20))\n location_coordinates_latitude = Column(Float)\n location_coordinates_longitude = Column(Float)\n location_timezone_offset = Column(String(6))\n location_timezone_description = Column(String(80))\n email = Column(String(60), unique=True)\n login_uuid = Column(String(40), unique=True)\n login_username = Column(String(40), unique=True, nullable=False)\n login_password = Column(String(70), nullable=False)\n login_salt = Column(String(20), nullable=False)\n dob_date = Column(DateTime, nullable=False)\n dob_age = Column(SmallInteger)\n registered_date = Column(DateTime)\n registered_age = Column(SmallInteger)\n phone = Column(String(15))\n cell = Column(String(15))\n id_name = Column(String(10))\n id_value = Column(String(20))\n picture_large = Column(String(60))\n picture_medium = Column(String(60))\n picture_thumbnail = Column(String(60))\n nat = Column(String(2))\n imported_t = Column(DateTime, nullable=False)\n status = Column(Enum(StatusEnum), nullable=False)\n\n\nclass Pag(Base):\n __tablename__ = \"pagination\"\n\n id = Column(Integer, primary_key=True, index=True)\n seed = Column(String(40))\n actual_page = Column(Integer)\n step = Column(Integer)\n 
total_records = Column(Integer)\n date = Column(DateTime)\n\n\ndef create_tables():\n Base.metadata.create_all(bind=engine)\n\n\ndef delete_tables():\n Base.metadata.drop_all(bind=engine)\n\n\nif __name__ == '__main__':\n if sys.argv[1]:\n if sys.argv[1] == 'createtables':\n delete_tables()\n create_tables()\n print('Tabelas criadas')\n else:\n print('Este comando espera pelo menos um argumento')\n" }, { "alpha_fraction": 0.7551020383834839, "alphanum_fraction": 0.7591836452484131, "avg_line_length": 27.882352828979492, "blob_id": "0694593b5caadc10994644866914fa1ed2bffa0f", "content_id": "124449694d0ffad6dbc71ba06c37e77e156f18ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 77, "num_lines": 17, "path": "/app/settingsexample.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "\"\"\" Environment settings\n\nEdit database credentials\nuser, password, host and database on the url, \nthe APIkey and so on, then\nrename the file to 'settings.py'.\nThe url is for MySQL, to use other database engines,\nsearch documentation on SQLAlchemy:\nhttps://docs.sqlalchemy.org/en/14/dialects/index.html\n\"\"\"\n\n#Use format:\n#mysql+pymysql://[user]:[password]@[host][/database][?options]\n\nSQLALCHEMY_DATABASE_URL = 'mysql+pymysql://root:rootpass@db/backendchallenge'\n\nAPI_KEY = 'mysecretapikey'" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6542553305625916, "avg_line_length": 34.56756591796875, "blob_id": "aff2b5ad8d6db59559656ad8a1a47934debeb940", "content_id": "f3eaa6672635343179334f6642c4c018578c6980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2646, "license_type": "no_license", "max_line_length": 78, "num_lines": 74, "path": "/app/routes/users.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from typing 
import List\nfrom fastapi import APIRouter\nfrom fastapi import Depends, HTTPException\nfrom fastapi.param_functions import Header\nfrom sqlalchemy.orm import Session\nfrom ..persistence.database import SessionLocal\nfrom ..persistence import pdmodels, userDAO\nfrom ..settings import API_KEY\n\n\nrouter = APIRouter()\n\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\[email protected]('/users/', response_model=List[pdmodels.User])\nasync def get_users(skip: int = 0, limit: int = 10,\n db: Session = Depends(get_db), key: str = Header(None)):\n if key != API_KEY:\n raise HTTPException(status_code=401, detail='Não autorizado')\n users = userDAO.get_users(db, skip, limit)\n return users\n\n\[email protected]('/users/', response_model=pdmodels.User)\nasync def create_user(user: pdmodels.UserCreate,\n db: Session = Depends(get_db), key: str = Header(None)):\n if key != API_KEY:\n raise HTTPException(status_code=401, detail='Não autorizado')\n db_user = userDAO.get_user_by_unique_data(db, user)\n if db_user:\n raise HTTPException(status_code=400,\n detail='e-mail, login ou uuid já cadastrado')\n return userDAO.create_user(db, user)\n\n\[email protected]('/users/{user_id}', response_model=pdmodels.User)\nasync def get_user(user_id: int, db: Session = Depends(get_db),\n key: str = Header(None)):\n if key != API_KEY:\n raise HTTPException(status_code=401, detail='Não autorizado')\n db_user = userDAO.get_user(db, user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='Usuário não encontrado')\n return db_user\n\n\[email protected]('/users/{user_id}')\nasync def update_user(user_id: int, user: pdmodels.UserBase,\n db: Session = Depends(get_db), key: str = Header(None)):\n if key != API_KEY:\n raise HTTPException(status_code=401, detail='Não autorizado')\n db_user = userDAO.get_user(db, user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='Usuário não encontrado')\n return 
userDAO.update_user(db, user_id, user)\n\n\[email protected]('/users/{user_id}')\nasync def delete_user(user_id: int, db: Session = Depends(get_db),\n key: str = Header(None)):\n if key != API_KEY:\n raise HTTPException(status_code=401, detail='Não autorizado')\n success = userDAO.delete_user(db, user_id)\n if not success:\n raise HTTPException(status_code=404, detail='Usuário não encontrado')\n return {'message': f'Usuário de id {user_id} excluído'}\n" }, { "alpha_fraction": 0.6597937941551208, "alphanum_fraction": 0.6597937941551208, "avg_line_length": 23.25, "blob_id": "469a8eb669aadc8b70b40c0bb252566a9f12ae1e", "content_id": "0eab1ac8932845c9c142c25ff1e37047964abc15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 66, "num_lines": 36, "path": "/app/persistence/pagDAO.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from sqlalchemy.orm import Session\nfrom .models import Pag\n\n\ndef get_pag(db: Session, pag_id: int):\n return db.query(Pag).filter(Pag.id == pag_id).first()\n\n\ndef get_first_pag(db: Session):\n return db.query(Pag).first()\n\n\ndef create_pag(db: Session, pagination: Pag):\n db.add(pagination)\n db.commit()\n db.refresh(pagination)\n return pagination\n\n\ndef update_pag(db: Session, pagination: Pag):\n db_pag = db.query(Pag).filter(Pag.id == pagination.id).first()\n db_pag.seed = pagination.seed\n db_pag.actual_page = pagination.actual_page\n db_pag.step = pagination.step\n db_pag.total_records = pagination.total_records\n db_pag.date = pagination.date\n db.commit()\n db.refresh(db_pag)\n return db_pag\n\n\ndef delete_pag(db: Session, pag_id: int):\n db_pag = get_pag(db, pag_id)\n if db_pag:\n db.delete(db_pag)\n db.commit()\n" }, { "alpha_fraction": 0.4720149338245392, "alphanum_fraction": 0.6940298676490784, "avg_line_length": 15.242424011230469, "blob_id": 
"2a9105ddaf2ac8026c9e06e45851fb05bc9ada56", "content_id": "379417739ba7a87f256eb8458415c617f9d544c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 536, "license_type": "no_license", "max_line_length": 27, "num_lines": 33, "path": "/requirements.txt", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "asgiref==3.4.1\nattrs==21.2.0\ncertifi==2021.5.30\ncffi==1.14.6\ncharset-normalizer==2.0.3\nclick==8.0.1\ncryptography==3.4.7\nfastapi==0.66.0\ngreenlet==1.1.0\nh11==0.12.0\nhttptools==0.2.0\nidna==3.2\niniconfig==1.1.1\npackaging==21.0\npluggy==0.13.1\npy==1.10.0\npycparser==2.20\npydantic==1.8.2\nPyMySQL==1.0.2\npyparsing==2.4.7\npytest==6.2.4\npython-dotenv==0.18.0\nPyYAML==5.4.1\nrequests==2.26.0\nSQLAlchemy==1.4.21\nstarlette==0.14.2\ntoml==0.10.2\ntyping-extensions==3.10.0.0\nurllib3==1.26.6\nuvicorn==0.14.0\nuvloop==0.15.3\nwatchgod==0.7\nwebsockets==9.1\n" }, { "alpha_fraction": 0.6829268336296082, "alphanum_fraction": 0.7219512462615967, "avg_line_length": 17.727272033691406, "blob_id": "ef531cffdc07a29cd1c0ddd5edc44a01fde1c806", "content_id": "1a65cc8896f28be2173a4e4e1c1bdf150a318536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/app/main.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI\nfrom .routes import users\n\n\napp = FastAPI()\n\napp.include_router(users.router)\n\[email protected]('/')\nasync def root():\n return {'message': 'REST Back-end Challenge 20201209 Running'}" }, { "alpha_fraction": 0.6269429922103882, "alphanum_fraction": 0.6269429922103882, "avg_line_length": 15.083333015441895, "blob_id": "6a2420e4a6cc8b001bcb2c6f7942062179429089", "content_id": "d7ea8d5d6de89e987fdbab3dd508aa81184e1b59", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 29, "num_lines": 12, "path": "/app/persistence/enumerators.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from enum import Enum\n\n\nclass GendersEnum(str, Enum):\n male = 'male'\n female = 'female'\n\n\nclass StatusEnum(str, Enum):\n draft = 'draft'\n trash = 'trash'\n published = 'published'\n" }, { "alpha_fraction": 0.6950704455375671, "alphanum_fraction": 0.6950704455375671, "avg_line_length": 28.58333396911621, "blob_id": "34a7256c9a1f018f04dfa1b5fa1964a6122e4270", "content_id": "af7a523ff4aeeaf99e62c92755e0e4f933cb88fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1420, "license_type": "no_license", "max_line_length": 58, "num_lines": 48, "path": "/app/persistence/pdmodels.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom typing import Optional\nfrom pydantic import BaseModel\nfrom .enumerators import GendersEnum, StatusEnum\n\n\nclass UserBase(BaseModel):\n gender: GendersEnum\n name_title: Optional[str] = None\n name_first: Optional[str] = None\n name_last: Optional[str] = None\n location_street: Optional[str] = None\n location_city: Optional[str] = None\n location_state: Optional[str] = None\n location_postcode: Optional[str] = None\n location_coordinates_latitude: Optional[float] = None\n location_coordinates_longitude: Optional[float] = None\n location_timezone_offset: Optional[str] = None\n location_timezone_description: Optional[str] = None\n email: str\n login_uuid: str\n login_username: str\n dob_date: datetime\n dob_age: Optional[int] = None\n registered_date: Optional[datetime] = None\n registered_age: Optional[int] = None\n phone: Optional[str] = None\n cell: Optional[str] = None\n id_name: Optional[str] = None\n id_value: Optional[str] = None\n picture_large: 
Optional[str] = None\n picture_medium: Optional[str] = None\n picture_thumbnail: Optional[str] = None\n nat: Optional[str] = None\n imported_t: datetime\n status: StatusEnum = StatusEnum.draft\n\n\nclass UserCreate(UserBase):\n login_password: str\n login_salt: Optional[str] = None\n\n\nclass User(UserBase):\n id: int\n\n class Config:\n orm_mode = True\n" }, { "alpha_fraction": 0.7380785346031189, "alphanum_fraction": 0.75, "avg_line_length": 36.53947448730469, "blob_id": "8773b84c13d784832418ad71fa28eb6c43be0873", "content_id": "74f9ebf6af880031722502016cb406e773669441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2892, "license_type": "no_license", "max_line_length": 206, "num_lines": 76, "path": "/README.md", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "# Back-end Challenge 🏅 2021\n\nEste projeto consiste em uma API REST onde é possível fazer operações de CRUD na classe usuário. Também implementa uma atualização automática de usuários obtidos na API [randomuser](https://randomuser.me/).\n\nDesenvolvido com:\n * Python\n * FastAPI\n * SQLAlchemy\n * MySQL\n * Docker\n\n## Desafio\nEste é um desafio para testar conhecimentos em Back-end da Coodesh.\nRepositório original: [back-end-challenge-2021](https://lab.coodesh.com/public-challenges/back-end-challenge-2021.git)\n\n\n\n## Instalação e uso\nEdite as variáveis de ambiente conforme a necessidade em `app/settingsexample.py` e renomeie o arquivo para `settings.py`.\n ### Sem docker\n Use um ambiente virtual, ex.(linux):\n `python -m venv /envs/meuvenv && source /envs/meuvenv/bin/activate`\n \n Instale os requisitos:\n `pip install -r requirements.txt`\n\n Certifique-se de configurar o url do banco de dados em `settings.py` e que o banco esteja rodando. 
Também pode usar sqlite.\n\n Crie as tabelas no banco de dados no primeiro uso:\n `python -m app.persistence.models createtables`\n\n Inicie o uvicorn:\n `uvicorn app.main:app --reload`\n\n ### Com Docker\n Certifique-se de configurar o url do banco de dados em `settings.py` e que o banco esteja rodando. Também pode usar sqlite.\n \n Crie a imagem:\n `sudo docker build --rm -t imagename .`\n\n Rode o container:\n `sudo docker run -d -p 80:80 --name containername imagename`\n\n Crie as tabelas no banco de dados no primeiro uso:\n `sudo docker exec containername python -m app.persistence.models createtables`\n\n ### Com Docker Compose (recomendado)\n Certifique-se de configurar o url do banco de dados em `settings.py` o host como 'db' (o nome do serviço do banco de dados no arquivo docker-compose.yml)\n \n Crie e rode os serviços com:\n `sudo docker-compose up -d`\n\n Crie as tabelas no banco de dados no primeiro uso:\n `sudo docker-compose exec app python -m app.persistence.models createtables`\n\n ### Acesso e documentação\n Se tudo estiver funcionando, você poderá acesssar a documentação da API e os endpoints em http://localhost/docs \n\n \n\n ### Cron\n Se rodando em conteiner, vai ser instalado cron e crontab. Se não desejar usar, remova as linhas referentes ao cron no Dockerfile.\n \n Para que o cron funcione corretamente, é preciso gerar um registro sobre a paginação para consumo do randomuser.\n Ex.:`sudo docker-compose exec app python -m app.apiclient.randomuser initpag 10 100`\n Para serem buscados e inseridos 100 usuários de 10 em 10, ou seja, 10 usuários cada vez que o cron executar o script.\n O crontab está configurado para todo dia às 08:00.\n Você pode verificar o log deste script em `/var/log/cron.log` dentro do conteiner.\n\n## Testes\nTestes unitários dos endpoints estão em `tests/tests_endpoints.py`\n\nPara testar, use o comando `pytest`\n\n\nEste é um challenge by coodesh." 
}, { "alpha_fraction": 0.6986899375915527, "alphanum_fraction": 0.7139738202095032, "avg_line_length": 21.799999237060547, "blob_id": "b48c0918aa69c424bf96756133cf485e3c61ec0d", "content_id": "5c270ce863fdfdddb079b40cc828d6fbccb3a67f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/app/persistence/security/hash.py", "repo_name": "GustavoAT/back-end-challenge-2021", "src_encoding": "UTF-8", "text": "\"\"\"Hash and cryptografy functions.\n\n Helper on password and secrets security;\n\"\"\"\nimport string\nfrom hashlib import sha256\nimport secrets\n\n\n\nALPHABET = string.ascii_letters + string.digits\n\ndef get_hashed_password(password: str, salt: str):\n salted_password = password + salt\n return sha256(salted_password.encode()).hexdigest()\n\n\ndef create_salt(size: int = 8):\n salt = ''.join([secrets.choice(ALPHABET) for i in range(size)])\n return salt\n\n\n" } ]
16
Mas515/bread-eating-jeddi
https://github.com/Mas515/bread-eating-jeddi
513f43dcc24845afe7c5bcae521989d0ae9b8bd6
972378e3a348c80e364a1467cfc60063280fc061
ba5f88a2152f78dc413dae75599e481d532250b8
refs/heads/main
2023-04-01T00:18:31.433715
2021-03-07T17:51:41
2021-03-07T17:51:41
345,138,904
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6198691129684448, "alphanum_fraction": 0.630577027797699, "avg_line_length": 32.599998474121094, "blob_id": "99470e0b06b844cc446425344be1e08477e9f0b0", "content_id": "3284dc0c58453de6c729a195c242c53a88c325a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1681, "license_type": "no_license", "max_line_length": 98, "num_lines": 50, "path": "/takepic.py", "repo_name": "Mas515/bread-eating-jeddi", "src_encoding": "UTF-8", "text": "from kivy.app import App\nfrom kivy.uix.camera import Camera\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\n\n\nclass CameraExample(App):\n def build(self):\n layout = BoxLayout(orientation='vertical')\n # Create a camera object\n self.cameraObject = Camera(play=False)\n self.cameraObject.play = True\n self.cameraObject.resolution = (1000, 1000) # Specify the resolution\n # Create a button for taking photograph\n self.camaraClick = Button(text=\"Take Photo\")\n self.camaraClick.size_hint=(.5, .2)\n self.camaraClick.pos_hint={'x': .25, 'y':.75}\n # bind the button's on_press to onCameraClick\n self.camaraClick.bind(on_press=self.onCameraClick)\n # add camera and button to the layout\n layout.add_widget(self.cameraObject)\n layout.add_widget(self.camaraClick)\n # return the root widget\n return layout\n # Take the current frame of the video as the photo graph\n def onCameraClick(self, *args):\n self.cameraObject.export_to_png('/Users/marine/hackherthon/bread-eating-jeddi/selfie.png')\n App.get_running_app().stop()\n\n# Start the Camera App\nif __name__ == '__main__':\n CameraExample().run()\n\n\n\n #Display the image \nfrom kivy.uix.image import Image\n\nclass MainApp(App):\n\n# #This works to just display the image\n def build(self):\n img = Image(source='/Users/marine/hackherthon/bread-eating-jeddi/selfie.png',\n size_hint=(2, 1),\n pos_hint={'center_x':.5, 'center_y':.5})\n\n return img\nif __name__ == '__main__':\n app = 
MainApp()\n app.run()\n\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 20, "blob_id": "3edb245859df8d08640722d9d3ebdd71d6e3b409", "content_id": "2b6ef4a6fd11e1336394b9c2752ba7dc66bac89d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "Mas515/bread-eating-jeddi", "src_encoding": "UTF-8", "text": "# bread-eating-jeddi" }, { "alpha_fraction": 0.6160714030265808, "alphanum_fraction": 0.693657636642456, "avg_line_length": 49.55555725097656, "blob_id": "247204503ea45f063d8d08f863a5150fb8c913fd", "content_id": "9b5df6d0c7d88140dd196feaff3f45523302f46d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3248, "license_type": "no_license", "max_line_length": 182, "num_lines": 63, "path": "/feelgood.py", "repo_name": "Mas515/bread-eating-jeddi", "src_encoding": "UTF-8", "text": "from os import path\r\nimport PIL.Image\r\nimport requests\r\nfrom io import BytesIO\r\nimport webbrowser\r\nimport random\r\n\r\nanimal1 = requests.get('https://images.pexels.com/photos/148182/pexels-photo-148182.jpeg?auto=compress&cs=tinysrgb&dpr=3&h=750&w=1260')\r\na_1 = PIL.Image.open(BytesIO(animal1.content))\r\nanimal2 = requests.get('https://images.pexels.com/photos/374906/pexels-photo-374906.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940')\r\na_2 = PIL.Image.open(BytesIO(animal2.content))\r\nanimal3 = requests.get('https://images.pexels.com/photos/127027/pexels-photo-127027.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\r\na_3 = PIL.Image.open(BytesIO(animal3.content))\r\nanimal4 = requests.get('https://images.pexels.com/photos/257558/pexels-photo-257558.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\r\na_4 = PIL.Image.open(BytesIO(animal4.content))\r\nanimal5 = 
requests.get('https://images.pexels.com/photos/4588028/pexels-photo-4588028.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\r\na_5 = PIL.Image.open(BytesIO(animal5.content))\r\n\r\nanimals = [animal1,animal2,animal3,animal4,animal5]\r\n\r\nideas = [\"Go for a run\",\"Bake a cake\",\"Call a friend\",\"Read a book\", \"Go outside\"]\r\n \r\nfrom tkinter import * \r\ntop = Tk() \r\n \r\ntop.geometry(\"200x100\") \r\n \r\ndef fun(): \r\n print(random.choice(ideas))\r\n \r\ndef song():\r\n webbrowser.open_new(r\"https://open.spotify.com/playlist/2E6fOraA1wbcvsCxHL3F1E\")\r\n \r\ndef film():\r\n webbrowser.open_new(r\"https://www.netflix.com/browse/genre/10579\")\r\n \r\ndef cute2():\r\n animal1 = requests.get('https://images.pexels.com/photos/148182/pexels-photo-148182.jpeg?auto=compress&cs=tinysrgb&dpr=3&h=750&w=1260')\r\n animal2 = requests.get('https://images.pexels.com/photos/374906/pexels-photo-374906.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940')\r\n animal3 = requests.get('https://images.pexels.com/photos/127027/pexels-photo-127027.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\r\n animal4 = requests.get('https://images.pexels.com/photos/257558/pexels-photo-257558.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\r\n animal5 = requests.get('https://images.pexels.com/photos/4588028/pexels-photo-4588028.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\r\n animals = [animal1,animal2,animal3,animal4,animal5]\r\n img = (PIL.Image.open(BytesIO((random.choice(animals)).content)))\r\n img.show()\r\n\r\nb1 = Button(top,text = \"Idea\", command = fun,fg = \"teal\",bg = \"thistle\", relief = \"ridge\",activeforeground = \"red\",activebackground = \"pink\",pady=10,padx=10,borderwidth = 5) \r\n \r\nb2 = Button(top, text = \"Cute\", command = cute2,fg = \"teal\",bg = \"lightpink\", relief = \"ridge\",activeforeground = \"blue\",activebackground = \"pink\",pady=10,padx=10,borderwidth = 5) \r\n \r\nb3 = Button(top, text = \"Song\",command = song,fg = \"goldenrod\",bg = \"skyblue1\", 
relief = \"ridge\", activeforeground = \"green\",activebackground = \"pink\",pady = 10,borderwidth = 5) \r\n \r\nb4 = Button(top, text = \"Film\",command = film,fg = \"darkkhaki\",bg = \"mistyrose2\", relief = \"ridge\", activeforeground = \"yellow\",activebackground = \"pink\",pady = 10,borderwidth = 5) \r\n \r\nb1.pack(side = LEFT) \r\n \r\nb2.pack(side = RIGHT) \r\n \r\nb3.pack(side = TOP) \r\n \r\nb4.pack(side = BOTTOM) \r\n \r\ntop.mainloop() " }, { "alpha_fraction": 0.6080313324928284, "alphanum_fraction": 0.6362389922142029, "avg_line_length": 31.94193458557129, "blob_id": "93983da960083f6647b9fe898a8781dc7d2c47cb", "content_id": "4fe1c8ca96d5a17965908f48d2f80aa096e73bc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5107, "license_type": "no_license", "max_line_length": 139, "num_lines": 155, "path": "/main.py", "repo_name": "Mas515/bread-eating-jeddi", "src_encoding": "UTF-8", "text": "#import kivy\n\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.lang import Builder\n\nfrom os import path\nimport PIL.Image\nimport requests\nfrom io import BytesIO\nimport webbrowser\nimport random\n\nideas = [\"Go for a run\",\"Bake a cake\",\"Call a friend\",\"Read a book\", \"Go outside\"]\n\ndef fun(): \n ideas = [\"Go for a run\",\"Bake a cake\",\"Call a friend\",\"Read a book\", \"Go outside\"]\n print(random.choice(ideas))\n \ndef song():\n webbrowser.open_new(r\"https://open.spotify.com/playlist/2E6fOraA1wbcvsCxHL3F1E\")\n \ndef film():\n webbrowser.open_new(r\"https://www.netflix.com/browse/genre/10579\")\n\ndef cute2():\n animal1 = 
requests.get('https://images.pexels.com/photos/148182/pexels-photo-148182.jpeg?auto=compress&cs=tinysrgb&dpr=3&h=750&w=1260')\n animal2 = requests.get('https://images.pexels.com/photos/374906/pexels-photo-374906.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940')\n animal3 = requests.get('https://images.pexels.com/photos/127027/pexels-photo-127027.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\n animal4 = requests.get('https://images.pexels.com/photos/257558/pexels-photo-257558.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\n animal5 = requests.get('https://images.pexels.com/photos/4588028/pexels-photo-4588028.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')\n animals = [animal1,animal2,animal3,animal4,animal5]\n img = (PIL.Image.open(BytesIO((random.choice(animals)).content)))\n img.show()\n\nBuilder.load_string(\"\"\"\n<HomeScreen>:\n BoxLayout:\n Button:\n text: 'Gimme something to smash'\n on_press: root.manager.current = 'smash'\n Button:\n text: 'Gimme some love'\n on_press: root.manager.current = 'love'\n\n<TakePicScreen>:\n BoxLayout:\n Button:\n text: 'What would you like to explode today?'\n\n<GetLoveScreen>:\n BoxLayout:\n Button:\n text: 'Film'\n on_press: root.manager.current = 'film'\n Button:\n text: 'Idea'\n on_press: root.manager.current = 'idea'\n Button:\n text: 'Song'\n on_press: root.manager.current = 'song'\n Button:\n text: 'Cute animal'\n on_press: root.manager.current = 'animal'\n<FilmScreen>:\n BoxLayout:\n Button:\n text: 'Here is your feel-good film'\n<IdeaScreen>:\n BoxLayout:\n Button:\n text: \"Here is your idea\"\n<SongScreen>:\n BoxLayout:\n Button:\n text: 'Here is a song for you'\n<AnimalScreen>:\n BoxLayout:\n Button:\n text: 'What would you like to explode today?'\n\n\"\"\")\n\n# Add the screens\nclass HomeScreen(Screen):\n pass\nclass TakePicScreen(Screen):\n pass\nclass GetLoveScreen(Screen):\n pass\nclass FilmScreen(Screen):\n pass\nclass IdeaScreen(Screen):\n pass\nclass SongScreen(Screen):\n pass\nclass AnimalScreen(Screen):\n 
pass\n\nclass crapapp(App):\n def build(self):\n # Create the manager\n sm = ScreenManager()\n\n sm.add_widget(HomeScreen(name='home'))\n sm.add_widget(TakePicScreen(name='smash'))\n sm.add_widget(GetLoveScreen(name='love'))\n sm.add_widget(FilmScreen(name='film'))\n sm.add_widget(IdeaScreen(name='idea'))\n sm.add_widget(SongScreen(name='song'))\n sm.add_widget(AnimalScreen(name='animal'))\n\n return sm\n\n # home_layout = BoxLayout(padding = 2)\n # btn1 = Button(text = \"Gimme something to smash\", font_size = 50, background_color = red)\n # layout.add_widget(btn1)\n # btn2 = Button(text = \"Gimme some love\", font_size = 50, background_color = purple)\n # home_layout.add_widget(btn2)\n # button.bind(on_press=self.on_press_button)\n # return home_layout\n\n # def on_button_press(self, instance):\n # button_text = instance.text\n # if button_text == \"Gimme something to smash\":\n # return Label(text=\"What would you like to explode today?\")\n\n\n # def __init__(self, **kwargs):\n # super(HomeScreen, self).__init__(**kwargs)\n # self.cols = 1\n # btn1 = Button(text='Gimme something to smash', font_size = 50, background_normal = '', background_color = [0, 0, 0, 1])\n # btn2 = Button(text='Gimme some love', font_size = 50, background_normal = '', background_color = [156, 39, 176, 0.3])\n # btn1.bind(on_press=self.callback)\n # btn2.bind(on_press=self.callback)\n # self.add_widget(btn1)\n # self.add_widget(btn2)\n # def callback(self, instance):\n # print('The button %s state is <%s>' % (instance, instance.state))\n\n\n# class crapapp(App):\n# \tdef build(self):\n# \t\t#return Label(text=\"Welcome to the crap app – it's okay to feel crap\")\n# \t\treturn HomeScreen()\n\nif __name__ == '__main__':\n crapapp().run()" } ]
4
DarkmatterVale/HaikuPorts-Cleaner
https://github.com/DarkmatterVale/HaikuPorts-Cleaner
a24014b4fb76ad77b52cfb35505d48e069f98abf
bc66e4696599cd69b24fb1a4c384881a64197f70
9219a8d1e0ef730bbdeafc074001fe253f9f0467
refs/heads/master
2016-08-13T01:06:17.837210
2016-04-09T00:47:22
2016-04-09T00:47:22
51,781,786
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7685950398445129, "alphanum_fraction": 0.7685950398445129, "avg_line_length": 17.615385055541992, "blob_id": "72e5f19a388cfd4b167f8c0150903ab90d3a461a", "content_id": "1a5eeb33b24e2cc5dc25005b1a9443c78874c859", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 242, "license_type": "permissive", "max_line_length": 53, "num_lines": 13, "path": "/README.md", "repo_name": "DarkmatterVale/HaikuPorts-Cleaner", "src_encoding": "UTF-8", "text": "# HaikuPorts-Cleaner\nA tool to automatically clean up HaikuPorts\n\nWritten in Python and does not have any dependencies.\n\n## Usage\n\nTo run HaikuPorts-Cleaner:\n\n```\ncd HaikuPorts-Cleaner\npython hp-cleaner.py -d /absolute/path/to/haikuports\n```\n" }, { "alpha_fraction": 0.5745595097541809, "alphanum_fraction": 0.576708197593689, "avg_line_length": 32.72463607788086, "blob_id": "eae2e90a2f442b195a232f60d1049dc0e4df985d", "content_id": "e3964d9cc9cdd11f9ad6c838972fa17fef027144", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2327, "license_type": "permissive", "max_line_length": 95, "num_lines": 69, "path": "/cleaner.py", "repo_name": "DarkmatterVale/HaikuPorts-Cleaner", "src_encoding": "UTF-8", "text": "from Options import getOption\nfrom Recipe import RecipeFixer\n\nimport os\nimport timeit\n\n\nclass Cleaner():\n \"\"\"\n Main class for the ports cleaner. 
This class handles the management of\n each individual \"clean\" task/process\n \"\"\"\n\n def __init__(self, options, args):\n \"\"\"\n Clean the haikuports tree\n \"\"\"\n # Creating a timer for the start of the program\n start = timeit.default_timer()\n\n # Setting build-dependent variables\n self.directory = getOption(\"directory\")\n\n # Setting up log file\n self.logFile = \"log\"\n with open(os.path.join(os.getcwd(), self.logFile), 'w') as log_file:\n log_file.write(\"\")\n log_file.close()\n\n # Cleaning all files within the base directory\n self.clean_directory(self.directory)\n\n # Creating a timer for the end of the program\n stop = timeit.default_timer()\n\n # Printing the total time it took to run the program\n print(\"Total time to clean \" + self.directory + \" : \" + str(stop - start) + \" seconds\")\n\n def clean_directory(self, directory_to_clean):\n \"\"\"\n Cleans the main haikuports directory & all its subfolders\n \"\"\"\n total_recipes = self.tally_recipes(directory_to_clean)\n recipe_index = 0\n for root, dirs, files in os.walk(directory_to_clean):\n path = root.split('/')\n print (len(path) - 1) *'---' , os.path.basename(root)\n for test_file in files:\n if test_file.endswith(\".recipe\"):\n recipe_index += 1\n print len(path)*'---', test_file, ' ', recipe_index, '/', total_recipes\n current_recipe_fixer = RecipeFixer(root, test_file, self.logFile)\n current_recipe_fixer.clean()\n\n # Printing out the total recipe count\n print(\"Cleaned \" + str(recipe_index) + \" recipes\")\n\n def tally_recipes(self, base_directory):\n \"\"\"\n Returns the total number of recipes located within the directory\n base_directory\n \"\"\"\n total_recipes = 0\n for root, dirs, files in os.walk(base_directory):\n for test_file in files:\n if test_file.endswith(\".recipe\"):\n total_recipes += 1\n\n return total_recipes\n" }, { "alpha_fraction": 0.4301075339317322, "alphanum_fraction": 0.44354838132858276, "avg_line_length": 25.571428298950195, "blob_id": 
"9d5ce36b71b331bbbf5047553a9d3ff0d07d0f17", "content_id": "a55cae17f9727f2c164b3b28a15ac4773063f85f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "permissive", "max_line_length": 79, "num_lines": 14, "path": "/hp-cleaner.py", "repo_name": "DarkmatterVale/HaikuPorts-Cleaner", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Vale Tolpegin\n# Distributed under the terms of the MIT License.\n\n# -- Modules ------------------------------------------------------------------\n\nfrom cleaner import Cleaner\nfrom Options import parseOptions\n\n# -- Start --------------------------------------------------------------------\n\nCleaner(*parseOptions())\n" }, { "alpha_fraction": 0.48612353205680847, "alphanum_fraction": 0.493285596370697, "avg_line_length": 23.822221755981445, "blob_id": "472c15d29b27ab322805e3b5cb9d3a534643fa41", "content_id": "e57703e50c406a7034f8da88f533d911778610ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "permissive", "max_line_length": 94, "num_lines": 45, "path": "/Options.py", "repo_name": "DarkmatterVale/HaikuPorts-Cleaner", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Vale Tolpegin\n# Distributed under the terms of the MIT License.\n\n# -- Modules ------------------------------------------------------------------\n\nfrom optparse import OptionParser\n\n\n# -- global options -----------------------------------------------------------\n\nglobal __Options__\n\n\n# -- getOption ===-------------------------------------------------------------\n\ndef getOption(string):\n\t\"\"\"\n\tFetches an option by name\n\t\"\"\"\n\n\treturn getattr(__Options__, string)\n\n\n# -- splitCommaSeparatedList --------------------------------------------------\n\ndef setCommaSeparatedList(option, opt, value, 
parser):\n\tsetattr(parser.values, option.dest, value.split(','))\n\n\n# -- parseOptions -------------------------------------------------------------\n\ndef parseOptions():\n\t\"\"\"\n\tDoes command line argument parsing\n\t\"\"\"\n\tparser = OptionParser(usage='usage: %prog [options] portname[-portversion]', version='0.0.1')\n\tparser.add_option('-d', '--directory', dest='directory', help=\"haikuports directory\")\n\n\tglobal __Options__\n\n\t(__Options__, args) = parser.parse_args()\n\n\treturn (__Options__, args)\n" }, { "alpha_fraction": 0.5020908713340759, "alphanum_fraction": 0.506342351436615, "avg_line_length": 48.518550872802734, "blob_id": "68e2c683a8aaf41142ed793b78686603d9623d1a", "content_id": "de55b3b1d27295727b1695ac52e3b770cb272caa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57392, "license_type": "permissive", "max_line_length": 252, "num_lines": 1159, "path": "/recipe.py", "repo_name": "DarkmatterVale/HaikuPorts-Cleaner", "src_encoding": "UTF-8", "text": "import os\nimport re\n\n\nclass RecipeFixer():\n \"\"\"\n Parses an individual recipe and fixes it.\n \"\"\"\n\n def __init__(self, baseDir, name, log):\n # Set up the ordering for recipe files\n self.order = [\n \"SUMMARY\",\n \"DESCRIPTION\",\n \"HOMEPAGE\",\n \"COPYRIGHT\",\n \"LICENSE\",\n \"REVISION\",\n \"SOURCE_URI\",\n \"CHECKSUM_MD5\",\n \"CHECKSUM_SHA256\",\n \"SOURCE_DIR\",\n \"PATCHES\",\n \"ADDITIONAL_FILES\",\n \"ARCHITECTURES\",\n \"SECONDARY_ARCHITECTURES\",\n \"PROVIDES\",\n \"REQUIRES\",\n \"PROVIDES_devel\",\n \"REQUIRES_devel\",\n \"BUILD_REQUIRES\",\n \"BUILD_PREREQUIRES\",\n \"PATCH()\",\n \"BUILD()\",\n \"INSTALL()\",\n \"TEST()\",\n ]\n\n self.remove_components = [\n \"STATUS_HAIKU\",\n \"CHECKSUM_MD5\",\n \"DEPEND\"\n ]\n\n self.component_ordering = {\n \"SUMMARY\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"SUMMARY\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n 
\"DESCRIPTION\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"DESCRIPTION\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"HOMEPAGE\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"HOMEPAGE\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"COPYRIGHT\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"COPYRIGHT\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"LICENSE\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"LICENSE\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"REVISION\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"REVISION\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"SOURCE_URI\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"SOURCE_URI\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"CHECKSUM_SHA256\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"CHECKSUM_SHA256\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"SOURCE_DIR\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"SOURCE_DIR\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"PATCHES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"PATCHES\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"ADDITIONAL_FILES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"ADDITIONAL_FILES\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"ARCHITECTURES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"ARCHITECTURES\",\n \"join\" : \"=\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"SECONDARY_ARCHITECTURES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"SECONDARY_ARCHITECTURES\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"PROVIDES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"PROVIDES\",\n \"join\" : \"=\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"REQUIRES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"REQUIRES\",\n 
\"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"PROVIDES_devel\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"PROVIDES_devel\",\n \"join\" : \"=\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"REQUIRES_devel\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"REQUIRES_devel\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"BUILD_REQUIRES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"BUILD_REQUIRES\",\n \"join\" : \"=\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"BUILD_PREREQUIRES\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"BUILD_PREREQUIRES\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"PATCH()\" : {\n \"begin_id\" : '{',\n \"end_id\" : '}',\n \"name\" : \"PATCH()\",\n \"join\" : \"\\n\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"BUILD()\" : {\n \"begin_id\" : '{',\n \"end_id\" : '}',\n \"name\" : \"BUILD()\",\n \"join\" : \"\\n\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"INSTALL()\" : {\n \"begin_id\" : '{',\n \"end_id\" : '}',\n \"name\" : \"INSTALL()\",\n \"join\" : \"\\n\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"TEST()\" : {\n \"begin_id\" : '{',\n \"end_id\" : '}',\n \"name\" : \"TEST()\",\n \"join\" : \"\\n\",\n \"pre_requests\" : [\"\\n\"]\n },\n \"STATUS_HAIKU\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"STATUS_HAIKU\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"DEPEND\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"DEPEND\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n },\n \"CHECKSUM_MD5\" : {\n \"begin_id\" : '\"',\n \"end_id\" : '\"',\n \"name\" : \"CHECKSUM_MD5\",\n \"join\" : \"=\",\n \"pre_requests\" : []\n }\n }\n\n # Setting up logging information\n self.logFile = log\n\n # Setting general variables\n self.baseDir = baseDir\n self.name = name\n\n def clean(self):\n \"\"\"\n Fix the given recipe\n \"\"\"\n # Reset variables\n self.content = \"\"\n self.corrected_content = \"\"\n self.logData = \"\"\n\n # Adding log data\n 
self.logData += (\"*\" * 70) + \"\\n\"\n self.logData += re.sub(\".recipe\", \"\", self.name) + \"\\n\"\n self.logData += (\"*\" * 70) + \"\\n\"\n\n # Read the recipe file\n with open(os.path.join(self.baseDir, self.name), 'r') as content_file:\n self.content = content_file.read()\n content_file.close()\n\n # Updating corrected_content\n self.corrected_content = self.content\n\n # Determine whether the recipe is of the old format\n if self.should_update_format(self.content):\n # Apply updating\n self.corrected_content = self.convert_old_format(self.content)\n self.content = self.corrected_content\n self.corrected_content = self.correct_ordering()\n # Determine whether clean the recipe\n elif self.should_clean_recipe(self.content):\n # Apply cleaning\n self.corrected_content = self.correct_ordering()\n\n # Save new data to the recipe file\n with open(os.path.join(self.baseDir, self.name), 'w') as content_file:\n content_file.seek(0)\n content_file.write(self.corrected_content)\n content_file.close()\n\n # Save the log data\n with open(os.path.join(os.getcwd(), self.logFile), 'a') as log_file:\n log_file.write(self.logData)\n log_file.close()\n\n def correct_ordering(self):\n \"\"\"\n Corrects the ordering of the content within recipes\n \"\"\"\n original_content = self.content\n ordered_content = \"\"\n extracted_component_list = {}\n\n # For each component, go through the recipe, find it, and correctly\n # place it into the new recipe\n for component in self.order:\n start_, end_ = self.extract_component(original_content, component)\n\n if start_ != -1 and end_ != -1:\n extracted_component_list[component] = {\n \"text\" : str(self.content)[start_:end_] + \"\\n\",\n \"clean_text\" : re.sub(component + self.component_ordering[component][\"join\"], \"\", str(self.content)[start_:end_] + \"\\n\")[1:-2]\n }\n\n # Correcting mistakes in each component\n for component in self.order:\n # Correcting SUMMARY related issues\n if component == \"SUMMARY\" and \"SUMMARY\" in 
extracted_component_list:\n # Make sure it is only one line long\n if len(extracted_component_list[component][\"text\"]) > 70:\n print(\"\\033[91mERROR: \\033[00m{}\".format(\"SUMMARY must be less than 80 characters long\"))\n self.logData += \"WARNING: SUMMARY must be less than 70 characters long\\n\"\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n extracted_component_list[component][\"text\"] = re.sub(r\"\\n\", \"\", extracted_component_list[component][\"text\"]) + \"\\n\"\n self.logData += \"WARNING: Removing extra newline characters in SUMMARY\\n\"\n\n # Make sure it does not end in a period\n end_character_index = self.find_previous_non_whitespace_character(extracted_component_list[component][\"text\"], [self.component_ordering[component][\"end_id\"]], 1)\n if end_character_index != -1:\n if \".\" == extracted_component_list[component][\"text\"][end_character_index]:\n extracted_component_list[component][\"text\"] = extracted_component_list[component][\"text\"][:end_character_index] + extracted_component_list[component][\"text\"][(end_character_index + 1):]\n self.logData += \"WARNING: Removing extra period at the end of SUMMARY\\n\"\n elif component == \"SUMMARY\" and \"SUMMARY\" not in extracted_component_list:\n print(\"\\033[91mERROR: \\033[00m{}\".format(\"Cannot find SUMMARY in recipe\"))\n self.logData += \"ERROR: Cannot find SUMMARY in recipe\\n\"\n self.logData += \"WARNING: Adding dummy SUMMARY component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy SUMMARY component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting DESCRIPTION related issues\n if component == \"DESCRIPTION\" and \"DESCRIPTION\" in extracted_component_list:\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = 
self.find_previous_non_whitespace_character(extracted_component_list[component][\"text\"], [self.component_ordering[component][\"end_id\"]], 1)\n if end_character_index != -1:\n extracted_component_list[component][\"text\"] = extracted_component_list[component][\"text\"][:(end_character_index + 1)] + self.component_ordering[component][\"end_id\"] + \"\\n\"\n elif component == \"DESCRIPTION\" and \"DESCRIPTION\" not in extracted_component_list:\n print(\"\\033[91mERROR: \\033[00m{}\".format(\"Cannot find DESCRIPTION in recipe\"))\n self.logData += \"ERROR: Cannot find DESCRIPTION in recipe\\n\"\n self.logData += \"WARNING: Adding dummy DESCRIPTION component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy DESCRIPTION component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting HOMPAGE related issues\n if component == \"HOMEPAGE\" and \"HOMEPAGE\" in extracted_component_list:\n # If it is multi-line, make sure it is correctly formatted\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct homepage component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\" + re.sub(\"\\t\", \"\", instances_[0]) + \"\\n\"\n\n # Since the first COPYRIGHT is not supposed to be on a newline, ignore it\n num_ -= 1\n instances_ = instances_[1:]\n\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text 
+= instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"HOMEPAGE\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting COPYRIGHT related issues\n if component == \"COPYRIGHT\" and \"COPYRIGHT\" in extracted_component_list:\n # If it is multi-line, make sure it is correctly formatted\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Cleaning all extra commas\n for instance_index in range(0, num_):\n for character_index in range(1, len(instances_[instance_index]) - 3):\n try:\n if instances_[instance_index][character_index] == \",\":\n if re.sub(\"[0-9]\", \"\", instances_[instance_index][character_index - 1]) == \"\" and instances_[instance_index][character_index + 1] == \" \" and re.sub(\"[0-9]\", \"\", instances_[instance_index][character_index + 2]) != \"\":\n instances_[instance_index] = instances_[instance_index][:character_index] + instances_[instance_index][character_index + 1:]\n except:\n pass\n\n # Generating the correct copyright component\n if 
instances_[0][0] == \"\\t\":\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\" + instances_[0][1:] + \"\\n\"\n else:\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\" + instances_[0] + \"\\n\"\n\n for instance_index in range(1, len(instances_)):\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instances_[instance_index], [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instances_[instance_index]:\n generated_text += instances_[instance_index] + \"\\n\"\n elif instance_index > 0:\n if \"\\\\\" in instances_[instance_index - 1]:\n generated_text += cleaned_instance + \"\\n\"\n continue\n\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"COPYRIGHT\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting LICENSE related issues\n if component == \"LICENSE\" and \"LICENSE\" in extracted_component_list:\n # If it is multi-line, make sure it is correctly formatted\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n # Getting the individual items within provides\n 
num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct license component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\" + re.sub(\"\\t\", \"\", instances_[0]) + \"\\n\"\n\n # Since the first COPYRIGHT is not supposed to be on a newline, ignore it\n num_ -= 1\n instances_ = instances_[1:]\n\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"LICENSE\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting REVISION related issues\n if component == \"REVISION\" and \"REVISION\" in extracted_component_list:\n # Make sure it is only one line long\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n extracted_component_list[component][\"text\"] = re.sub(r\"\\n\", \"\", extracted_component_list[component][\"text\"]) + 
\"\\n\"\n self.logData += \"WARNING: Removing extra newline characters in REVISION\\n\"\n elif component == \"REVISION\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n \"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"1\\\"\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting SOURCE_URI related issues\n if component == \"SOURCE_URI\" and \"SOURCE_URI\" in extracted_component_list:\n # Make sure it is only one line long\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n extracted_component_list[component][\"text\"] = re.sub(r\"\\n\", \"\", extracted_component_list[component][\"text\"]) + \"\\n\"\n self.logData += \"WARNING: Removing extra newline characters in SOURCE_URI\\n\"\n elif component == \"SOURCE_URI\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting CHECKSUM_SHA256 related issues\n if component == \"CHECKSUM_SHA256\" and \"CHECKSUM_SHA256\" in extracted_component_list:\n # Make sure it is only one line long\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n extracted_component_list[component][\"text\"] = re.sub(r\"\\n\", \"\", extracted_component_list[component][\"text\"]) + \"\\n\"\n self.logData += \"WARNING: Removing extra newline characters in CHECKSUM_SHA256\\n\"\n elif component == \"CHECKSUM_SHA256\" and component not in extracted_component_list:\n self.logData 
+= \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting SOURCE_DIR related issues\n if component == \"SOURCE_DIR\" and \"SOURCE_DIR\" in extracted_component_list:\n # Make sure it is only one line long\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n extracted_component_list[component][\"text\"] = re.sub(r\"\\n\", \"\", extracted_component_list[component][\"text\"]) + \"\\n\"\n self.logData += \"WARNING: Removing extra newline characters in SOURCE_DIR\\n\"\n\n # Correcting PATCHES related issues\n if component == \"PATCHES\" and \"PATCHES\" in extracted_component_list:\n # If it is multi-line, make sure it is correctly formatted\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct patches component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index 
+ 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n\n # Correcting ADDITIONAL_FILES related issues\n if component == \"ADDITIONAL_FILES\" and \"ADDITIONAL_FILES\" in extracted_component_list:\n # If it is multi-line, make sure it is correctly formatted\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct additional_files component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n\n # Correcting ARCHITECTURES related issues\n if component == \"ARCHITECTURES\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n \"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"?x86 ?x86_gcc2\\\"\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting SECONDARY_ARCHITECTURES related 
issues\n if component == \"SECONDARY_ARCHITECTURES\" and \"SECONDARY_ARCHITECTURES\" in extracted_component_list:\n # Make sure it is only one line long\n if len(extracted_component_list[component][\"text\"].split(\"\\n\")) > 2:\n extracted_component_list[component][\"text\"] = re.sub(r\"\\n\", \"\", extracted_component_list[component][\"text\"]) + \"\\n\"\n self.logData += \"WARNING: Removing extra newline characters in SECONDARY_ARCHITECTURES\\n\"\n\n # Correcting PROVIDES related issues\n if component == \"PROVIDES\" and \"PROVIDES\" in extracted_component_list:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct provides component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"PROVIDES\" and \"PROVIDES\" not in extracted_component_list:\n extracted_component_list[\"PROVIDES\"] = {\n \"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + \"PROVIDES=\\\"\\n\\t\" + re.sub(\"-.*\", \"\", self.name) + \" = $portVersion\\n\\t\\\"\\n\",\n \"clean_text\" : re.sub(\"-.*\", \"\", self.name) + \" = 
$portVersion\"\n }\n self.logData += \"WARNING: Adding dummy missing PROVIDES in recipe\"\n\n # Correcting REQUIRES related issues\n if component == \"REQUIRES\" and \"REQUIRES\" in extracted_component_list:\n # Making sure that a \"haiku\" is in the REQUIRES component\n if \"SECONDARY_ARCHITECTURES\" in extracted_component_list:\n if \"haiku$secondaryArchSuffix\\n\" not in extracted_component_list[component][\"text\"] and \"haiku${secondaryArchSuffix}\" not in extracted_component_list[component][\"text\"]:\n extracted_component_list[component][\"text\"] = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\\thaiku$secondaryArchSuffix\\n\\t\" + extracted_component_list[component][\"clean_text\"]\n extracted_component_list[component][\"clean_text\"] = \"\\\"\\n\\thaiku$secondaryArchSuffix\\n\\t\" + extracted_component_list[component][\"clean_text\"]\n else:\n if \"haiku\\n\" not in extracted_component_list[component][\"text\"] and \"haiku$secondaryArchSuffix\" not in extracted_component_list[component][\"text\"] and \"haiku${secondaryArchSuffix}\" not in extracted_component_list[component][\"text\"]:\n extracted_component_list[component][\"text\"] = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\\thaiku\\n\\t\" + extracted_component_list[component][\"clean_text\"]\n extracted_component_list[component][\"clean_text\"] = \"\\\"\\n\\thaiku\\n\\t\" + extracted_component_list[component][\"clean_text\"]\n\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct requires component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = 
cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"REQUIRES\" and \"REQUIRES\" not in extracted_component_list:\n extracted_component_list[\"REQUIRES\"] = {\n \"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + \"REQUIRES=\\\"\\n\\thaiku\\n\\t\\\"\\n\",\n \"clean_text\" : \"haiku\"\n }\n self.logData += \"WARNING: Adding dummy missing REQUIRES in recipe\"\n\n # Correcting PROVIDES_devel related issues\n if component == \"PROVIDES_devel\" and \"PROVIDES_devel\" in extracted_component_list:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct provides_devel component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + \"\\n\\t\" + 
self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n\n # Make sure there is a REQUIRES_devel component in the recipe\n if \"REQUIRES_devel\" not in extracted_component_list:\n if \"SECONDARY_ARCHITECTURES\" in extracted_component_list:\n extracted_component_list[\"REQUIRES_devel\"] = {\n \"text\" : \"REQUIRES_devel=\\\"\\n\\thaiku$\\{secondaryArchSuffix\\}_devel\\n\\t\\\"\\n\",\n \"clean_text\" : \"haiku$\\{secondaryArchSuffix\\}_devel\"\n }\n self.logData += \"WARNING: Adding missing REQUIRES_devel component\\n\"\n else:\n extracted_component_list[\"REQUIRES_devel\"] = {\n \"text\" : \"REQUIRES_devel=\\\"\\n\\thaiku_devel\\n\\t\\\"\\n\",\n \"clean_text\" : \"haiku_devel\"\n }\n self.logData += \"WARNING: Adding missing REQUIRES_devel component\\n\"\n\n # Correcting REQUIRES_devel related issues\n if component == \"REQUIRES_devel\" and \"REQUIRES_devel\" in extracted_component_list:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct requires_devel component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n 
extracted_component_list[component][\"text\"] = generated_text\n\n # Make sure there is a PROVIDES_devel component in the recipe\n if \"PROVIDES_devel\" not in extracted_component_list:\n if \"SECONDARY_ARCHITECTURES\" in extracted_component_list:\n extracted_component_list[\"PROVIDES_devel\"] = {\n \"text\" : \"PROVIDES_devel=\\\"\\n\\t\" + re.sub(\"-.*\", \"\", self.name) + \"$\\{secondaryArchSuffix\\}_devel = $portVersion\\n\\t\\\"\\n\",\n \"clean_text\" : re.sub(\"-.*\", \"\", self.name) + \"$\\{secondaryArchSuffix\\}_devel = $portVersion\"\n }\n self.logData += \"WARNING: Adding missing PROVIDES_devel component\\n\"\n else:\n extracted_component_list[\"PROVIDES_devel\"] = {\n \"text\" : \"PROVIDES_devel=\\\"\\n\\t\" + re.sub(\"-.*\", \"\", self.name) + \"_devel = $portVersion\\n\\t\\\"\\n\",\n \"clean_text\" : re.sub(\"-.*\", \"\", self.name) + \"_devel = $portVersion\"\n }\n self.logData += \"WARNING: Adding missing PROVIDES_devel component\\n\"\n\n # Correcting REQUIRES_devel related issues\n if component == \"BUILD_REQUIRES\" and \"BUILD_REQUIRES\" in extracted_component_list:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct build_requires component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = 
generated_text[:(end_character_index + 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n if extracted_component_list[component][\"clean_text\"] != \"\":\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"BUILD_REQUIRES\" and component not in extracted_component_list:\n self.logData += \"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n \"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\n\\thaiku_devel\\n\\t\\\"\\n\",\n \"clean_text\" : \"\"\n }\n\n # Correcting REQUIRES_devel related issues\n if component == \"BUILD_PREREQUIRES\" and \"BUILD_PREREQUIRES\" in extracted_component_list:\n # Getting the individual items within provides\n num_, instances_ = self.number_of_instances(extracted_component_list[component][\"clean_text\"], \"*\", [\"\\n\"])\n\n # Generating the correct build_prerequires component\n generated_text = component + self.component_ordering[component][\"join\"] + \"\\\"\\n\"\n for instance in instances_:\n cleaned_instance = \"\"\n for non_spaced in self.remove_characters(instance, [\"\\t\"]).split(\" \"):\n if non_spaced != \"\":\n cleaned_instance += \" \" + non_spaced\n cleaned_instance = cleaned_instance[1:]\n\n if \"#\" in instance:\n generated_text += instance + \"\\n\"\n else:\n generated_text += \"\\t\" + cleaned_instance + \"\\n\"\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)\n if end_character_index != -1:\n generated_text = generated_text[:(end_character_index + 1)] + \"\\n\\t\" + self.component_ordering[component][\"end_id\"] + \"\\n\"\n\n extracted_component_list[component][\"text\"] = generated_text\n elif component == \"BUILD_PREREQUIRES\" and component not in extracted_component_list:\n self.logData += 
\"WARNING: Adding dummy \" + component + \" component in recipe\\n\"\n\n extracted_component_list[component] = {\n #\"text\" : \"# WARNING: Adding dummy \" + component + \" component in recipe\\n\" + component + self.component_ordering[component][\"join\"] + \"\\\"\\n\\t\\\"\\n\",\n \"text\" : \"# WARNING: \" + component + \" must be added to recipe here\\n\",\n \"clean_text\" : \"\"\n }\n\n # Assembling final information\n for component in self.order:\n if component in extracted_component_list:\n for component_part in self.component_ordering[component][\"pre_requests\"]:\n ordered_content += component_part\n ordered_content += extracted_component_list[component][\"text\"]\n\n # Cleaning up log file\n self.logData += \"\\n\"\n\n # Return the final components\n return ordered_content\n\n def extract_component(self, text, component_name):\n \"\"\"\n Returns the start and end index for the component with the name\n component_name. It not only identifies the start and end index, but\n will also grab any additional data that is critical (or in the recipe)\n \"\"\"\n # Setting up indexes\n component_start_index = -1\n component_end_index = -1\n component = component_name\n\n # Detecting previous component\n if self.component_ordering[component][\"name\"] in text:\n if self.component_ordering[component][\"begin_id\"] == self.component_ordering[component][\"end_id\"]:\n component_start_index = text.index(self.component_ordering[component][\"name\"])\n component_text = text[component_start_index:]\n\n start_index = component_text.find(self.component_ordering[component][\"begin_id\"])\n end_index = component_text[start_index + 1:].find(self.component_ordering[component][\"end_id\"])\n\n while str(component_text[(start_index + end_index):(start_index + end_index + 1)]) == \"\\\\\":\n end_index += component_text[start_index + end_index + 2:].find(self.component_ordering[component][\"end_id\"]) + 1\n\n component_end_index = component_start_index + start_index + end_index + 2\n 
else:\n nesting_index = 0\n component_start_index = text.index(self.component_ordering[component][\"name\"])\n component_text = text[component_start_index:]\n\n start_index = component_text.find(self.component_ordering[component][\"begin_id\"])\n end_index = start_index + 1\n nesting_index += 1\n\n while nesting_index > 0:\n if self.component_ordering[component][\"begin_id\"] in component_text[end_index:end_index + 1]:\n nesting_index += 1\n elif self.component_ordering[component][\"end_id\"] in component_text[end_index:end_index + 1]:\n nesting_index -= 1\n end_index += 1\n\n component_end_index = component_start_index + end_index\n\n return component_start_index, component_end_index\n\n def should_clean_recipe(self, content):\n \"\"\"\n If the recipe detects something that should not be placed inside of\n it, the cleaner should skip the recipe.\n \"\"\"\n content_copy = str(content)\n\n # For each component, go through the recipe, find it, and remove\n # it from the cleaner\n for component in self.order:\n start_index, end_index = self.extract_component(content_copy, component)\n if start_index != -1 and end_index != -1:\n if len(self.remove_whitespace(content_copy[:start_index])) == 0:\n content_copy = content_copy[:start_index] + content_copy[end_index + 1:]\n\n if self.remove_whitespace(content_copy) != \"\":\n self.logData += \"ERROR: Cannot parse recipe file with unknown content\"\n return False\n\n return True\n\n def should_update_format(self, content):\n \"\"\"\n If the parser detects that the recipe is of the old format, update the\n recipe.\n \"\"\"\n for old_component in self.remove_components:\n if old_component in content:\n return True\n\n return False\n\n def remove_whitespace(self, text):\n \"\"\"\n Removes all whitespace in the text and returns whatever is remaining.\n \"\"\"\n return \"\".join(text.split())\n\n def find_previous_non_whitespace_character(self, text, skip_character_list, max_num_chars_to_skip):\n \"\"\"\n Returns the index of the 
last non-whitespace character, excluding\n the skip characters.\n \"\"\"\n # Setting up variables\n character_index = -1\n find_index = len(text) - 1\n num_chars_skipped = 0\n\n while find_index >= 0:\n current_character = text[find_index]\n\n if current_character.strip() == \"\":\n find_index -= 1\n continue\n\n skip_test = False\n if num_chars_skipped < max_num_chars_to_skip:\n for skip_character in skip_character_list:\n if current_character == skip_character:\n skip_test = True\n num_chars_skipped += 1\n break\n if skip_test:\n find_index -= 1\n continue\n\n character_index = find_index\n break\n\n return character_index\n\n def find_previous_character(self, text, character):\n \"\"\"\n Returns the index of the closest to the end of the text character\n that is \"character\".\n \"\"\"\n # Setting up variables\n character_index = -1\n find_index = len(text) - 1\n\n # Finding previous character\n while find_index >= 0:\n current_character = text[find_index]\n\n if current_character == character:\n character_index = find_index\n break\n\n find_index -= 1\n\n # Returning index of found character\n return character_index\n\n def find_next_non_whitespace_character(self, text, skip_character_list, max_num_chars_to_skip):\n \"\"\"\n Returns the index of the next non-whitespace character, excluding the\n skip characters.\n \"\"\"\n # Setting up variables\n character_index = -1\n find_index = 0\n num_chars_skipped = 0\n\n while find_index < len(text):\n current_character = text[find_index]\n\n if current_character.strip() == \"\":\n find_index += 1\n continue\n\n skip_test = False\n if num_chars_skipped < max_num_chars_to_skip:\n for skip_character in skip_character_list:\n if current_character == skip_character:\n skip_test = True\n num_chars_skipped += 1\n break\n if skip_test:\n find_index += 1\n continue\n\n character_index = find_index\n break\n\n return character_index\n\n def number_of_instances(self, text, char_to_find, skip_chars):\n \"\"\"\n Returns the number 
of times \"char_to_find\" is found in \"text\", split\n by \"skip_chars\"\n \"\"\"\n number = 0\n instances = []\n\n for skip_char in skip_chars:\n text_components = text.split()\n if skip_char != \"\":\n text_components = text.split(skip_char)\n\n for individual_component in text_components:\n if char_to_find == \"*\":\n if individual_component != \"\":\n number += 1\n instances.append(individual_component)\n else:\n if individual_component == char_to_find:\n number += 1\n instances.append(individual_component)\n\n return number, instances\n\n def remove_characters(self, text, chars_to_remove):\n \"\"\"\n Returns the text minus all of the instances of \"chars_to_remove\"\n \"\"\"\n for char in chars_to_remove:\n text = re.sub(char, \"\", text)\n\n return text\n\n def convert_old_format(self, text):\n \"\"\"\n Convert recipes from the old format to the new format.\n \"\"\"\n warning_text = \"# WARNING: THIS RECIPE WAS AUTO-CONVERTED...SEE GIT LOG FOR MORE INFORMATION\\n\\n\"\n extracted_component_list = {}\n\n # For each component, go through the recipe, find it, and correctly\n # place it into the new recipe\n for component in self.order:\n start_, end_ = self.extract_component(text, component)\n\n if start_ != -1 and end_ != -1:\n extracted_component_list[component] = {\n \"text\" : str(self.content)[start_:end_] + \"\\n\",\n \"clean_text\" : re.sub(component + self.component_ordering[component][\"join\"], \"\", str(self.content)[start_:end_] + \"\\n\")[1:-2]\n }\n\n for component in self.remove_components:\n start_, end_ = self.extract_component(text, component)\n\n if start_ != -1 and end_ != -1:\n extracted_component_list[component] = {\n \"text\" : str(self.content)[start_:end_] + \"\\n\",\n \"clean_text\" : re.sub(component + self.component_ordering[component][\"join\"], \"\", str(self.content)[start_:end_] + \"\\n\")[1:-2]\n }\n\n # Cleaning all old components & generating appropriate current\n # components\n for component in self.remove_components:\n # 
Converting DEPEND into other parts of the recipe\n if component == \"DEPEND\" and component in extracted_component_list:\n depend_components = self.extract_depend_components(extracted_component_list[component][\"clean_text\"])\n\n if \"REQUIRES\" not in extracted_component_list:\n extracted_component_list[\"REQUIRES\"] = {\n \"text\" : \"REQUIRES=\\\"\\n\\thaiku\\n\\t\\\"\\n\",\n \"clean_text\" : \"haiku\"\n }\n\n text = extracted_component_list[\"REQUIRES\"][\"text\"]\n\n # Cleaning ending of component (fixing tabs, etc)\n end_character_index = self.find_previous_non_whitespace_character(text, [], 0)\n if end_character_index != -1:\n text = text[:end_character_index - 1]\n\n if text[-1] == \"\\t\":\n text = text[:-2]\n\n for depend_component in depend_components:\n text += \"\\t\" + depend_component[0] + \" \" + depend_component[1] + \" \" + depend_component[2] + \"\\n\"\n text += \"\\t\\\"\"\n\n extracted_component_list[\"REQUIRES\"][\"text\"] = text + \"\\n\"\n extracted_component_list[\"REQUIRES\"][\"clean_text\"] = re.sub(\"REQUIRES\" + self.component_ordering[\"REQUIRES\"][\"join\"], \"\", text + \"\\n\")[1:-2]\n\n # Converting STATUS_HAIKU\n if component == \"STATUS_HAIKU\" and component in extracted_component_list:\n if extracted_component_list[component][\"clean_text\"].lower() == \"stable\":\n extracted_component_list[\"ARCHITECTURES\"] = {\n \"text\" : \"ARCHITECTURES\" + self.component_ordering[\"ARCHITECTURES\"][\"join\"] + \"\\\"x86_gcc2\\\"\\n\",\n \"clean_text\" : \"x86_gcc2\"\n }\n elif extracted_component_list[component][\"clean_text\"].lower() == \"broken\":\n extracted_component_list[\"ARCHITECTURES\"] = {\n \"text\" : \"ARCHITECTURES\" + self.component_ordering[\"ARCHITECTURES\"][\"join\"] + \"\\\"!x86_gcc2\\\"\\n\",\n \"clean_text\" : \"!x86_gcc2\"\n }\n else:\n extracted_component_list[\"ARCHITECTURES\"] = {\n \"text\" : \"ARCHITECTURES\" + self.component_ordering[\"ARCHITECTURES\"][\"join\"] + \"\\\"?x86_gcc2\\\"\\n\",\n \"clean_text\" : 
\"?x86_gcc2\"\n }\n\n # Assembling final information\n ordered_content = warning_text\n for component in self.order:\n if component in extracted_component_list:\n for component_part in self.component_ordering[component][\"pre_requests\"]:\n ordered_content += component_part\n ordered_content += extracted_component_list[component][\"text\"]\n\n return ordered_content\n\n def extract_depend_components(self, clean_depend_component):\n \"\"\"\n Extracts each dependency. It then determines the version(s) required\n and returns a list containing the [ordered] data for each dependency.\n \"\"\"\n depend_components = []\n\n for component in clean_depend_component.split(\"\\n\"):\n if self.remove_whitespace(component) != \"\":\n indiv_dependency_components = component.split(\" \")\n\n name = \"\"\n ver_operator = \"\"\n version = \"\"\n\n for indiv_comp_index in range(0, len(indiv_dependency_components)):\n if self.remove_whitespace(indiv_dependency_components[indiv_comp_index]) != \"\":\n try:\n name = re.sub(\".*/\", \"\", indiv_dependency_components[indiv_comp_index])\n ver_operator = indiv_dependency_components[indiv_comp_index + 1]\n version = indiv_dependency_components[indiv_comp_index + 2]\n break\n except:\n pass\n\n depend_components.append([name, ver_operator, version])\n\n # Returning the dependencies found in the DEPEND component\n return depend_components\n" } ]
5
risand/pong
https://github.com/risand/pong
33d2b990fe276a1e95888b04eea8272ce80d1f0b
6996e96b5ac9c0fd6731812e9ef17ddf5a069c1b
55aeaa3afda7bbeb1e4be8474df7c18c8fc7bc95
refs/heads/main
2023-03-19T08:28:18.205389
2021-03-01T08:17:51
2021-03-01T08:17:51
343,339,044
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5947503447532654, "alphanum_fraction": 0.6274007558822632, "avg_line_length": 17.549999237060547, "blob_id": "8d9c7aa671eb37c8457a6c2f53a405bb7e7b0350", "content_id": "d7950ca772ad959e0d242d975726a3a0d8cc72b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 120, "num_lines": 80, "path": "/main.py", "repo_name": "risand/pong", "src_encoding": "UTF-8", "text": "from turtle import Screen, Turtle\r\nfrom scoreboard import Scoreboard\r\nfrom paddle import Paddle\r\nfrom ball import Ball\r\nimport time\r\n\r\nline = Turtle()\r\nscreen = Screen()\r\nscoreboard = Scoreboard()\r\nball = Ball()\r\n\r\nscreen.setup(width=1200, height=600)\r\nscreen.bgcolor(\"black\")\r\nscreen.title(\"Pong\")\r\nscreen.tracer(0)\r\n\r\n\r\nright_paddle = Paddle((560, 0))\r\nleft_paddle = Paddle((-560, 0))\r\n\r\n\r\nscreen.listen()\r\nscreen.onkey(right_paddle.go_up, \"Up\")\r\nscreen.onkey(right_paddle.go_down, \"Down\")\r\nscreen.onkey(left_paddle.go_up, \"w\")\r\nscreen.onkey(left_paddle.go_down, \"s\")\r\n\r\n\r\nline.hideturtle()\r\nline.pensize(5)\r\nline.pencolor(\"white\")\r\nline.penup()\r\nline.goto(0, 285)\r\nline.setheading(270)\r\n\r\nfor _ in range(20):\r\n line.speed(\"fastest\")\r\n line.pendown()\r\n line.forward(9)\r\n line.penup()\r\n line.forward(20)\r\n\r\n\r\n\r\ngame_is_on = True\r\nwhile game_is_on:\r\n time.sleep(ball.move_speed)\r\n screen.update()\r\n ball.move()\r\n if ball.ycor() > 280 or ball.ycor() < -280:\r\n ball.bounce_y()\r\n\r\n if ball.distance(right_paddle) < 40 and ball.xcor() > 530 or ball.distance(left_paddle) < 40 and ball.xcor() < -530:\r\n ball.bounce_x()\r\n\r\n if ball.xcor() > 580:\r\n ball.reset_position()\r\n scoreboard.increase_left_score()\r\n\r\n if ball.xcor() < -580:\r\n ball.reset_position()\r\n scoreboard.increase_right_score()\r\n\r\n\r\n\r\n# screen.listen()\r\n# screen.onkey(right_up, \"Up\")\r\n# 
screen.onkey(right_down, \"Down\")\r\n# screen.onkey(left_up, \"Up\")\r\n# screen.onkey(left_down, \"Down\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nscreen.exitonclick()" } ]
1
Clearly-Josh/TestGitWithRepl
https://github.com/Clearly-Josh/TestGitWithRepl
7d5af438981c303c8c33e62ed9ad9374308c0f98
59fe50fd6813d9b36702c599c7a14e829b12824f
c4d42fae68926a8c58769a2c5f0538a45bca5ee1
refs/heads/master
2020-12-22T18:41:36.747271
2020-01-29T03:25:47
2020-01-29T03:25:47
236,893,800
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3548869788646698, "alphanum_fraction": 0.38098403811454773, "avg_line_length": 36.13580322265625, "blob_id": "8dd4b80cd0c1996c857660bcb9c4fee490e4c1ef", "content_id": "995a2674d171ffc6a8c621c621733ef6443adcc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6016, "license_type": "no_license", "max_line_length": 110, "num_lines": 162, "path": "/main.py", "repo_name": "Clearly-Josh/TestGitWithRepl", "src_encoding": "UTF-8", "text": "def showMem():\n #here are our column numbers for our memory display\n print(\" 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\")\n print(\"-----------------------------------------------------------------------------------\")\n print(\"0 \",ram0)\n print(\"1 \",ram1)\n print(\"2 \",ram2)\n print(\"3 \",ram3)\n print(\"4 \",ram4)\n print(\"5 \",ram5)\n print(\"6 \",ram6)\n print(\"7 \",ram7)\n print(\"-----------------------------------------------------------------------------------\")\n\n#and here is the empty matrix of arrays, making up our representation of RAM\n#testing master branch\nram0 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram1 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram2 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram3 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram4 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram5 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram6 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nram7 = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n\nfile1=[\"A\",\"l\",\"l\",\"_\",\"w\",\"e\",\"_\",\"h\",\"a\",\"v\",\"e\",\"_\",\"t\",\"o\",\"_\",\"d\"]\nfile2=[\"e\",\"c\",\"i\",\"d\",\"e\",\"_\",\"i\",\"s\",\"_\",\"w\",\"h\",\"a\",\"t\",\"_\",\"t\",\"o\"]\nstorage=[file1,file2]\n\na=1\nwhile a<6:\n print(\"\")\n print(\"Welcome to the world's 
freaking best Memory Simulator.\")\n print(\"\")\n print(\"1 - View the current memory state!\")\n print(\"2 - Load a program or file, please.\")\n print(\"3 - Let's edit a file in memory.\")\n print(\"4 - Can we delete a file?\")\n print(\"5 - Time to save and close this thing.\")\n print(\"6 - I'm done here. Quit Program.\")\n print(\"\")\n a=int(input(\"What would you like to do? \"))\n print(\"\")\n if 1==a:\n showMem()\n elif 2==a:\n print(\"Current files in storage are: \",storage)\n b=input(\"Which file would you like to load? \")\n if b==\"file1\" or b==\"1\":\n ram0=file1\n storage.pop(0)\n #for c in storage:\n # if c == file1:\n # storage.pop(c)\n elif b==\"file2\" or b==\"2\":\n ram1=file2\n if storage[1] != None:\n storage.pop(1)\n else:\n storage.pop(0)\n elif 3==a:\n edit = int(input(\"Would you like to edit a row (1) or a single character (2)? \"))\n if edit==1:\n currentRow=int(input(\"Which row would you like to edit? \"))\n if(currentRow==0):\n currentRow=ram0\n elif(currentRow==1):\n currentRow=ram1\n elif(currentRow==2):\n currentRow=ram2\n elif(currentRow==2):\n currentRow=ram3\n elif(currentRow==4):\n currentRow=ram4\n elif(currentRow==5):\n currentRow=ram5\n elif(currentRow==6):\n currentRow=ram6\n elif(currentRow==7):\n currentRow=ram7\n print(\"The row currently shows: \",currentRow)\n change=input(\"Enter the new row: \")\n i=0\n for l in change:\n print(i)\n print(currentRow[i])\n print(change[i])\n currentRow[i]=change[i]\n i+=1\n elif edit==2:\n c = 0\n d = 0\n while c < 8 and d < 16:\n print(\"Please enter the grid you'd like to edit. Enter out-of-bounds coordinates to exit the editor.\")\n currentRow = int(input(\"Row? \"))\n d = int(input(\"Column? \"))\n change=input(\"What would you like to change it to? 
\")\n if(currentRow==0):\n currentRow=ram0\n elif(currentRow==1):\n currentRow=ram1\n elif(currentRow==2):\n currentRow=ram2\n elif(currentRow==2):\n currentRow=ram3\n elif(currentRow==4):\n currentRow=ram4\n elif(currentRow==5):\n currentRow=ram5\n elif(currentRow==6):\n currentRow=ram6\n elif(currentRow==7):\n currentRow=ram7\n print(currentRow)\n if c < 8 and d < 16:\n currentRow[d] = change\n elif 4==a:\n currentRow=int(input(\"Which row would you like to delete? \"))\n if currentRow < 8:\n if(currentRow==0):\n ram0= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==1):\n ram1= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==2):\n ram2= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==2):\n ram3= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==4):\n ram4= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==5):\n ram5= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==6):\n ram6= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==7):\n ram7= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n print(currentRow)\n else:\n print(\"Invalid row.\")\n elif 5==a:\n currentRow=int(input(\"Which row contains the file you'd like to save & close? 
\"))\n if currentRow < 8:\n if(currentRow==0):\n ram0= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==1):\n ram1= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==2):\n ram2= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==2):\n ram3= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==4):\n ram4= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==5):\n ram5= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==6):\n ram6= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n elif(currentRow==7):\n ram7= [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n print(currentRow)\n else:\n print(\"Invalid row.\")\n elif 6==a:\n print(\"Aight this has been fun. Peace.\")\n" } ]
1
Khoerul-Umam/Odoo11
https://github.com/Khoerul-Umam/Odoo11
4e4075377cf5fc897fec48df4e975794a32567f1
1c54ae73bdd1746e5845dea3b69b92938e5aea1e
43b7de3b05f000c7bb3e63d57855e15838d7617a
refs/heads/master
2020-07-03T12:52:04.132533
2019-08-12T10:52:50
2019-08-12T10:52:50
201,910,360
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5150214433670044, "alphanum_fraction": 0.5593705177307129, "avg_line_length": 34, "blob_id": "f13a693d468cc271c517d060731f488be54c14d7", "content_id": "c86ec5ed7b6c3badd1af4fcc37f1fcae62d8bd3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 92, "num_lines": 20, "path": "/fresh11/controllers/controllers.py", "repo_name": "Khoerul-Umam/Odoo11", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom odoo import http\n\n# class Fresh11(http.Controller):\n# @http.route('/fresh11/fresh11/', auth='public')\n# def index(self, **kw):\n# return \"Hello, world\"\n\n# @http.route('/fresh11/fresh11/objects/', auth='public')\n# def list(self, **kw):\n# return http.request.render('fresh11.listing', {\n# 'root': '/fresh11/fresh11',\n# 'objects': http.request.env['fresh11.fresh11'].search([]),\n# })\n\n# @http.route('/fresh11/fresh11/objects/<model(\"fresh11.fresh11\"):obj>/', auth='public')\n# def object(self, obj, **kw):\n# return http.request.render('fresh11.object', {\n# 'object': obj\n# })" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 17, "blob_id": "da9988db6e5755b8493e99cc49d9fea8c99ef64c", "content_id": "2a76b21eee1de2172d93a23bf5f799e6137bb152", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/README.md", "repo_name": "Khoerul-Umam/Odoo11", "src_encoding": "UTF-8", "text": "# Odoo11\naddons for odoo 11 version\n" } ]
2
Sm4rtyF0x/Sudoku-Solver
https://github.com/Sm4rtyF0x/Sudoku-Solver
1a0e4f66fcc88f740f54435b50cdd6d5c91a270c
f823311f6a238f9c3cba9801b257ef5a8eb9d3cc
129e741caaf229871b652f3c14f8ecd2b00287b5
refs/heads/main
2023-07-02T22:58:35.751805
2021-07-29T15:34:10
2021-07-29T15:34:10
390,758,657
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.32708144187927246, "alphanum_fraction": 0.3737419843673706, "avg_line_length": 28.36111068725586, "blob_id": "82fb6db2fbe739f32d5033ab8af4d75c707be6a9", "content_id": "844ec1db6632292f8a1a5b658a16626069b20cde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2186, "license_type": "no_license", "max_line_length": 177, "num_lines": 72, "path": "/sudoku_solver.py", "repo_name": "Sm4rtyF0x/Sudoku-Solver", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport sys\r\n\r\n# example input 0,0,0,0,0,0,0,9,0,0,9,0,7,0,0,2,1,0,0,0,4,0,9,0,0,0,0,0,1,0,0,0,8,0,0,0,7,0,0,4,2,0,0,0,5,0,0,8,0,0,0,0,7,4,8,0,1,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,9,6,1,3,0,0,0\r\n# Entering sudoku grid\r\n# Checks row, column and square to find same number to n\r\n# if n founded, return False\r\n# Else, return True\r\n\r\ndef print_grid(grid):\r\n template = \"\"\"\r\n+---------+---------+---------+\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n+---------+---------+---------+\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n+---------+---------+---------+\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n| {} {} {} | {} {} {} | {} {} {} |\r\n+-----------------------------+\r\n \"\"\".format(*grid.flatten())\r\n print(template)\r\n\r\ndef posible(x, y ,n, grid):\r\n # Checking column\r\n for i in range(9):\r\n if grid[i][x] == n:\r\n return False\r\n # Checking row\r\n for i in range(9):\r\n if grid[y][i] == n:\r\n return False\r\n # Checking square\r\n x0 = (x//3) * 3\r\n y0 = (y//3) * 3\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[y0+i][x0+j] == n:\r\n return False\r\n return True\r\n\r\ndef solve(grid):\r\n for y in range(9):\r\n for x in range(9):\r\n # Finding empty spaces in grid\r\n if grid[y][x] == 0:\r\n for n in 
range(1,10):\r\n if posible(x, y ,n, grid):\r\n grid[y][x] = n\r\n solve(grid)\r\n grid[y][x] = 0\r\n return\r\n print(\"Solved grid:\")\r\n print_grid(grid)\r\n sys.exit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sudoku = input(\"Enter sudoku: \")\r\n sudoku_list = sudoku.split(\",\")\r\n sudoku_list_int = []\r\n for i in sudoku_list:\r\n sudoku_list_int.append(int(i))\r\n #print_grid(sudoku_list_int)\r\n grid = np.array(sudoku_list_int).reshape(9,9)\r\n print(\"\\nEntered grid:\")\r\n print_grid(grid)\r\n solve(grid)\r\n" }, { "alpha_fraction": 0.2789783775806427, "alphanum_fraction": 0.5972495079040527, "avg_line_length": 62.625, "blob_id": "69db586cbaa8515e23ed19cdfdeaa70076f0cd2f", "content_id": "e5c7fc43b158e05d7f770efb18038078b44e8e69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 509, "license_type": "no_license", "max_line_length": 161, "num_lines": 8, "path": "/README.md", "repo_name": "Sm4rtyF0x/Sudoku-Solver", "src_encoding": "UTF-8", "text": "# Sudoku-Solver\nSudoku solver program written with python and java.\nThis program uses recursion method to solve sudoku problems.\n<br>Example inputs for python file : <br><br>\n0,0,0,0,0,0,0,9,0,0,9,0,7,0,0,2,1,0,0,0,4,0,9,0,0,0,0,0,1,0,0,0,8,0,0,0,7,0,0,4,2,0,0,0,5,0,0,8,0,0,0,0,7,4,8,0,1,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,9,6,1,3,0,0,0\n<br>\n<br>\n0,4,0,9,0,0,0,0,7,1,9,0,6,0,0,0,0,4,5,0,0,0,0,0,0,1,0,0,8,0,0,3,0,0,7,0,2,0,0,0,0,4,5,0,8,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,3,4,0,0,7,0,0,0,6,0,0,1\n" } ]
2
LumaPictures/maya-usd-scripts
https://github.com/LumaPictures/maya-usd-scripts
7346b69d3851aed8b7b5d6ecddca69ddbd2dcbed
b681e353c570bb52052f5005740179b1cd8862f3
abaefe4879bc22c1783303b6234d18caf748da18
refs/heads/master
2020-08-21T07:08:03.352732
2019-10-18T22:09:40
2019-10-18T22:09:40
216,109,254
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6893473863601685, "alphanum_fraction": 0.693444550037384, "avg_line_length": 40.16867446899414, "blob_id": "46dfa764b0b79726cb21400fcbf72729c618db56", "content_id": "a93444ad697e331d01f4d3c93199f6883ae8bdcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6834, "license_type": "no_license", "max_line_length": 97, "num_lines": 166, "path": "/replace_lic.py", "repo_name": "LumaPictures/maya-usd-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''Find / replace all files that have the \"old\" pixar-style apache license with\n the new autodesk-style apache license'''\n\nfrom __future__ import print_function\n\nimport argparse\nimport inspect\nimport os\n\nTHIS_FILE = os.path.normpath(os.path.abspath(inspect.getsourcefile(lambda: None)))\n\nparser = argparse.ArgumentParser(description=__doc__)\nmode_group = parser.add_mutually_exclusive_group()\nmode_group.add_argument('--mode', choices=['pxr', 'al', 'both'], default='both',\n help=\"Which repo we're trying to replace lics for\")\nmode_group.add_argument('--al', help='Replace al lics',\n action='store_const', const='al', dest='mode')\nmode_group.add_argument('--pxr', help='Replace pixar lics',\n action='store_const', const='pxr', dest='mode')\n\nparser.add_argument('--reverse', action='store_true',\n help=(\"Replace lics in reverse, removing new and inserting old. \"\n \"Can only be used for one repo at a time (mode 'pxr' or mode 'al', \"\n \"but not mode 'both'\"))\n\nargs = parser.parse_args()\n\nold_pxr_cpp_lic = \"\"\"// Licensed under the Apache License, Version 2.0 (the \"Apache License\")\n// with the following modification; you may not use this file except in\n// compliance with the Apache License and the following modification to it:\n// Section 6. Trademarks. is deleted and replaced with:\n//\n// 6. Trademarks. 
This License does not grant permission to use the trade\n// names, trademarks, service marks, or product names of the Licensor\n// and its affiliates, except as required to comply with Section 4(c) of\n// the License and to reproduce the content of the NOTICE file.\n//\n// You may obtain a copy of the Apache License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the Apache License with the above modification is\n// distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n// KIND, either express or implied. See the Apache License for the specific\n// language governing permissions and limitations under the Apache License.\"\"\"\n\nold_pxr_py_lic = \"\"\"#\n# Licensed under the Apache License, Version 2.0 (the \"Apache License\")\n# with the following modification; you may not use this file except in\n# compliance with the Apache License and the following modification to it:\n# Section 6. Trademarks. is deleted and replaced with:\n#\n# 6. Trademarks. This License does not grant permission to use the trade\n# names, trademarks, service marks, or product names of the Licensor\n# and its affiliates, except as required to comply with Section 4(c) of\n# the License and to reproduce the content of the NOTICE file.\n#\n# You may obtain a copy of the Apache License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the Apache License with the above modification is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the Apache License for the specific\n# language governing permissions and limitations under the Apache License.\n#\n\"\"\"\n\nold_al_py_lic = \"\"\"# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.# \n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n\"\"\"\n\nnew_cpp_lic = \"\"\"// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\"\"\"\n\nnew_py_lic = \"\"\"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\n\nold_al_cpp_lic_line = \"\"\"// you may not use this file except in compliance with the 
License.//\"\"\"\nnew_cpp_lic_line = \"\"\"// you may not use this file except in compliance with the License.\"\"\"\n\nold_al_py_lic_line = \"\"\"# you may not use this file except in compliance with the License.//\"\"\"\nnew_py_lic_line = \"\"\"# you may not use this file except in compliance with the License.\"\"\"\n\nif args.mode == 'pxr':\n replacement_pairs = [\n (old_pxr_cpp_lic, new_cpp_lic),\n (old_pxr_py_lic, new_py_lic),\n ]\nelif args.mode == 'al':\n replacement_pairs = [\n (old_al_cpp_lic_line, new_cpp_lic_line),\n (old_al_py_lic, new_py_lic),\n (old_pxr_py_lic, new_py_lic),\n (old_al_py_lic_line, new_py_lic_line),\n ]\nelif args.mode == 'both':\n if args.reverse:\n raise ValueError(\"--reverse cannot be used with mode 'both'\")\n\n replacement_pairs = [\n (old_pxr_cpp_lic, new_cpp_lic),\n (old_pxr_py_lic, new_py_lic),\n (old_al_cpp_lic_line, new_cpp_lic_line),\n (old_al_py_lic, new_py_lic),\n (old_al_py_lic_line, new_py_lic_line),\n ]\nelse:\n raise ValueError(\"Unrecognized mode: {}\".format(args.mode))\n\nif args.reverse:\n replacement_pairs = [(pair[1], pair[0]) for pair in replacement_pairs]\n\nfor dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n filepath = os.path.normpath(os.path.abspath(os.path.join(dirpath, filename)))\n if filepath == THIS_FILE:\n continue\n with open(filepath, 'rb') as f:\n text = f.read()\n altered = False\n new_text = text\n for old_lic, new_lic in replacement_pairs:\n if old_lic in new_text:\n new_text = new_text.replace(old_lic, new_lic)\n altered = True\n if altered:\n with open(filepath, 'wb') as f:\n f.write(new_text)\n print(\"Replaced license in: {}\".format(filepath))\n" }, { "alpha_fraction": 0.7383222579956055, "alphanum_fraction": 0.7654798030853271, "avg_line_length": 46.07954406738281, "blob_id": "ddfc96067c1c5795081d9844a5de35104e5771c0", "content_id": "644efbb5e2c88389446a52e8ca6ae3d0a9510fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Shell", "length_bytes": 8285, "license_type": "no_license", "max_line_length": 178, "num_lines": 176, "path": "/merge_al.bash", "repo_name": "LumaPictures/maya-usd-scripts", "src_encoding": "UTF-8", "text": "# change to git root dir\ncd \"$(git rev-parse --show-toplevel)\"\n\n# Some commit reference points:\n\n# 19a1e755c258c9ac0d7495fa0add62508ff377a1 - plugins/AL_USDMaya (initial import of pixar from submodule)\n# 825ca13dd77af84872a063f146dee1799e8be25c - plugins/AL_USDMaya (some removals)\n# 141bab7eba1d380868e822a51f8c8f85e1c0b66f - plugins/AL_USDMaya (identical contents as above)\n# e5e10a28d0ba0535e83675399a5d15314fb79ec9 - plugin/al (renamed dir)\n\n# Removed in 825ca13dd77af84872a063f146dee1799e8be25c\n# D plugins/AL_USDMaya/.gitignore\n# D plugins/AL_USDMaya/AL_USDMaya_Corporate_CLA.pdf\n# D plugins/AL_USDMaya/AL_USDMaya_Individual_CLA.pdf\n# D plugins/AL_USDMaya/CHANGELOG.md\n# D plugins/AL_USDMaya/NOTICE.txt\n# D plugins/AL_USDMaya/PULL_REQUEST_TEMPLATE.md\n# D plugins/AL_USDMaya/build_docker_centos6.sh\n# D plugins/AL_USDMaya/build_docker_centos7.sh\n# D plugins/AL_USDMaya/build_lib.sh\n# D plugins/AL_USDMaya/cmake/defaults/CXXHelpers.cmake\n# D plugins/AL_USDMaya/cmake/defaults/Version.cmake\n# D plugins/AL_USDMaya/cmake/defaults/msvcdefaults.cmake\n# D plugins/AL_USDMaya/cmake/modules/FindMaya.cmake\n# D plugins/AL_USDMaya/cmake/modules/FindUFE.cmake\n# D plugins/AL_USDMaya/cmake/modules/FindUSD.cmake\n# D plugins/AL_USDMaya/docker/Dockerfile_centos6\n# D plugins/AL_USDMaya/docker/Dockerfile_centos7\n# D plugins/AL_USDMaya/docker/README.md\n# D plugins/AL_USDMaya/docker/build_alusdmaya.sh\n# D plugins/AL_USDMaya/setup_environment.sh\n\n# This function will take a \"stock\" AnimalLogic AL_USDMaya repo, and rename /\n# delete files and folders to make it \"line up\" with their locations in maya-usd\n\nfunction renameALRepo ()\n{\n # Get in line with e5e10a28d0ba0535e83675399a5d15314fb79ec9\n # We move in two steps because the AL repo originally has a 
dir called\n # \"plugin\", which we still want to move into plugin - ie,\n # plugin => plugin/al/plugin\n # By doing in two stages, it makes sure it doesn't treat this initial\n # 'plugin' dir special\n\n mkdir -p temp \n git mv -k $(ls -A) temp\n\n mkdir -p plugin\n git mv temp plugin/al\n\n git rm -f plugin/al/.gitignore\n git rm -f plugin/al/AL_USDMaya_Corporate_CLA.pdf\n git rm -f plugin/al/AL_USDMaya_Individual_CLA.pdf\n git rm -f plugin/al/CHANGELOG.md\n git rm -f plugin/al/NOTICE.txt\n git rm -f plugin/al/PULL_REQUEST_TEMPLATE.md\n git rm -f plugin/al/build_docker_centos6.sh\n git rm -f plugin/al/build_docker_centos7.sh\n git rm -f plugin/al/build_lib.sh\n git rm -f plugin/al/setup_environment.sh\n git rm -rf plugin/al/docker/\n\n git mv plugin/al/cmake cmake\n\n python replace_lic.py --al\n}\n\n##############################\n# branch: renamed_0_31_1\n##############################\n# Make a branch that's IDENTICAL to stock AL 0.31.1, except with directory\n# renames/ file moves / deletions to get files into the same place as in the\n# master branch of Maya-USD\n# \n# This branch is a useful reference point, and will be used to make a diff /\n# patch file which will handy when doing merges.\n\ngit checkout -B renamed_0_31_1 0.31.1\nrenameALRepo\ngit commit -a -m \"Renamed / deleted files from AL 0.31.1 to match maya-usd layout\"\n\n\n##############################\n# branch: renamed_al_develop\n##############################\n# Make a branch that's IDENTICAL to AL's latest develop\n# (3cdea62c31b1be60aa02138e4b54bb13f7e80ee1), except with directory renames/\n# file moves / deletions to get files into the same place as in the master\n# branch of Maya-USD\n# \n# This branch is a useful reference point, and will be used to make a diff /\n# patch file which will handy when doing merges.\n\ngit checkout -B renamed_al_develop origin/al/develop\nrenameALRepo\ngit commit -a -m \"Renamed / deleted files from AL dev (3cdea62c) to match maya-usd 
layout\"\n\n\n###############\n# Make a patch that gives all changes between renamed_0_31_1 and renamed_al_develop\n# ...this will be used when resolving merge conflicts\n\ngit diff renamed_0_31_1 renamed_al_develop > ../al_0311_develop.diff\n\n###############\n# Now that we have our helper diff, merge pixar-usd dev into latest maya-usd master\n\ngit checkout dev\n\n# attempt the merge - this will give a lot of merge conflicts...\ngit merge origin/al/develop\n\n# These files were removed / we don't care about:\ngit rm -f CHANGELOG.md\n\n# These were new files, that we're moving into their proper places:\n\ngit mv docs/cameraProxy.md plugin/al/docs/cameraProxy.md\ngit mv lib/AL_USDMaya/AL/usdmaya/cmds/ListTranslators.cpp plugin/al/lib/AL_USDMaya/AL/usdmaya/cmds/ListTranslators.cpp\ngit mv lib/AL_USDMaya/AL/usdmaya/cmds/SyncFileIOGui.cpp plugin/al/lib/AL_USDMaya/AL/usdmaya/cmds/SyncFileIOGui.cpp\ngit mv lib/AL_USDMaya/AL/usdmaya/cmds/SyncFileIOGui.h plugin/al/lib/AL_USDMaya/AL/usdmaya/cmds/SyncFileIOGui.h\ngit mv lib/AL_USDMaya/AL/usdmaya/nodes/ProxyUsdGeomCamera.cpp plugin/al/lib/AL_USDMaya/AL/usdmaya/nodes/ProxyUsdGeomCamera.cpp\ngit mv lib/AL_USDMaya/AL/usdmaya/nodes/ProxyUsdGeomCamera.h plugin/al/lib/AL_USDMaya/AL/usdmaya/nodes/ProxyUsdGeomCamera.h\ngit mv mayautils/AL/maya/tests/mayaplugintest/utils/PluginTranslatorOptionsTest.cpp plugin/al/mayautils/AL/maya/tests/mayaplugintest/utils/PluginTranslatorOptionsTest.cpp\ngit mv mayautils/AL/maya/utils/PluginTranslatorOptions.cpp plugin/al/mayautils/AL/maya/utils/PluginTranslatorOptions.cpp\ngit mv mayautils/AL/maya/utils/PluginTranslatorOptions.h plugin/al/mayautils/AL/maya/utils/PluginTranslatorOptions.h\ngit mv plugin/AL_USDMayaTestPlugin/AL/usdmaya/fileio/import_instances.cpp plugin/al/plugin/AL_USDMayaTestPlugin/AL/usdmaya/fileio/import_instances.cpp\ngit mv plugin/AL_USDMayaTestPlugin/AL/usdmaya/fileio/test_activeInActiveTranslators.cpp 
plugin/al/plugin/AL_USDMayaTestPlugin/AL/usdmaya/fileio/test_activeInActiveTranslators.cpp\ngit mv plugin/AL_USDMayaTestPlugin/AL/usdmaya/nodes/test_ProxyUsdGeomCamera.cpp plugin/al/plugin/AL_USDMayaTestPlugin/AL/usdmaya/nodes/test_ProxyUsdGeomCamera.cpp\ngit mv plugin/AL_USDMayaTestPlugin/AL/usdmaya/test_DiffGeom.cpp plugin/al/plugin/AL_USDMayaTestPlugin/AL/usdmaya/test_DiffGeom.cpp\ngit mv translators/CommonTranslatorOptions.cpp plugin/al/translators/CommonTranslatorOptions.cpp\ngit mv translators/CommonTranslatorOptions.h plugin/al/translators/CommonTranslatorOptions.h\n\ngit add lib/AL_USDMaya/AL/usdmaya/cmds/ListTranslators.h\ngit mv lib/AL_USDMaya/AL/usdmaya/cmds/ListTranslators.h plugin/al/lib/AL_USDMaya/AL/usdmaya/cmds/ListTranslators.h\n\ngit add mayautils/AL/maya/utils/Utils.cpp\ngit mv mayautils/AL/maya/utils/Utils.cpp plugin/al/mayautils/AL/maya/utils/Utils.cpp\n\n# both deleted:\n\ngit rm lib/AL_USDMaya/AL/usdmaya/cmds/ProxyShapeSelectCommands.cpp\ngit rm lib/AL_USDMaya/AL/usdmaya/cmds/ProxyShapeSelectCommands.h\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/cmds/ProxyShapeSelectCommands.cpp\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/cmds/ProxyShapeSelectCommands.h\n\n# newly deleted:\n\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/DrivenTransformsData.cpp\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/DrivenTransformsData.h\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/fileio/translators/CameraTranslator.cpp\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/fileio/translators/MeshTranslator.cpp\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/fileio/translators/NurbsCurveTranslator.cpp\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/nodes/proxy/DrivenTransforms.cpp\ngit rm plugin/al/lib/AL_USDMaya/AL/usdmaya/nodes/proxy/DrivenTransforms.h\ngit rm plugin/al/plugin/AL_USDMayaTestPlugin/AL/usdmaya/nodes/proxy/test_DrivenTransforms.cpp\n\n# When I inspected these, determined we didn't care about any of the changes in\n# these files between renamed_0_31_1 and renamed_al_develop - 
so just checking out\n# old version\n\ngit checkout dev -- CMakeLists.txt\n\nfunction showALDiff ()\n{\n git difftool -y renamed_0_31_1 renamed_al_develop -- \"$1\" &\n}\n\n# Manually merged these:\n\ngit mergetool cmake/modules/FindUFE.cmake\ngit mergetool plugin/al/lib/AL_USDMaya/AL/usdmaya/nodes/ProxyShapeUI.cpp\ngit mergetool plugin/al/mayautils/AL/maya/event/MayaEventManager.cpp\ngit mergetool plugin/al/mayautils/AL/maya/utils/MenuBuilder.h\ngit mergetool plugin/al/mayautils/AL/maya/utils/NodeHelper.cpp\ngit mergetool plugin/al/usdmayautils/AL/usdmaya/utils/DgNodeHelper.cpp" }, { "alpha_fraction": 0.6989051103591919, "alphanum_fraction": 0.7100645899772644, "avg_line_length": 40.66374206542969, "blob_id": "11a5e21e45ddd47c8705c9b9e5ae084e68636808", "content_id": "8870dee1f134f67deb2af3786804c0ee53b1109c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 14248, "license_type": "no_license", "max_line_length": 181, "num_lines": 342, "path": "/merge_pixar.bash", "repo_name": "LumaPictures/maya-usd-scripts", "src_encoding": "UTF-8", "text": "set -e\n\nif [[ -z \"$1\" ]]; then\n echo \"usage: $0 REMOTE\"\n echo \"First argument must be the git remote that the pixar dev branch can be found on\"\n exit 1\nfi\n\npxr_remote=\"$1\"\n\nif ! 
git rev-parse --verify -q $pxr_remote/dev > /dev/null ; then\n echo \"Given remote '$pxr_remote' did not have a dev branch\"\n echo \"(ie, '$pxr_remote/dev' did not exist)\"\n exit 1\nfi\n\n# change to git root dir\ncd \"$(git rev-parse --show-toplevel)\"\n\n# Some commit reference points:\n\n# 4b46bfd3b5ea96c709547e830bb645d60c21fa29 - plugins/USD (initial import of pixar from submodule)\n# 825ca13dd77af84872a063f146dee1799e8be25c - plugins/PXR_USDMaya (renamed dir)\n# 141bab7eba1d380868e822a51f8c8f85e1c0b66f - plugins/PXR_USDMaya (identical contents as above)\n# e5e10a28d0ba0535e83675399a5d15314fb79ec9 - plugin/pxr (renamed dir)\n\ndev_mergebase=$(git merge-base $pxr_remote/dev dev)\npixar_dev_commit=$(git show -s --format=\"%H\" dev)\n\n# This function will take a \"stock\" pixar USD repo, and rename / delete files\n# and folders to make it \"line up\" with their locations in maya-usd\n\nfunction renamePixarRepo ()\n{\n cd \"$(git rev-parse --show-toplevel)\"\n\n # move everything in the root to plugin/pxr\n rm -rf plugin/pxr\n mkdir -p plugin/pxr\n git mv -k $(ls -A) plugin/pxr\n\n # Move back some of the cmake stuff to the root\n mkdir -p cmake\n git mv plugin/pxr/cmake/defaults/ cmake/\n git mv plugin/pxr/cmake/modules/ cmake/\n\n # Remove a bunch of files / folders\n git rm -f cmake/defaults/Packages.cmake\n git rm -f cmake/modules/FindGLEW.cmake\n git rm -f cmake/modules/FindPTex.cmake\n git rm -f cmake/modules/FindRenderman.cmake\n git rm -f --ignore-unmatch cmake/modules/FindDraco.cmake\n git rm -f plugin/pxr/.appveyor.yml\n git rm -f plugin/pxr/.gitignore\n git rm -f plugin/pxr/.travis.yml\n git rm -f plugin/pxr/BUILDING.md\n git rm -f plugin/pxr/CHANGELOG.md\n git rm -f plugin/pxr/CONTRIBUTING.md\n git rm -f plugin/pxr/NOTICE.txt\n git rm -f plugin/pxr/README.md\n git rm -f plugin/pxr/USD_CLA_Corporate.pdf\n git rm -f plugin/pxr/USD_CLA_Individual.pdf\n git rm -f --ignore-unmatch VERSIONS.md\n git rm -f plugin/pxr/cmake/macros/generateDocs.py\n git rm 
-f plugin/pxr/pxr/CMakeLists.txt\n git rm -f plugin/pxr/pxr/pxrConfig.cmake.in\n\n git rm -rf plugin/pxr/.github/\n git rm -rf plugin/pxr/build_scripts/\n git rm -rf plugin/pxr/extras/\n git rm -rf plugin/pxr/pxr/base/\n git rm -rf plugin/pxr/pxr/imaging/\n git rm -rf plugin/pxr/pxr/usd/\n git rm -rf plugin/pxr/pxr/usdImaging/\n git rm -rf plugin/pxr/third_party/houdini/\n git rm -rf plugin/pxr/third_party/katana/\n git rm -rf plugin/pxr/third_party/renderman-22/\n\n git mv plugin/pxr/third_party/maya plugin/pxr/maya\n\n delete_replace_lic=0\n if [[ ! -f replace_lic.py ]]; then\n delete_replace_lic=1\n git checkout dev -- replace_lic.py\n fi\n python replace_lic.py --pxr\n if (( $delete_replace_lic )); then\n rm replace_lic.py\n fi\n}\n\n##############################\n# branch: renamed_mergebase\n##############################\n# Make a branch that's IDENTICAL to stock 19.05, except with directory renames/\n# file moves / deletions to get files into the same place as in the master\n# branch of Maya-USD\n# \n# This branch is a useful reference point, and will be used to make a diff /\n# patch file which will handy when doing merges.\n\necho \"Checking out last-merged pixar-dev commit ($dev_mergebase)\"\ngit checkout -B renamed_mergebase $dev_mergebase\necho \"...renaming files to match maya-usd layout...\"\nrenamePixarRepo\ngit commit -a -m \"Renamed / deleted files from previously-merged dev to match maya-usd layout\"\necho \"...done renaming files\"\n\n\n##############################\n# branch: renamed_pxr_dev\n##############################\n# Make a branch that's IDENTICAL to pixar's latest usd dev\n# (b29152c2896b1b4d03fddbd9c3dcaad133d2c495), except with directory renames/\n# file moves / deletions to get files into the same place as in the master\n# branch of Maya-USD\n# \n# This branch is a useful reference point, and will be used to make a diff /\n# patch file which will handy when doing merges.\n\necho \"Checking out latest pixar-dev commit 
($pixar_dev_commit)\"\ngit checkout -B renamed_pxr_dev $pxr_remote/dev\nrenamePixarRepo\necho \"...renaming files to match maya-usd layout...\"\ngit commit -a -m \"Renamed / deleted files from pixar dev to match maya-usd layout\"\necho \"...done renaming files\"\n\n###############\n# Make a patch that gives all changes between renamed_v1905 and renamed_pxr_dev\n# ...this will be used when resolving merge conflicts\n\ngit diff renamed_mergebase renamed_pxr_dev > ../pixar_dev.diff\necho \"Created diff of new changes to merge in\"\n\n\n###############\n# Now that we have our helper diff, merge pixar-usd dev into latest maya-usd master\n\ngit checkout dev\n\n# attempt the merge - this will give a lot of merge conflicts...\necho \"Attempting merge...\"\n\nset +e\n\nif git merge $pxr_remote/dev; then\n echo 'merge succeeded! Unbelieveable!'\nelse\n echo 'merge failed, as expected...'\nfi\n\necho \"Removing files that aren't used by maya-usd\"\n\n# These files were removed / we don't care about:\n\ngit rm -f cmake/defaults/Packages.cmake\ngit rm -f cmake/modules/FindGLEW.cmake\ngit rm -f cmake/modules/FindPTex.cmake\ngit rm -f cmake/modules/FindRenderman.cmake\ngit rm -f cmake/modules/FindDraco.cmake\ngit rm -f .appveyor.yml\ngit rm -f .travis.yml\ngit rm -f BUILDING.md\ngit rm -f CHANGELOG.md\ngit rm -f CONTRIBUTING.md\ngit rm -f NOTICE.txt\ngit rm -f USD_CLA_Corporate.pdf\ngit rm -f USD_CLA_Individual.pdf\ngit rm -f VERSIONS.md\ngit rm -f cmake/macros/generateDocs.py\ngit rm -f pxr/CMakeLists.txt\ngit rm -f pxr/pxrConfig.cmake.in\ngit rm -rf .github/\ngit rm -rf build_scripts/\ngit rm -rf extras/\ngit rm -rf pxr/base/\ngit rm -rf pxr/imaging/\ngit rm -rf pxr/usd/\ngit rm -rf pxr/usdImaging/\ngit rm -rf third_party/houdini/\ngit rm -rf third_party/katana/\ngit rm -rf third_party/renderman-22\n\nset -e\n\necho \"...done removing files.\"\n\n# for license, decided to just use the Pixar one unaltered... this is the\n# LICENSE.txt that stood at the root of the USD project. 
Had been removing\n# new license bits that didn't apply to the maya plugin... but there were\n# already many bits that didn't apply to maya, and having a few more shouldn't\n# make a difference... and going forward, will be easier to simply take their\n# LICENSE.txt unaltered (moved to plugin/pxr/LICENSE.txt)\n\ngit show $pxr_remote/dev:LICENSE.txt > plugin/pxr/LICENSE.txt\ngit add plugin/pxr/LICENSE.txt\n\necho \"Remaining conflicts to be resolved:\"\necho \"===========================================================\"\ngit status\necho \"===========================================================\"\necho \"(See the commented out section of the script for tips)\"\n\n# Ok, this ends the section of stuff that can be run automated. The rest of this\n# is commented out, and can be copy / pasted into a terminal, or just used for\n# for reference.\n\n# Basically, you need to go through the added / deleted / merge-conflict files,\n# making sure everything seems to be in the right place. The lines below have\n# sections covering ways of handling various issues - the trickiest being when\n# a rename was not detected, and our diff needs to be applied. 
Most of the other\n# issues can be resolved with normal git tricks...\n\n################################################################################\n# New Files\n################################################################################\n\n# These were new files, that we're moving into their proper places:\n\n# git mv third_party/maya/lib/pxrUsdMayaGL/testenv plugin/pxr/maya/lib/pxrUsdMayaGL\n# git mv third_party/maya/lib/usdMaya/testenv/UsdReferenceAssemblyChangeRepresentationsTest/* plugin/pxr/maya/lib/usdMaya/testenv/UsdReferenceAssemblyChangeRepresentationsTest\n# rm -rf third_party/maya/lib/usdMaya/testenv/UsdReferenceAssemblyChangeRepresentationsTest\n# git mv third_party/maya/lib/usdMaya/testenv/UsdExportAssemblyEditsTest plugin/pxr/maya/lib/usdMaya/testenv\n# git mv third_party/maya/lib/usdMaya/testenv/testUsdExportAssemblyEdits.py plugin/pxr/maya/lib/usdMaya/testenv/testUsdExportAssemblyEdits.py\n\n# git mv third_party/maya/plugin/pxrUsdTranslators/strokeWriter.* plugin/pxr/maya/plugin/pxrUsdTranslators\n# mkdir -p plugin/pxr/maya/plugin/pxrUsdTranslators/testenv/StrokeExportTest\n# git mv third_party/maya/plugin/pxrUsdTranslators/testenv/StrokeExportTest/StrokeExportTest.ma plugin/pxr/maya/plugin/pxrUsdTranslators/testenv/StrokeExportTest/StrokeExportTest.ma\n# git mv third_party/maya/plugin/pxrUsdTranslators/testenv/testPxrUsdTranslatorsStroke.py plugin/pxr/maya/plugin/pxrUsdTranslators/testenv/testPxrUsdTranslatorsStroke.py\n\n\n################################################################################\n# Changes we don't want\n################################################################################\n\n\n# # When I inspected these, determined we didn't care about any of the changes in\n# # these files between renamed_v1905 and renamed_pxr_dev - so just checking out\n# # old version\n\n# git checkout dev -- cmake/defaults/Options.cmake\n\n################################################################################\n# Bad 
rename detection - mapped to wrong file (ie, in AL)\n################################################################################\n\n# these are conflicts presumably due to bad rename detection\n# git checkout dev -- plugin/al/lib/AL_USDMaya/Doxyfile\n# git checkout dev -- plugin/al/schemas/AL/usd/schemas/mayatest/ExamplePolyCubeNode.h\n\n################################################################################\n# Bad rename detection - didn't detect a rename\n################################################################################\n\n# # Most of the rest of these seem to be files whose rename wasn't properly\n# # recorded by git - they're marked as modifications to deleted files. Solve by\n# # using the patch we made earlier.\n\nfunction applyPixarRootDiff ()\n{\n pxrPath=\"$1\"\n adPath=plugin/pxr/\"$1\"\n git apply ../pixar_1905_dev.diff --include=\"$adPath\"\n result=\"$?\"\n if (( $result == 0 )); then\n echo \"success!\"\n git add \"$adPath\"\n git rm \"$pxrPath\"\n else\n echo\n echo '!!!!!!!!!!!'\n echo 'failure!'\n echo '!!!!!!!!!!!'\n fi\n}\n\nfunction applyPixarMayaDiff ()\n{\n pxrPath=\"$1\"\n adPath=$(echo \"$pxrPath\" | sed -e 's~third_party/maya~plugin/pxr/maya~')\n git apply ../pixar_1905_dev.diff --include=\"$adPath\"\n result=\"$?\"\n if (( $result == 0 )); then\n echo \"success!\"\n git add \"$adPath\"\n git rm \"$pxrPath\"\n else\n echo\n echo '!!!!!!!!!!!'\n echo 'failure!'\n echo '!!!!!!!!!!!'\n fi\n}\n\n# applyPixarRootDiff cmake/macros/Private.cmake\n\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/batchRenderer.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/batchRenderer.h\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/hdImagingShapeDrawOverride.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/hdImagingShapeUI.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/hdRenderer.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/instancerShapeAdapter.cpp\n# 
applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/proxyDrawOverride.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/proxyShapeDelegate.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/proxyShapeUI.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/sceneDelegate.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/sceneDelegate.h\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/shapeAdapter.cpp\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/shapeAdapter.h\n# applyPixarMayaDiff third_party/maya/lib/pxrUsdMayaGL/usdProxyShapeAdapter.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/CMakeLists.txt\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/editUtil.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/editUtil.h\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/hdImagingShape.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/hdImagingShape.h\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/readJob.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/referenceAssembly.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/shadingModeImporter.h\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/shadingModePxrRis.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/shadingModeUseRegistry.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdExportPackage.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdExportRfMLight.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdExportShadingModePxrRis.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdImportRfMLight.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdImportShadingModePxrRis.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdMayaGetVariantSetSelections.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/testenv/testUsdMayaXformStack.py\n# applyPixarMayaDiff 
third_party/maya/lib/usdMaya/testenv/testUsdReferenceAssemblyChangeRepresentations.py\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/translatorModelAssembly.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/translatorRfMLight.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/translatorUtil.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/translatorUtil.h\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/translatorXformable.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/util.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/util.h\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/wrapEditUtil.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/writeJob.cpp\n# applyPixarMayaDiff third_party/maya/lib/usdMaya/writeJobContext.cpp\n# applyPixarMayaDiff third_party/maya/plugin/pxrUsdTranslators/CMakeLists.txt\n# applyPixarMayaDiff third_party/maya/plugin/pxrUsdTranslators/fileTextureWriter.cpp\n# applyPixarMayaDiff third_party/maya/plugin/pxrUsdTranslators/lightReader.cpp\n# applyPixarMayaDiff third_party/maya/plugin/pxrUsdTranslators/lightWriter.cpp" } ]
3
midnightslacker/hackerrank
https://github.com/midnightslacker/hackerrank
eb72706d6739619b073becf22aa023e231870f20
90d6734b7073f2bf4e5d9b6ea26780a1d9e56031
3322c4411e839f8dc839a9484e406b1960630f94
refs/heads/master
2021-01-01T19:16:34.864818
2015-04-20T18:24:53
2015-04-20T18:24:53
34,278,253
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5616438388824463, "alphanum_fraction": 0.5808219313621521, "avg_line_length": 27.076923370361328, "blob_id": "c9352b6a34c11a90f2b918bda161af0b2c2e00d8", "content_id": "19aadaaadaaaeb63b6e5625b67cc54ce83e60dbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/halloween_party.py", "repo_name": "midnightslacker/hackerrank", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\n\ndef maxCandy (numCuts):\n if numCuts%2==0:\n print (numCuts/2) * (numCuts/2)\n else:\n print (numCuts/2) * ((numCuts/2) + 1)\n\nif __name__=='__main__':\n test_cases = raw_input()\n for _ in range(int(test_cases)):\n numCuts = int(raw_input())\n maxCandy(numCuts)\n" }, { "alpha_fraction": 0.6135593056678772, "alphanum_fraction": 0.6271186470985413, "avg_line_length": 14, "blob_id": "e84ac90870362a58b19bed0717e67aac17ea5869", "content_id": "2f6381ab9a820885f612ee7192a39af8b32e01ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 45, "num_lines": 39, "path": "/love-letter-mystery.py", "repo_name": "midnightslacker/hackerrank", "src_encoding": "UTF-8", "text": "#!/bin/python\n'''\nNOTES:\neven xy yx\nodd xyx or xzyzx or xzjyjzx\n\nchr(ord('c') + 1)\n'd'\nord('c') - ord('d')\n-1\n\n'''\n\nstrings = ['abc', 'abcba', 'abcd', 'cba']\n\ndef isPalindrome(word):\n\tif word == word[::-1]:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef countPalindromeMaker(word):\n\tcounter = -1\n\tresult = 0\n\t\n\tfor letter in word:\n\t\tif ord(letter) - ord(word[counter]) < 0:\n\t\t\tresult += ord(letter) - ord(word[counter])\n\n\t\tcounter = counter - 1\n\t\n\treturn abs(result)\n\nfor word in strings:\n\tif isPalindrome(word) == True:\n\t\tprint 0\n\telse:\n\t\tresult = 
countPalindromeMaker(word)\n\t\tprint result\n\n\t\n\n\n" }, { "alpha_fraction": 0.5616196990013123, "alphanum_fraction": 0.5721830725669861, "avg_line_length": 21.639999389648438, "blob_id": "f5054d424472b3b25e2054a37f79bedd09fd3825", "content_id": "846c35aea7dbca1fb434a02612878fdd683216f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 69, "num_lines": 25, "path": "/cut-the-sticks.py", "repo_name": "midnightslacker/hackerrank", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\n#!/usr/bin/py\ndef countCuts(array):\n sortedArray = sorted(array)\n if len(sortedArray) < 1:\n return\n newArray = []\n cutLen = sortedArray[0]\n counter = 0\n for x in sortedArray:\n x = x - cutLen\n newArray.append(x)\n counter+=1\n print counter\n \n while 0 in newArray:\n newArray.remove(0)\n \n countCuts(newArray)\n \n\nif __name__ == '__main__':\n a = input()\n array = map(int, raw_input().strip().split(\" \"))\n countCuts(array)\n\n\n" }, { "alpha_fraction": 0.5771428346633911, "alphanum_fraction": 0.581428587436676, "avg_line_length": 28.95652198791504, "blob_id": "12fa7f3b3c3db2da2705f37c9b6d91b028489ced", "content_id": "42f89de5a5992a124f49ca9e70125117a07b9eef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/AngryProfessor.py", "repo_name": "midnightslacker/hackerrank", "src_encoding": "UTF-8", "text": "# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\ndef isClassCancelled(students, must_attend, student_times):\n student_counter = 0\n for time in student_times:\n if time <= 0:\n student_counter = student_counter + 1\n else:\n continue\n \n if int(student_counter) < int(must_attend):\n print \"YES\"\n return\n else:\n print \"NO\"\n return\n\nif __name__=='__main__':\n test_cases = raw_input()\n for _ in range(int(test_cases)):\n students, must_attend = raw_input().split()\n student_times = map(int, raw_input().split(\" \"))\n isClassCancelled(students, must_attend, student_times)\n\n\n \n" }, { "alpha_fraction": 0.5603216886520386, "alphanum_fraction": 0.5656836628913879, "avg_line_length": 23.799999237060547, "blob_id": "4c463e4e92411645613f74da2769938445ea7255", "content_id": "341a68228da58280b98b85c000be71d2755ca871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 52, "num_lines": 15, "path": "/lonely-integer.py", "repo_name": "midnightslacker/hackerrank", "src_encoding": "UTF-8", "text": "#!/usr/bin/py\ndef lonelyinteger(numArray):\n newArray = []\n answer = 0\n for number in numArray:\n if number in newArray:\n newArray.remove(number)\n else:\n newArray.append(number)\n return newArray[0]\n\nif __name__ == '__main__':\n a = input()\n array = map(int, raw_input().strip().split(\" \"))\n print lonelyinteger(array)\n\n" } ]
5
santander-syngenta/rc
https://github.com/santander-syngenta/rc
50acfe695a8601515cfeb4d806762fce5d13b8b8
1b2670df9ca5b4d0583a70b6e4ed1a1b675663f4
1d34afa7335ece64b153bc8889c13bb59688c5f2
refs/heads/master
2023-02-27T06:24:02.072233
2021-01-25T14:36:38
2021-01-25T14:36:38
331,050,153
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6886688470840454, "alphanum_fraction": 0.7079207897186279, "avg_line_length": 26.119403839111328, "blob_id": "50bc21df7cdf6e2072c1f4a50d3b4741a6f409fd", "content_id": "ac25a6fad6ec44c4e29c2de9420fcbfc42c9f33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1818, "license_type": "no_license", "max_line_length": 87, "num_lines": 67, "path": "/api/models.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\nfrom django.template.defaultfilters import date\n\n# Create your models here.\n\nclass FormTags(models.Model):\n\tname = models.CharField(max_length = 300)\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\nclass Form(models.Model):\n\ttitle = models.CharField(max_length = 300, null = True)\n\tfile = models.FileField(upload_to = \"documents/\", null = True)\n\tdate = models.DateField(auto_now_add = True, null = True)\n\ttag = models.ManyToManyField(FormTags)\n\n\tdef __str__(self):\n\t\treturn self.title\n\n\nclass Link(models.Model):\n\ttitle = models.CharField(max_length = 300)\n\turl = models.CharField(max_length = 600)\n\tdate = models.DateField(auto_now_add = True)\n\n\tdef __str__(self):\n\t\treturn self.title\n\n\nclass Subject(models.Model):\n\tname = models.CharField(max_length = 300)\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\nclass Content(models.Model):\n\ttitle = models.CharField(max_length = 300, null = True)\n\tfile = models.FileField(upload_to = \"documents/training/\", null = True)\n\tfile2 = models.FileField(upload_to = \"documents/training/\", null = True, blank = True)\n\tdate = models.DateField(auto_now_add = True, null = True)\n\tsubject = models.ManyToManyField(Subject)\n\n\tdef __str__(self):\n\t\treturn self.title\n\n\nclass Form2(models.Model):\n\ttitle = models.CharField(max_length = 300, null = True)\n\tfile = models.FileField(upload_to = 
\"documents/resourceForms/\", null = True)\n\tdate = models.DateField(auto_now_add = True, null = True)\n\n\tdef __str__(self):\n\t\treturn self.title\n\t\n\nclass Contact(models.Model):\n\temail = models.CharField(max_length = 300, null = True)\n\tname = models.CharField(max_length = 300, null = True)\n\tnumber = models.CharField(max_length = 300, null = True)\n\tspeciality = models.TextField(max_length = 300, null = True)\n\n\tdef __str__(self):\n\t\treturn self.name\n\n" }, { "alpha_fraction": 0.5376884341239929, "alphanum_fraction": 0.5879396796226501, "avg_line_length": 21.11111068725586, "blob_id": "f9e217b52a0604cc776f4941d62c8203d602f0a5", "content_id": "cb6abbbe418e79d604e9ebbcf385a902bd872c8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/api/migrations/0010_auto_20201210_0809.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2020-12-10 16:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0009_form2'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='form2',\n name='file',\n field=models.FileField(null=True, upload_to='documents/resourceForms/'),\n ),\n ]\n" }, { "alpha_fraction": 0.6732283234596252, "alphanum_fraction": 0.6803149580955505, "avg_line_length": 44.39285659790039, "blob_id": "b717c343274c85ff2e23d1fc10a2fde605e5a4ab", "content_id": "6699c57a78ae914f7d19a0ca65af97ca5bd812ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1270, "license_type": "no_license", "max_line_length": 82, "num_lines": 28, "path": "/blog/urls.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n\tpath('', views.home, name = 'home'),\n\tpath('blog/', views.blog, name = 'blog'),\n\tpath('files/', views.files, name='files'),\n\tpath('train/', views.training, name = 'training'),\n\tpath('methods/', views.methods, name= 'methods'),\n\tpath('display/<str:pk>/', views.display, name='display'),\n\tpath('display2/<str:pk>/', views.display2, name='display2'),\n\tpath('display3/<str:pk>/', views.display3, name='display3'),\n\tpath('display4/<str:pk>/', views.display4, name='display4'),\n\tpath('tagDB/', views.tagDB, name = 'tagDB'),\n\tpath('search/<str:pk>/', views.search, name = 'search'),\n\tpath('search/<str:pk>/', views.search, name = 'search'),\n\tpath('resources/', views.resources, name='resources'),\n\tpath('links/', views.links, name='links'),\n\tpath('forms/', views.forms, name='forms'),\n\tpath('login/', views.login, name='login'),\n\tpath('trainingDB/', views.trainingUpload, name='trainingDB'),\n\tpath('resourceFormUpload/', views.resourceFormUpload, name='resourceFormUpload'),\n\tpath('linkUpload/', views.linkUpload, name='linkUpload'),\n\tpath('support/', views.support, name='support'),\n\tpath('supportAdmin/', views.supportAdmin, name='supportAdmin'),\n\tpath('calculator/', views.calculator, name='calculator'),\n]" }, { "alpha_fraction": 0.5141955614089966, "alphanum_fraction": 0.5709779262542725, "avg_line_length": 17.647058486938477, "blob_id": "bc6bdb18770acaa8c4f2bcd16b7aeff292a79188", "content_id": "4674c442b8880e6297084584ffea70fb81da3bdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/api/migrations/0015_remove_contact_date.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2021-01-12 17:46\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', 
'0014_contact_number'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='contact',\n name='date',\n ),\n ]\n" }, { "alpha_fraction": 0.48574143648147583, "alphanum_fraction": 0.4971482753753662, "avg_line_length": 26.6842098236084, "blob_id": "8cbfb35663a8671c1a2c3bd20e45cf3424f37313", "content_id": "88d0d08948fc49b72b696c2f9a48206948b258c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2104, "license_type": "no_license", "max_line_length": 114, "num_lines": 76, "path": "/api/migrations/0008_auto_20201202_0823.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2020-12-02 16:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0007_content'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Subject',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=300)),\n ],\n ),\n migrations.RemoveField(\n model_name='content',\n name='active',\n ),\n migrations.RemoveField(\n model_name='content',\n name='file_type',\n ),\n migrations.RemoveField(\n model_name='content',\n name='name',\n ),\n migrations.RemoveField(\n model_name='content',\n name='path',\n ),\n migrations.RemoveField(\n model_name='content',\n name='size',\n ),\n migrations.RemoveField(\n model_name='content',\n name='timestamp',\n ),\n migrations.RemoveField(\n model_name='content',\n name='updated',\n ),\n migrations.RemoveField(\n model_name='content',\n name='uploaded',\n ),\n migrations.RemoveField(\n model_name='content',\n name='user',\n ),\n migrations.AddField(\n model_name='content',\n name='date',\n field=models.DateField(auto_now_add=True, null=True),\n ),\n migrations.AddField(\n model_name='content',\n name='file',\n field=models.FileField(null=True, upload_to='documents/training/'),\n ),\n 
migrations.AddField(\n model_name='content',\n name='title',\n field=models.CharField(max_length=300, null=True),\n ),\n migrations.AddField(\n model_name='content',\n name='subject',\n field=models.ManyToManyField(to='api.Subject'),\n ),\n ]\n" }, { "alpha_fraction": 0.8071428537368774, "alphanum_fraction": 0.8107143044471741, "avg_line_length": 24.545454025268555, "blob_id": "9dcc1e5635de6e8dadce1f78fa27f9e53985aff6", "content_id": "96d95dd0b1a49205501df28d24221fd6013911b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/api/admin.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n# Register your models here.\n\nadmin.site.register(Form)\nadmin.site.register(FormTags)\nadmin.site.register(Link)\nadmin.site.register(Content)\nadmin.site.register(Subject)\nadmin.site.register(Form2)\nadmin.site.register(Contact)" }, { "alpha_fraction": 0.6965065598487854, "alphanum_fraction": 0.6986899375915527, "avg_line_length": 17.360000610351562, "blob_id": "4a9bf777435e7aadc73c3c1dc63e03554fde6af4", "content_id": "3c944bd57d243f1e5042c881f1b7f4ea80cdab92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 54, "num_lines": 25, "path": "/api/forms.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom .models import *\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django import forms\n\n\nclass upload(ModelForm):\n\tclass Meta:\n\t\tmodel = Form\n\t\tfields = '__all__'\n\t\texclude = ['date']\n\n\nclass uploadTrainingContent(ModelForm):\n\tclass Meta:\n\t\tmodel = Content\n\t\tfields = '__all__'\n\t\texclude = ['date']\n\n\nclass uploadResourceForm(ModelForm):\n\tclass 
Meta:\n\t\tmodel = Form2\n\t\tfields = '__all__'\n\t\texclude = ['date']" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.559556782245636, "avg_line_length": 18, "blob_id": "6b337080cb049692f9d639dc3f22ff6f1b871523", "content_id": "5074cac7746af34bdb9792467aaeae825485315d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/api/migrations/0005_auto_20201012_1519.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-10-12 19:19\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0004_auto_20201008_1155'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Link',\n ),\n migrations.DeleteModel(\n name='LinkTags',\n ),\n ]\n" }, { "alpha_fraction": 0.708088219165802, "alphanum_fraction": 0.7301470637321472, "avg_line_length": 22.660869598388672, "blob_id": "e0227e4c9dd5fd3aebd02498a3bf46b5a3d2ef27", "content_id": "0b3442aae59443fbebb8d9ac30501b1a89149d32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2720, "license_type": "no_license", "max_line_length": 73, "num_lines": 115, "path": "/blog/views.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom rest_framework.renderers import TemplateHTMLRenderer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.parsers import MultiPartParser\n# Create your views here.\nfrom .models import *\nfrom api.models import *\nfrom api.serializers import *\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nimport json\n\ndef login(request):\n\treturn 
render(request, 'blog/home.html')\n\ndef home(request):\n\tobjs = Announcement.objects.values()\n\tx = 0; d = {};\n\tfor y in objs:\n\t\td[x] = y\n\t\tx+=1\n\n\tmethods = Form.objects.count()\n\tlinks = Link.objects.count()\n\tcontents = Content.objects.count()\n\tforms = Form2.objects.count()\n\n\tcontext = {'text': json.dumps(d),'forms':[forms,links,contents,methods]}\n\treturn render(request, 'blog/home2.html', context)\n\n\ndef methods(request):\n\treturn\trender(request,'blog/methods.html')\n\n\ndef training(request):\n\treturn render(request, 'blog/training.html')\n\n\ndef files(request):\n\treturn render(request, 'blog/files.html')\n\n\ndef display(request, pk):\n\turl = 'http://172.20.57.135:88/api/form-detail/' + pk + '/'\n\tcontext = {'url':url}\n\treturn render(request, 'blog/display.html', context)\n\n\ndef display2(request, pk):\n\turl = 'http://172.20.57.135:88/api/content-detail/' + pk + '/'\n\tcontext = {'url':url}\n\treturn render(request, 'blog/display.html', context)\n\n\ndef display3(request, pk):\n\turl = 'http://172.20.57.135:88/api/form2-detail/' + pk + '/'\n\tcontext = {'url':url}\n\treturn render(request, 'blog/display.html', context)\n\ndef display4(request, pk):\n\turl = 'http://172.20.57.135:88/api/content-detail/' + pk + '/'\n\tcontext = {'url':url}\n\treturn render(request, 'blog/display4.html', context)\n\n\ndef tagDB(request):\n\treturn render(request,'blog/tag.html')\n\n\ndef resources(request):\n\treturn render(request, 'blog/resources.html')\n\n\ndef links(request):\n\treturn render(request, 'blog/links.html')\n\n\ndef blog(request):\n\treturn HttpResponse('This will be the blog')\n\n\ndef search(request, pk):\n\tcontext = {'pk':pk}\n\treturn render(request, 'blog/search.html', context)\n\n\ndef forms(request):\n\treturn render(request, 'blog/forms.html')\n\n\ndef trainingUpload(request):\n\treturn render(request, 'blog/trainingUpload.html')\n\n\ndef resourceFormUpload(request):\n\treturn render(request, 
'blog/resourceFormUpload.html')\n\n\ndef linkUpload(request):\n\treturn render(request, 'blog/linkUpload.html')\n\n\ndef support(request):\n\treturn render(request, 'blog/supportUser.html')\n\n\ndef supportAdmin(request):\n\treturn render(request, 'blog/support.html')\n\n\ndef calculator(request):\n\treturn render(request, 'blog/calculator.html')" }, { "alpha_fraction": 0.5141844153404236, "alphanum_fraction": 0.5815602540969849, "avg_line_length": 16.625, "blob_id": "8e395d199c8700f99f69d880295f74a0f5c0c35b", "content_id": "2780377ba23c9bcb723010f2e81b91507ac2c31a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/blog/migrations/0003_delete_methods.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-10-13 20:15\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_methods'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Methods',\n ),\n ]\n" }, { "alpha_fraction": 0.5226730108261108, "alphanum_fraction": 0.5966587066650391, "avg_line_length": 22.27777862548828, "blob_id": "98326e56a8efb7665b3d5c10cc870acc401c99fa", "content_id": "a6c29313d3b61c8f285bef7b780d49e53ca6806c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/api/migrations/0011_content_file2.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2020-12-31 13:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0010_auto_20201210_0809'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='content',\n name='file2',\n field=models.FileField(blank=True, null=True, 
upload_to='documents/training/'),\n ),\n ]\n" }, { "alpha_fraction": 0.5080214142799377, "alphanum_fraction": 0.5632798671722412, "avg_line_length": 23.39130401611328, "blob_id": "e2b0b3559697f5b7705196933fd590f01ede78e6", "content_id": "df77816e84f20c87de74a9015503e841f9da1ff0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/api/migrations/0003_auto_20201006_1457.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-10-06 18:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0002_auto_20201005_1329'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='form',\n name='date',\n field=models.DateTimeField(auto_now_add=True, null=True),\n ),\n migrations.AddField(\n model_name='link',\n name='date',\n field=models.DateTimeField(auto_now_add=True, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5320855379104614, "alphanum_fraction": 0.5828877091407776, "avg_line_length": 19.77777862548828, "blob_id": "a7fbb95d3ea6396d5d639d53e7f03cdd24d6b355", "content_id": "58f10cb2bbc547e0bfbda372935fedd6c8e41493", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 46, "num_lines": 18, "path": "/blog/migrations/0005_announcement_text2.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2020-12-15 17:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0004_announcement'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='announcement',\n name='text2',\n field=models.TextField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 
0.4000000059604645, "avg_line_length": 8, "blob_id": "32b99482aceee4d1d0e15a2f4bfb68c0e499f497", "content_id": "48937316c527854c6748552bfeead7ae53302341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10, "license_type": "no_license", "max_line_length": 8, "num_lines": 1, "path": "/README.md", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "\"# cpfd\" \n" }, { "alpha_fraction": 0.48923078179359436, "alphanum_fraction": 0.7025641202926636, "avg_line_length": 16.105262756347656, "blob_id": "71c03e647ccce873cf9de207aa98d7e0caf45ebb", "content_id": "f4922f3cb598457f9ad603744404807fecdd6194", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 975, "license_type": "no_license", "max_line_length": 27, "num_lines": 57, "path": "/requirements.txt", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "appdirs==1.4.4\nasgiref==3.2.10\nbackcall==0.2.0\nboto==2.49.0\nboto3==1.14.33\nbotocore==1.17.33\ncertifi==2020.6.20\ncffi==1.14.3\nchardet==3.0.4\nclass-registry==2.1.2\ncolorama==0.4.3\ncryptography==3.2.1\ndecorator==4.4.2\ndistlib==0.3.1\ndj-database-url==0.5.0\nDjango==3.1\ndjango-filter==2.3.0\ndjango-smtp-ssl==1.0\ndjango-storages==1.9.1\ndjangorestframework==3.12.2\ndocutils==0.15.2\nfilelock==3.0.12\nfilters==1.3.2\ngunicorn==20.0.4\nidna==2.10\nipython==7.18.1\nipython-genutils==0.2.0\njedi==0.17.2\njmespath==0.10.0\nnumpy==1.19.4\npandas==1.1.4\nparso==0.7.1\npickleshare==0.7.5\npostgres==3.0.0\nprompt-toolkit==3.0.7\npsycopg2==2.8.5\npsycopg2-binary==2.8.5\npsycopg2-pool==1.1\npycparser==2.20\nPygments==2.6.1\npython-dateutil==2.8.1\npytz==2020.1\nregex==2020.7.14\nrequests==2.24.0\ns3chunkuploader==0.14\ns3transfer==0.3.3\nsix==1.15.0\nsqlparse==0.4.1\ntraitlets==5.0.2\nurllib3==1.25.10\nvirtualenv==20.0.29\nwcwidth==0.2.5\nwfastcgi==3.0.0\nwhitenoise==5.1.0\nxlrd==1.2.0\nxlwt==1.3.0\nzipfile36==0.1.3\n" }, { 
"alpha_fraction": 0.5164113640785217, "alphanum_fraction": 0.5426695942878723, "avg_line_length": 29.46666717529297, "blob_id": "1edcdd7144bab02d67711d15fe07f354cf3d8e74", "content_id": "32497e51c70002583623eb3700d11009c2e3f84c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 914, "license_type": "no_license", "max_line_length": 114, "num_lines": 30, "path": "/api/migrations/0001_initial.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-10-05 16:21\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Form',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=300, null=True)),\n ('file', models.FileField(null=True, upload_to='documents/')),\n ],\n ),\n migrations.CreateModel(\n name='Link',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200)),\n ('url', models.CharField(max_length=200)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6859361529350281, "alphanum_fraction": 0.6876617670059204, "avg_line_length": 23.680850982666016, "blob_id": "74c77a2fac8fb987907d28418a1d4724d473b075", "content_id": "e4ffbe4b5409584c252aecfb4f122e9c944afd3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 72, "num_lines": 47, "path": "/api/serializers.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import *\n\n\nclass FormSerializer(serializers.ModelSerializer):\n\tserializers.DateField(format='%b %d, %Y', input_formats=['%b %d, %Y'])\n\tclass 
Meta:\n\t\tmodel = Form\n\t\tfields = '__all__'\n\nclass TagSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = FormTags\n\t\tfields = '__all__'\n\nclass LinkSerializer(serializers.ModelSerializer):\n\tserializers.DateField(format='%b %d, %Y', input_formats=['%b %d, %Y'])\n\tclass Meta:\n\t\tmodel = Link\n\t\tfields = '__all__'\n\n\nclass ContentSerializer(serializers.ModelSerializer):\n\tserializers.DateField(format='%b %d, %Y', input_formats=['%b %d, %Y',])\n\tclass Meta:\n\t\tmodel = Content\n\t\tfields = '__all__'\n\n\nclass SubjectSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Subject\n\t\tfields = '__all__'\n\n\nclass Form2Serializer(serializers.ModelSerializer):\n\tserializers.DateField(format='%b %d, %Y', input_formats=['%b %d, %Y',])\n\tclass Meta:\n\t\tmodel = Form2\n\t\tfields = '__all__'\n\n\nclass ContactSerializer(serializers.ModelSerializer):\n\tserializers.DateField(format='%b %d, %Y', input_formats=['%b %d, %Y',])\n\tclass Meta:\n\t\tmodel = Contact\n\t\tfields = '__all__'" }, { "alpha_fraction": 0.502970278263092, "alphanum_fraction": 0.5425742864608765, "avg_line_length": 20.95652198791504, "blob_id": "864c16468008acb9a4fecb4d0ee08f5ef3bc4bfb", "content_id": "1f4ebc64d3b91208da64827af411e9f3f4782cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/blog/migrations/0006_auto_20210105_1209.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2021-01-05 20:09\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0005_announcement_text2'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='announcement',\n old_name='text2',\n new_name='body',\n ),\n migrations.RenameField(\n model_name='announcement',\n old_name='text',\n new_name='headline',\n ),\n 
]\n" }, { "alpha_fraction": 0.7000839114189148, "alphanum_fraction": 0.7051174640655518, "avg_line_length": 49.74468231201172, "blob_id": "d6a5bc8009605fefa88d7db9f7040409f7ac3342", "content_id": "13ebb80d4484c18bea89e99c92468e8c6a3ad0cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2384, "license_type": "no_license", "max_line_length": 78, "num_lines": 47, "path": "/api/urls.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.urls import path, include, re_path\nfrom . import views\nfrom django.views.generic.base import TemplateView\n\nurlpatterns = [\n\tpath('', views.apiOverview, name='api-overview'),\n\n\tpath('tag-list/', views.tagList, name='tag-list'),\n\tpath('tag-detail/<str:pk>/', views.tagDetail, name='tag-detail'),\n\tpath('tag-create/', views.tagCreate, name='tag-create'),\n\tpath('tag-delete/<str:pk>/', views.tagDelete, name='tag-delete'),\n\tpath('tag-update/<str:pk>/', views.tagUpdate, name='tag-update'),\n\n\tpath('form-list/', views.formList, name='form-list'),\n\tpath('form-detail/<str:pk>/', views.formDetail, name='form-detail'),\n\tpath('form-create/', views.formCreate, name='form-create'),\n\tpath('form-delete/<str:pk>/', views.formDelete, name='form-delete'),\n\tpath('form-update/<str:pk>/', views.formUpdate, name='form-update'),\n\n\tpath('uploadForm/', views.uploadForm, name = 'uploadForm'),\n\n\tpath('link-list/', views.linkList, name='link-list'),\n\tpath('link-detail/<str:pk>/', views.linkDetail, name='link-detail'),\n\tpath('link-create/', views.linkCreate, name='link-create'),\n\tpath('link-delete/<str:pk>/', views.linkDelete, name='link-delete'),\n\tpath('link-update/<str:pk>/', views.linkUpdate, name='link-update'),\n\n\tpath('uploadTraining/', views.uploadTraining, name = 'uploadTraining'),\n\tpath('content-list/', views.contentList, name = 'content-list'),\n\tpath('content-detail/<str:pk>/', views.contentDetail, 
name='content-detail'),\n\tpath('content-delete/<str:pk>/', views.contentDelete, name='content-delete'),\n\tpath('content-update/<str:pk>/', views.contentUpdate, name='content-update'),\n\n\tpath('subject-list/', views.subjectList, name = 'subject-list'),\n\n\tpath('form2-list/', views.form2List, name='form2-list'),\n\tpath('form2-detail/<str:pk>/', views.form2Detail, name='form2-detail'),\n\tpath('form2-delete/<str:pk>/', views.form2Delete, name='form2-delete'),\n\tpath('form2-update/<str:pk>/', views.form2Update, name='form2-update'),\n\tpath('uploadResource/', views.uploadResourceFunc, name='uploadResource'),\n\n\tpath('contact-list/', views.contactList, name='contact-list'),\n\tpath('contact-detail/<str:pk>/', views.contactDetail, name='contact-detail'),\n\tpath('contact-create/', views.contactCreate, name='contact-create'),\n\tpath('contact-delete/<str:pk>/', views.contactDelete, name='contact-delete'),\n\tpath('contact-update/<str:pk>/', views.contactUpdate, name='contact-update'),\n]" }, { "alpha_fraction": 0.7342256307601929, "alphanum_fraction": 0.7399617433547974, "avg_line_length": 26.421052932739258, "blob_id": "e84e10fdceef9437537d872e0c55fa8557d699ff", "content_id": "f79521bb9a7e3164bbd3bbf5590d489dfd2a5e42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/blog/models.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\nclass Question(models.Model):\n\ttxt = models.TextField()\n\tasked = models.DateTimeField(auto_now_add = True)\n\n\nclass Answer(models.Model):\n\tquestion = models.ForeignKey(Question, on_delete = models.CASCADE)\n\tanswer = models.TextField()\n\tanswered = models.DateTimeField(auto_now_add = True)\n\n\nclass Announcement(models.Model):\n\theadline = models.TextField()\n\tbody = models.TextField(null = 
True)\n\tlink = models.CharField(max_length = 200, null = True, blank = True)\n\n\t" }, { "alpha_fraction": 0.7129557728767395, "alphanum_fraction": 0.7146182060241699, "avg_line_length": 23.117647171020508, "blob_id": "2d2c0d43333ee03cd72313292763bb4da6aa8162", "content_id": "468bed339438cd7bb9a3bbf1d431afb95093b124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9023, "license_type": "no_license", "max_line_length": 105, "num_lines": 374, "path": "/api/views.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\n\nfrom rest_framework.decorators import api_view, authentication_classes, parser_classes, renderer_classes\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication\nfrom rest_framework.response import Response\nfrom rest_framework.parsers import MultiPartParser, FormParser\nfrom rest_framework.renderers import MultiPartRenderer\n\nfrom .serializers import *\nfrom .models import *\nfrom .forms import *\n# Create your views here.\n\n\n@api_view(['GET'])\ndef apiOverview(request):\n\tapi_urls = {\n\t\t'Form List': '/form-list/',\n\t\t'form Detail': '/form-detail/<str:pk>/',\n\t\t'Create form': 'form-create/',\n\t\t'Update form': 'form-update/<str:pk>/',\n\t\t'Delete form': 'form-delete/<str:pk>/',\n\n\t\t'Tag List': '/tag-list/',\n\t\t'Tag Detail': '/tag-detail/<str:pk>/',\n\t\t'Create tag': 'tag-create/',\n\t\t'Update tag': 'tag-update/<str:pk>/',\n\t\t'Delete tag': 'tag-delete/<str:pk>/',\n\n\t\t'Link List': '/link-list',\n\t\t'Link Detail': '/link-detail/<str:pk>/',\n\t\t'Create link':'/link-create/',\n\t\t'Update link': '/link-update/<str:pk>/',\n\t\t'Delete link': '/link-delete/<str:pk>/',\n\n\t\t'Training List': '/content-list',\n\t\t'Training Detail': '/training-list',\n\t\t'Update Training': 
'/training-update',\n\t\t'Delete Training': '/training-delete',\n\t\t'Subject List': '/subject-list',\n\n\t\t'Resource List':'/form2-list/',\n\t\t'Resource Detail': '/form2-detail/<str:pk>/',\n\t\t'Update Resource': '/form2-update/<str:pk>/',\n\t\t'Delete Resource': '/form2-delete/',\n\t}\n\treturn Response(api_urls)\n\n\n@api_view(['GET'])\ndef linkList(request):\n\tlinks = Link.objects.all().order_by('title')\n\tserializer = LinkSerializer(links, many=True)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef linkDetail(request, pk):\n\tlink = Link.objects.get(id = pk)\n\tserializer = LinkSerializer(link, many=False)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef linkCreate(request):\n\tserializer = LinkSerializer(data=request.data, many=False)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef linkUpdate(request, pk):\n\tlink = Link.objects.get(id=pk)\n\tserializer = LinkSerializer(instance = link, data=request.data, partial=True)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef linkDelete(request, pk):\n\tlink = Link.objects.get(id=pk)\n\tlink.delete()\n\n\treturn Response('Item Successfully Deleted')\n\n\n@api_view(['GET'])\ndef contactList(request):\n\tcontacts = Contact.objects.all().order_by('name')\n\tserializer = ContactSerializer(contacts, many=True)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef contactDetail(request, pk):\n\tcontact = Contact.objects.get(id = pk)\n\tserializer = ContactSerializer(contact, many = False)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef contactCreate(request):\n\tserializer = ContactSerializer(data = request.data, many=False)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef contactUpdate(request, pk):\n\tcontact = 
Contact.objects.get(id=pk)\n\tserializer = ContactSerializer(instance=contact, data = request.data, partial=True)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef contactDelete(request, pk):\n\tcontact = Contact.objects.get(id=pk)\n\tcontact.delete()\n\n\treturn Response('Item Successfully Deleted!')\n\n\n@api_view(['GET'])\ndef tagList(request):\n\ttags = FormTags.objects.all().order_by('name')\n\tserializer = TagSerializer(tags, many=True)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef tagDetail(request, pk):\n\ttags = FormTags.objects.get(id = pk)\n\tserializer = TagSerializer(tags, many=False)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef tagCreate(request):\n\tserializer = TagSerializer(data=request.data, many=False)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef tagUpdate(request, pk):\n\ttag = FormTags.objects.get(id=pk)\n\tserializer = TagSerializer(instance=tag, data=request.data, partial=True)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef tagDelete(request, pk):\n\ttag = FormTags.objects.get(id=pk)\n\ttag.delete()\n\n\treturn Response('Item Successfully Deleted!')\n\n\n@api_view(['GET'])\ndef formList(request):\n\tforms = Form.objects.all().order_by('title')\n\tserializer = FormSerializer(forms, many=True)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef formDetail(request, pk):\n\tforms = Form.objects.get(id = pk)\n\tserializer = FormSerializer(forms, many=False)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\n@parser_classes([MultiPartParser, FormParser])\ndef formCreate(request):\n\tserializer = FormSerializer(data=request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef 
formUpdate(request, pk):\n\tform = Form.objects.get(id=pk)\n\tserializer = FormSerializer(instance=form, data=request.data, partial=True)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef formDelete(request, pk):\n\tform = Form.objects.get(id=pk)\n\tform.delete()\n\n\treturn Response('Item Successfully Deleted!')\n\n\ndef uploadForm(request):\n\tform = upload()\n\n\tif request.method == \"POST\":\n\t\tform = upload(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tprint('valid')\n\t\t\tfilename = str(request.FILES['file'])\n\t\t\thandle_uploaded_file(request.FILES['file'], filename)\n\t\t\tform.save()\n\t\t\treturn redirect('blog:files')\n\t\telse:\n\t\t\tprint('not valid')\n\t\t\t\n\tcontext = {'form':form}\n\treturn render(request, 'api/uploadForm.html',context)\n\n\ndef handle_uploaded_file(f, filename):\n with open('static/images/documents/' + filename, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n\ndef uploadTraining(request):\n\tform = uploadTrainingContent()\n\n\tif request.method == \"POST\":\n\t\tform = uploadTrainingContent(request.POST, request.FILES)\n\t\tif form.is_valid:\n\t\t\tfor f in request.FILES.getlist('file'):\n\t\t\t\tfilename = str(f)\n\t\t\t\thandle_uploaded_content(f, filename)\n\t\t\tform.save()\n\t\t\treturn redirect('blog:trainingDB')\n\n\tcontext = {'form':form}\n\treturn render(request, 'api/trainingUpload.html',context)\n\n\ndef handle_uploaded_content(f, filename):\n with open('static/images/documents/training/' + filename, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n\n@api_view(['GET'])\ndef contentList(request):\n\ttrainingContent = Content.objects.all().order_by('title')\n\tserializer = ContentSerializer(trainingContent, many=True)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef contentDetail(request, pk):\n\ttrainingContent = Content.objects.get(id = 
pk)\n\tserializer = ContentSerializer(trainingContent, many = False)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef contentUpdate(request, pk):\n\ttrainingContent = Content.objects.get(id = pk)\n\tserializer = ContentSerializer(instance = form, data = request.data, partial = True)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef contentDelete(request, pk):\n\ttrainingContent = Content.objects.get(id = pk)\n\ttrainingContent.delete()\n\n\treturn Response('Item Successfully Deleted!')\n\n\n@api_view(['GET'])\ndef subjectList(request):\n\tsubjects = Subject.objects.all().order_by('name')\n\tserializer = SubjectSerializer(subjects, many = True)\n\t\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef form2List(request):\n\tforms = Form2.objects.all().order_by('title')\n\tserializer = Form2Serializer(forms, many=True)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['GET'])\ndef form2Detail(request, pk):\n\tforms = Form2.objects.get(id = pk)\n\tserializer = Form2Serializer(forms, many=False)\n\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef form2Update(request, pk):\n\tform = Form2.objects.get(id=pk)\n\tserializer = Form2Serializer(instance=form, data=request.data, partial=True)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef form2Delete(request, pk):\n\tform = Form2.objects.get(id=pk)\n\tform.delete()\n\n\treturn Response('Item Successfully Deleted!')\n\n\ndef uploadResourceFunc(request):\n\tform = uploadResourceForm()\n\tif request.method == \"POST\":\n\t\tform = uploadResourceForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tfilename = str(request.FILES['file'])\n\t\t\thandle_uploaded_form(request.FILES['file'], filename)\n\t\t\tform.save()\n\t\t\treturn redirect('blog:resourceFormUpload')\n\n\tcontext = {'form':form}\n\treturn render(request, 
'api/uploadResource.html',context)\n\n\ndef handle_uploaded_form(f, filename):\n with open('static/images/documents/resourceForms/' + filename, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n\n\n" }, { "alpha_fraction": 0.5258620977401733, "alphanum_fraction": 0.5620689392089844, "avg_line_length": 26.619047164916992, "blob_id": "0c686ceb84aeffddf82d9f94aa7985b6ce56735d", "content_id": "cd4df1398ffc7d770d61f79a86727d688f17c945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 114, "num_lines": 21, "path": "/blog/migrations/0004_announcement.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2020-12-10 16:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0003_delete_methods'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Announcement',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('text', models.TextField()),\n ('link', models.CharField(blank=True, max_length=200, null=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5036496520042419, "alphanum_fraction": 0.5264598727226257, "avg_line_length": 28.62162208557129, "blob_id": "9abd616c3b041bbccaef94f462431b3900cc538f", "content_id": "38d8fa5d3696c057aa7aae56eb49ad7d1df11356", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 114, "num_lines": 37, "path": "/api/migrations/0002_auto_20201005_1329.py", "repo_name": "santander-syngenta/rc", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-10-05 17:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0001_initial'),\n ]\n\n 
operations = [\n migrations.CreateModel(\n name='FormTags',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=300)),\n ],\n ),\n migrations.CreateModel(\n name='LinkTags',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=300)),\n ],\n ),\n migrations.AddField(\n model_name='form',\n name='tag',\n field=models.ManyToManyField(to='api.FormTags'),\n ),\n migrations.AddField(\n model_name='link',\n name='tag',\n field=models.ManyToManyField(to='api.LinkTags'),\n ),\n ]\n" } ]
23
barteklip/pycharm_test
https://github.com/barteklip/pycharm_test
cb1b016844a8f1aa0242f8d87422ea10999269db
77ac1068b2651b81aafe58d0c76207e6839028c8
489b0dacdae3c97eeaaccd8717bd7ad6a74d15c7
refs/heads/master
2022-06-20T20:54:19.557780
2020-05-04T21:21:07
2020-05-04T21:21:07
261,294,458
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 12.333333015441895, "blob_id": "7915d54750fe5b1f9bb2aa10722e290ad53198c5", "content_id": "b40ed102cfc4a7bc8d370b22a016d7bf00d09575", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42, "license_type": "no_license", "max_line_length": 27, "num_lines": 3, "path": "/test.py", "repo_name": "barteklip/pycharm_test", "src_encoding": "UTF-8", "text": "#test kodu\n\n#coś się dodatkowo zmieniło" } ]
1
bookertee2/ventera-interview
https://github.com/bookertee2/ventera-interview
b6b25a5011cb797a3e8276416485904361daf64b
03d3af5d8fdfa5ae7a88fde5f3af5dbc14305ea8
2f0f2969381ed1f992c52fc13196986d1f443e46
refs/heads/master
2021-07-11T06:37:30.973518
2017-10-18T21:53:13
2017-10-18T21:53:13
107,466,732
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45495495200157166, "alphanum_fraction": 0.47597599029541016, "avg_line_length": 22.740739822387695, "blob_id": "80353ee24e8745f6b7a75ba34490a0d88549f8f4", "content_id": "c14979b11f15aa6b0da63d8aef29c9c2ad4a51a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/Interview_Questions.py", "repo_name": "bookertee2/ventera-interview", "src_encoding": "UTF-8", "text": "def iPrime(x):\r\n if(x==2):\r\n return True\r\n if(x>1):\r\n for i in range(2,x):\r\n if (x%i==0):\r\n return False\r\n return True\r\n return False\r\n\r\ndef analyze(prices, price):\r\n total = 0\r\n prices = sorted(prices)\r\n index = 0\r\n for p in prices:\r\n total += prices\r\n if(p >= price):\r\n index = prices.index(p)\r\n mean = total/len(prices)\r\n center = len(prices) / 2\r\n median = 0\r\n if len(prices) % 2 == 0:\r\n median = sum(prices[center - 1:center + 1]) / 2.0\r\n else:\r\n median = prices[center]\r\n output = [mean,median,index]\r\n return output" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 19, "blob_id": "6b577dd70a61e5986d25c6eb8b2c45379094b2bc", "content_id": "926fecfebfe7ce09a321dc612cb7e730166d4cc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 19, "num_lines": 1, "path": "/README.md", "repo_name": "bookertee2/ventera-interview", "src_encoding": "UTF-8", "text": "# ventera-interview\n" } ]
2
bluetyson/jupyterlab
https://github.com/bluetyson/jupyterlab
6117819129665ba5c1b493613258e36cee5f1b06
6adc1ace7a1d3c8907aeec1c5c5beef4922585bd
24cd0e7e03a1068f7ef7b843175e745d56fb97e5
refs/heads/master
2021-01-23T02:06:13.056045
2017-05-30T22:03:09
2017-05-30T22:03:09
92,908,191
1
0
null
2017-05-31T05:34:16
2017-05-30T10:57:15
2017-05-31T00:37:25
null
[ { "alpha_fraction": 0.6141881346702576, "alphanum_fraction": 0.6179817914962769, "avg_line_length": 23.174312591552734, "blob_id": "6d66bd3e0f607bfca62bbbe68b63aaf79d120591", "content_id": "8881554bf56473f4b24ad175e460764acc447095", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2636, "license_type": "permissive", "max_line_length": 74, "num_lines": 109, "path": "/jupyterlab/selenium_check.py", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport sys\nimport time\nimport threading\n\nfrom tornado import ioloop\nfrom notebook.notebookapp import NotebookApp, flags, aliases\nfrom traitlets import Bool, Unicode\nfrom jupyterlab_launcher import LabConfig, add_handlers\n\nfrom selenium import webdriver\n\nfrom .commands import get_app_dir\n\n\nhere = os.path.dirname(__file__)\n\n\ntest_flags = dict(flags)\ntest_flags['core-mode'] = (\n {'TestApp': {'core_mode': True}},\n \"Start the app in core mode.\"\n)\n\n\ntest_aliases = dict(aliases)\ntest_aliases['app-dir'] = 'TestApp.app_dir'\n\n\nclass TestApp(NotebookApp):\n\n default_url = Unicode('/lab')\n open_browser = Bool(False)\n base_url = '/foo'\n flags = test_flags\n aliases = test_aliases\n\n core_mode = Bool(False, config=True,\n help=\"Whether to start the app in core mode\")\n\n app_dir = Unicode('', config=True,\n help=\"The app directory to build in\")\n\n def start(self):\n self.io_loop = ioloop.IOLoop.current()\n config = LabConfig()\n if self.core_mode:\n config.assets_dir = os.path.join(here, 'build')\n elif self.app_dir:\n config.assets_dir = os.path.join(self.app_dir, 'static')\n else:\n config.assets_dir = os.path.join(get_app_dir(), 'static')\n\n print('****Testing assets dir %s' % config.assets_dir)\n\n config.settings_dir = ''\n\n add_handlers(self.web_app, config)\n self.io_loop.call_later(1, 
self._run_selenium)\n super(TestApp, self).start()\n\n def _run_selenium(self):\n thread = threading.Thread(target=run_selenium,\n args=(self.display_url, self._selenium_finished))\n thread.start()\n\n def _selenium_finished(self, result):\n self.io_loop.add_callback(lambda: sys.exit(result))\n\n\ndef run_selenium(url, callback):\n \"\"\"Run the selenium test and call the callback with the exit code.exit\n \"\"\"\n\n print('Starting Firefox Driver')\n driver = webdriver.Firefox()\n\n print('Navigating to page:', url)\n driver.get(url)\n\n completed = False\n\n # Start a poll loop.\n t0 = time.time()\n while time.time() - t0 < 10:\n el = driver.find_element_by_id('main')\n if el:\n completed = True\n break\n\n # Avoid hogging the main thread.\n time.sleep(0.5)\n\n driver.quit()\n\n # Return the exit code.\n if not completed:\n callback(1)\n else:\n if os.path.exists('./geckodriver.log'):\n os.remove('./geckodriver.log')\n callback(0)\n\n\nif __name__ == '__main__':\n TestApp.launch_instance()\n" }, { "alpha_fraction": 0.5989478230476379, "alphanum_fraction": 0.5989478230476379, "avg_line_length": 22.53333282470703, "blob_id": "104337e3b907b2f9f10a8266399c348c9ef475c6", "content_id": "7f9df68fec7a661787bfbe1b40bfed5e96881fd9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 4942, "license_type": "permissive", "max_line_length": 85, "num_lines": 210, "path": "/test/src/chatbox/panel.spec.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport expect = require('expect.js');\n\nimport {\n Message, MessageLoop\n} from '@phosphor/messaging';\n\nimport {\n Widget\n} from '@phosphor/widgets';\n\nimport {\n editorServices\n} from '@jupyterlab/codemirror';\n\nimport {\n Chatbox, ChatboxPanel, ChatboxDocumentInfo\n} from '@jupyterlab/chatbox';\n\nimport {\n Context, 
DocumentRegistry\n} from '@jupyterlab/docregistry';\n\nimport {\n ServiceManager, Contents\n} from '@jupyterlab/services';\n\nimport {\n uuid\n} from '@jupyterlab/coreutils';\n\n\nimport {\n createFileContext, defaultRenderMime\n} from '../utils';\n\n/**\n * The common file model.\n */\nconst FILE: Contents.IModel = {\n path: uuid() + '.txt',\n type: 'file',\n mimetype: 'text/plain',\n content: 'Hello, world',\n format: 'text'\n};\n\n\n/**\n * Factory stuff.\n */\nconst editorFactory = editorServices.factoryService.newInlineEditor.bind(\n editorServices.factoryService);\nconst contentFactory = new ChatboxPanel.ContentFactory({ editorFactory });\nconst rendermime = defaultRenderMime();\n\n\ndescribe('chatbox/panel', () => {\n\n let panel: ChatboxPanel;\n let context: Context<DocumentRegistry.IModel>;\n let manager: ServiceManager.IManager;\n\n before(() => {\n manager = new ServiceManager();\n return manager.ready.then(() => {\n return manager.contents.save(FILE.path, FILE);\n });\n });\n\n beforeEach(() => {\n panel = new ChatboxPanel({\n rendermime, contentFactory\n });\n context = createFileContext(FILE.path, manager);\n });\n\n afterEach(() => {\n panel.dispose();\n });\n\n describe('ChatboxPanel', () => {\n\n describe('#constructor()', () => {\n\n it('should create a new chatbox panel', () => {\n expect(panel).to.be.a(ChatboxPanel);\n expect(panel.node.classList).to.contain('jp-ChatboxPanel');\n });\n\n });\n\n describe('#chatbox', () => {\n\n it('should be a chatbox widget created at instantiation', () => {\n expect(panel.chatbox).to.be.a(Chatbox);\n });\n\n });\n\n describe('#dispose()', () => {\n\n it('should dispose of the resources held by the panel', () => {\n panel.dispose();\n expect(panel.isDisposed).to.be(true);\n panel.dispose();\n expect(panel.isDisposed).to.be(true);\n });\n\n });\n\n describe('#context', () => {\n\n it('should get the document context for the widget', () => {\n expect(panel.context).to.be(null);\n });\n\n it('should set the 
document context for the widget', () => {\n panel.context = context\n expect(panel.context).to.be(context);\n });\n\n });\n\n\n describe('#onActivateRequest()', () => {\n\n it('should give the focus to the chatbox prompt', () => {\n Widget.attach(panel, document.body);\n MessageLoop.sendMessage(panel, Widget.Msg.ActivateRequest);\n expect(panel.chatbox.prompt.editor.hasFocus()).to.be(true);\n });\n\n });\n\n describe('#onCloseRequest()', () => {\n\n it('should dispose of the panel resources after closing', () => {\n Widget.attach(panel, document.body);\n expect(panel.isDisposed).to.be(false);\n MessageLoop.sendMessage(panel, Widget.Msg.CloseRequest);\n expect(panel.isDisposed).to.be(true);\n });\n\n });\n\n describe('.ContentFactory', () => {\n\n describe('#constructor', () => {\n\n it('should create a new chatbox factory', () => {\n let factory = new ChatboxPanel.ContentFactory({ editorFactory });\n expect(factory).to.be.a(ChatboxPanel.ContentFactory);\n });\n\n });\n\n });\n\n\n });\n\n describe('ChatboxDocumentInfo', () => {\n\n describe('#constructor', () => {\n\n it('should create a new chatbox document info widget', () => {\n let info = new ChatboxDocumentInfo();\n expect(info).to.be.a(ChatboxDocumentInfo);\n });\n\n });\n\n describe('#context', () => {\n\n it('should get the document context for the widget', () => {\n let info = new ChatboxDocumentInfo();\n expect(info.context).to.be(null);\n });\n\n it('should set the document context for the widget', () => {\n let info = new ChatboxDocumentInfo();\n info.context = context;\n expect(info.context).to.be(context);\n });\n\n it('should update the text content of the widget', () => {\n let info = new ChatboxDocumentInfo();\n expect(info.node.textContent).to.be('');\n info.context = context;\n expect(info.node.textContent).to.be(FILE.path);\n });\n\n it('should update the text content when the context changes names', (done) => {\n let info = new ChatboxDocumentInfo();\n info.context = context;\n 
expect(info.node.textContent).to.be(FILE.path);\n manager.contents.rename(context.path, 'rename.txt').then( () => {\n expect(info.node.textContent).to.be('rename.txt');\n done();\n });\n });\n\n });\n\n });\n\n});\n" }, { "alpha_fraction": 0.6920152306556702, "alphanum_fraction": 0.6920152306556702, "avg_line_length": 21.6015625, "blob_id": "155eeddec88c774b2e7fd2589864e8f591e135c3", "content_id": "244fe4ec8ccc6f602986bf37d6471c9fb95e6ef7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2893, "license_type": "permissive", "max_line_length": 190, "num_lines": 128, "path": "/packages/chatbox-extension/src/index.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport {\n JupyterLab, JupyterLabPlugin\n} from '@jupyterlab/application';\n\nimport {\n ICommandPalette, ILayoutRestorer\n} from '@jupyterlab/apputils';\n\nimport {\n IDocumentManager\n} from '@jupyterlab/docmanager';\n\nimport {\n IEditorServices\n} from '@jupyterlab/codeeditor';\n\nimport {\n ChatboxPanel\n} from '@jupyterlab/chatbox';\n\nimport {\n IRenderMime\n} from '@jupyterlab/rendermime';\n\n\n/**\n * The command IDs used by the chatbox plugin.\n */\nnamespace CommandIDs {\n export\n const clear = 'chatbox:clear';\n\n export\n const run = 'chatbox:post';\n\n export\n const linebreak = 'chatbox:linebreak';\n};\n\n/**\n * The chatbox widget content factory.\n */\nexport\nconst chatboxPlugin: JupyterLabPlugin<void> = {\n id: 'jupyter.extensions.chatbox',\n requires: [IRenderMime, ICommandPalette, IEditorServices, IDocumentManager, ILayoutRestorer],\n autoStart: true,\n activate: activateChatbox\n}\n\n\n/**\n * Export the plugin as the default.\n */\nexport default chatboxPlugin;\n\n\n/**\n * Activate the chatbox extension.\n */\nfunction activateChatbox(app: JupyterLab, rendermime: IRenderMime, 
palette: ICommandPalette, editorServices: IEditorServices, docManager: IDocumentManager, restorer: ILayoutRestorer): void {\n const id = 'chatbox';\n let { commands, shell } = app;\n let category = 'Chatbox';\n let command: string;\n\n /**\n * Create a chatbox for a given path.\n */\n let editorFactory = editorServices.factoryService.newInlineEditor.bind(\n editorServices.factoryService);\n let contentFactory = new ChatboxPanel.ContentFactory({ editorFactory });\n let panel = new ChatboxPanel({\n rendermime: rendermime.clone(),\n contentFactory\n });\n\n // Add the chatbox panel to the tracker.\n panel.title.label = 'Chat';\n panel.id = id;\n\n restorer.add(panel, 'chatbox');\n\n command = CommandIDs.clear;\n commands.addCommand(command, {\n label: 'Clear Chat',\n execute: args => {\n panel.chatbox.clear();\n }\n });\n palette.addItem({ command, category });\n\n command = CommandIDs.run;\n commands.addCommand(command, {\n label: 'Post Chat Entry',\n execute: args => {\n panel.chatbox.post();\n }\n });\n palette.addItem({ command, category });\n\n command = CommandIDs.linebreak;\n commands.addCommand(command, {\n label: 'Insert Line Break',\n execute: args => {\n panel.chatbox.insertLinebreak();\n }\n });\n palette.addItem({ command, category });\n\n let updateDocumentContext = function (): void {\n let context = docManager.contextForWidget(shell.currentWidget);\n if (context && context.model.modelDB.isCollaborative) {\n if (!panel.isAttached) {\n shell.addToLeftArea(panel);\n }\n panel.context = context;\n }\n };\n\n app.restored.then(() => {\n updateDocumentContext();\n });\n shell.currentChanged.connect(updateDocumentContext);\n}\n" }, { "alpha_fraction": 0.6887608170509338, "alphanum_fraction": 0.6887608170509338, "avg_line_length": 17.263158798217773, "blob_id": "4f8e4b3e1c05ed06cf899d7ce247d3764397e0b0", "content_id": "cf3a36b925e6411f4431316e9560696104014b7e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": 
"TypeScript", "length_bytes": 347, "license_type": "permissive", "max_line_length": 55, "num_lines": 19, "path": "/packages/theme-light-extension/src/index.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "import {\n JupyterLabPlugin\n} from '@jupyterlab/application';\n\nimport '@jupyterlab/theming/style/variables-light.css';\n\n\n/**\n * Initialization data for the light theme extension.\n */\nconst extension: JupyterLabPlugin<void> = {\n id: 'jupyter.themes.light',\n autoStart: true,\n activate: (app) => {\n // No-op.\n }\n};\n\nexport default extension;\n" }, { "alpha_fraction": 0.6130518317222595, "alphanum_fraction": 0.6195777058601379, "avg_line_length": 19.674602508544922, "blob_id": "30bffde297830a2056266924863d824667eeedb3", "content_id": "d549c003ef5b353d5a7a32562749b1c686acb1c4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2605, "license_type": "permissive", "max_line_length": 77, "num_lines": 126, "path": "/packages/chatbox/src/entry.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport {\n Widget, PanelLayout\n} from '@phosphor/widgets';\n\nimport {\n JSONObject\n} from '@phosphor/coreutils';\n\nimport {\n ICollaborator\n} from '@jupyterlab/coreutils';\n\nimport {\n MarkdownCell\n} from '@jupyterlab/cells';\n\n/**\n * The class name added to the chatbox entries.\n */\nexport\nconst CHAT_ENTRY_CLASS = 'jp-ChatEntry';\n\n/**\n * The class name added to chatbox badges.\n */\nconst CHAT_BADGE_CLASS = 'jp-ChatEntry-badge';\n\n\n/**\n * A chat entry widget, which hosts a user badge and a markdown cell.\n */\nexport\nclass ChatEntry extends Widget {\n /**\n * Construct a chat entry widget.\n */\n constructor(options: ChatEntry.IOptions) {\n super();\n this.addClass(CHAT_ENTRY_CLASS);\n this.model = options.model;\n\n 
this.layout = new PanelLayout();\n\n let color = this.model.author.color;\n let r = parseInt(color.slice(1,3), 16);\n let g = parseInt(color.slice(3,5), 16);\n let b = parseInt(color.slice(5,7), 16);\n\n this._badge = new Widget();\n this._badge.addClass(CHAT_BADGE_CLASS);\n let badgeName = this.model.author.shortName ||\n this.model.author.displayName.split(' ')\n .filter(s => s).map(s => s[0]).join('');\n this._badge.node.textContent = badgeName;\n\n this.cell = options.cell;\n\n if (!options.isMe) {\n this._badge.node.style.backgroundColor = `rgba(${r}, ${g}, ${b}, 0.1)`;\n this.cell.node.style.backgroundColor = `rgba(${r}, ${g}, ${b}, 0.1)`;\n }\n\n let layout = this.layout as PanelLayout;\n layout.addWidget(this._badge);\n layout.addWidget(this.cell);\n }\n\n /**\n * Get the underlying model for the entry.\n */\n readonly model: ChatEntry.IModel;\n\n /**\n * The underlying cell widget for the entry.\n */\n readonly cell: MarkdownCell;\n\n private _badge: Widget = null;\n}\n\n\n/**\n * The namespace for `InputAreaWidget` statics.\n */\nexport\nnamespace ChatEntry {\n /**\n * Options for creating a chat entry widget.\n */\n export\n interface IOptions {\n /**\n * A chat entry model for the widget.\n */\n model: IModel;\n\n /**\n * A markdown widget for rendering the entry.\n */\n cell: MarkdownCell;\n\n /**\n * Whether this author is the local collaborator.\n */\n isMe: boolean;\n }\n\n /**\n * An interface for an entry in the chat log.\n */\n export\n interface IModel extends JSONObject {\n /**\n * The text of the chat entry.\n */\n text: string;\n\n /**\n * The collaborator who logged the entry.\n */\n author: ICollaborator;\n }\n}\n" }, { "alpha_fraction": 0.51317298412323, "alphanum_fraction": 0.5237686038017273, "avg_line_length": 32.90291213989258, "blob_id": "4153ad477152d82f60867d54d9436ac1bb3adaf1", "content_id": "4ee5f4ce5ce1554f4ccebce101ece3022410c743", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": 
"TypeScript", "length_bytes": 3492, "license_type": "permissive", "max_line_length": 130, "num_lines": 103, "path": "/test/src/coreutils/markdowncodeblocks.spec.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport {\n expect\n} from 'chai';\n\nimport {\n MarkdownCodeBlocks\n} from '@jupyterlab/coreutils';\n\n\ndescribe('@jupyterlab/coreutils', () => {\n\n describe('MarkdownCodeBlocks', () => {\n\n describe('.isMarkdown()', () => {\n it('should return true for a valid markdown extension', () => {\n let isMarkdown = MarkdownCodeBlocks.isMarkdown(\".md\");\n expect(isMarkdown).true\n });\n\n });\n\n function performTest(text: string, startLine: number, endLine: number) {\n for (let marker of MarkdownCodeBlocks.markdownMarkers) {\n let markdown1 = marker + text + marker;\n let cb = MarkdownCodeBlocks.findMarkdownCodeBlocks(markdown1)[0];\n expect(cb.code).to.equal(text);\n expect(cb.startLine).to.equal(startLine);\n expect(cb.endLine).to.equal(endLine);\n }\n }\n\n describe('.findMarkdownCodeBlocks()', () => {\n it('should return the codeblock for all delimiters', () => {\n performTest(\"print(\\\"foobar\\\");\\nprint(\\\"blahblah\\\")\", 0, 1);\n });\n\n it('should return all codeblocks for multiline text', () => {\n let text = `print(\"foo\");\n import os;\n print(\"helloworld!\")`;\n performTest(text, 0, 2);\n });\n\n it('should return all codeblocks for text containing multiple delimiters', () => {\n let text = `\n pop goes the weasel!\n \\`print(\"amazing!\");\\`\n \\`\n print(\"amazing!\");\n \\`\n \\`print(\"amazing!\");\n \\`\n \\`\n print(\"amazing!\");\\`\n \n \\`\\`\\`print(\"with triple quotes\");\\`\\`\\`\n \\`\\`\\`print(\"with triple quotes\");\n print(\"and multiline\");\n \\`\\`\\`\n \\`\\`\\`\n print(\"with triple quotes\");\n \\`\\`\\`\n \\`\\`\\`\n print(\"with triple quotes\");\\`\\`\\`\n\n wheels on the bus go 
round and round!\n\n ~~~~print(\"how about this?\");~~~~\n ~~~~\n print(\"how about this?\");\n ~~~~\n ~~~~\n print(\"how about this?\");~~~~\n ~~~~print(\"how about this?\");\n ~~~~\n `;\n\n let codeblocks = MarkdownCodeBlocks.findMarkdownCodeBlocks(text);\n expect(codeblocks.length).to.equal(12);\n expect(codeblocks[0].code, 'cb0').to.equal('print(\"amazing!\");')\n expect(codeblocks[1].code, 'cb1').to.equal('\\n print(\"amazing!\");\\n ')\n expect(codeblocks[2].code, 'cb2').to.equal('print(\"amazing!\");\\n ')\n expect(codeblocks[3].code, 'cb3').to.equal('\\n print(\"amazing!\");')\n\n expect(codeblocks[4].code, 'cb4').to.equal('print(\"with triple quotes\");')\n expect(codeblocks[5].code, 'cb5').to.equal('print(\"with triple quotes\");\\n print(\"and multiline\");\\n ');\n expect(codeblocks[6].code, 'cb6').to.equal('\\n print(\"with triple quotes\");\\n ')\n expect(codeblocks[7].code, 'cb7').to.equal('\\n print(\"with triple quotes\");')\n\n expect(codeblocks[8].code, 'cb8').to.equal('print(\"how about this?\");')\n expect(codeblocks[9].code, 'cb9').to.equal('\\n print(\"how about this?\");\\n ')\n expect(codeblocks[10].code, 'cb10').to.equal('\\n print(\"how about this?\");')\n expect(codeblocks[11].code, 'cb11').to.equal('print(\"how about this?\");\\n ')\n\n });\n });\n\n });\n\n});\n" }, { "alpha_fraction": 0.6177723407745361, "alphanum_fraction": 0.6192939877510071, "avg_line_length": 27.08547019958496, "blob_id": "cdd846af88fbc12c4df4ceb7964a415a088de8cb", "content_id": "8c50a3945fde37434445ccc1330e30bcae454d4d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3286, "license_type": "permissive", "max_line_length": 89, "num_lines": 117, "path": "/packages/coreutils/src/markdowncodeblocks.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\n/**\n * The 
namespace for code block functions which help\n * in extract code from markdown text\n */\nexport\nnamespace MarkdownCodeBlocks {\n export\n const markdownMarkers: string[] = [\"```\", \"~~~~\", \"`\"]\n const markdownExtensions: string[] = [\n '.markdown',\n '.mdown',\n '.mkdn',\n '.md',\n '.mkd',\n '.mdwn',\n '.mdtxt',\n '.mdtext',\n '.text',\n '.txt',\n '.Rmd'\n ];\n\n export\n class MarkdownCodeBlock {\n startLine: number;\n endLine: number;\n code: string;\n constructor(startLine: number) {\n this.startLine = startLine;\n this.code = \"\";\n this.endLine = -1;\n }\n }\n\n /**\n * Check whether the given file extension is a markdown extension\n * @param extension - A file extension\n *\n * @returns true/false depending on whether this is a supported markdown extension\n */\n export\n function isMarkdown(extension: string): boolean {\n return markdownExtensions.indexOf(extension) > -1\n }\n\n /**\n * Construct all code snippets from current text\n * (this could be potentially optimized if we can cache and detect differences)\n * @param text - A string to parse codeblocks from\n *\n * @returns An array of MarkdownCodeBlocks.\n */\n export\n function findMarkdownCodeBlocks(text: string): MarkdownCodeBlock[] {\n if (!text || text == '') {\n return [];\n }\n\n const lines = text.split(\"\\n\");\n const codeSnippets: MarkdownCodeBlock[] = [];\n var currentCode = null;\n for (var lineIndex = 0; lineIndex < lines.length; lineIndex++) {\n const line = lines[lineIndex];\n const marker = findNextMarker(line);\n const lineContainsMarker = marker != '';\n const constructingSnippet = currentCode != null;\n //skip this line if it is not part of any code snippet and doesn't contain a marker\n if (!lineContainsMarker && !constructingSnippet) {\n continue;\n }\n\n //check if we are already constructing a code snippet\n if (!constructingSnippet) {\n //start constructing\n currentCode = new MarkdownCodeBlock(lineIndex);\n\n //check whether this is a single line code snippet\n 
const firstIndex = line.indexOf(marker);\n const lastIndex = line.lastIndexOf(marker);\n const isSingleLine = firstIndex != lastIndex\n if (isSingleLine) {\n currentCode.code = line.substring(firstIndex + marker.length, lastIndex);\n currentCode.endLine = lineIndex;\n codeSnippets.push(currentCode);\n currentCode = null;\n } else {\n currentCode.code = line.substring(firstIndex + marker.length);\n }\n } else {\n //already constructing\n if (lineContainsMarker) {\n currentCode.code += \"\\n\" + line.substring(0, line.indexOf(marker));\n currentCode.endLine = lineIndex;\n codeSnippets.push(currentCode);\n currentCode = null;\n } else {\n currentCode.code += \"\\n\" + line;\n }\n }\n }\n return codeSnippets;\n }\n\n\n function findNextMarker(text: string) {\n for (let marker of markdownMarkers) {\n const index = text.indexOf(marker);\n if (index > -1) {\n return marker;\n }\n }\n return '';\n }\n}\n" }, { "alpha_fraction": 0.6230324506759644, "alphanum_fraction": 0.62464439868927, "avg_line_length": 24.69062042236328, "blob_id": "a4b1261cb575025e0b81e5a11f332c7f7fe0694b", "content_id": "246ecd901f84134ca0572edcf337f438143f256b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 21092, "license_type": "permissive", "max_line_length": 119, "num_lines": 821, "path": "/packages/chatbox/src/chatbox.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport {\n each, ArrayExt\n} from '@phosphor/algorithm';\n\nimport {\n MimeData\n} from '@phosphor/coreutils';\n\nimport {\n Signal\n} from '@phosphor/signaling';\n\nimport {\n DisposableSet\n} from '@phosphor/disposable';\n\nimport {\n Message\n} from '@phosphor/messaging';\n\nimport {\n Panel, PanelLayout, Widget\n} from '@phosphor/widgets';\n\nimport {\n Drag\n} from '@phosphor/dragdrop';\n\nimport {\n DocumentRegistry\n} 
from '@jupyterlab/docregistry';\n\nimport {\n CodeEditor\n} from '@jupyterlab/codeeditor';\n\nimport {\n Cell,\n MarkdownCellModel, MarkdownCell\n} from '@jupyterlab/cells';\n\nimport {\n IObservableList, ActivityMonitor\n} from '@jupyterlab/coreutils';\n\nimport {\n IRenderMime\n} from '@jupyterlab/rendermime';\n\nimport {\n ChatEntry, CHAT_ENTRY_CLASS\n} from './entry';\n\n\n/**\n * The class name added to chatbox widgets.\n */\nconst CHATBOX_CLASS = 'jp-Chatbox';\n\n/**\n * The class name of the active prompt\n */\nconst PROMPT_CLASS = 'jp-Chatbox-prompt';\n\n/**\n * The class name of the panel that holds cell content.\n */\nconst CONTENT_CLASS = 'jp-Chatbox-content';\n\n/**\n * The class name of the panel that holds prompts.\n */\nconst INPUT_CLASS = 'jp-Chatbox-input';\n\n/**\n * The class name added to drag images.\n */\nconst DRAG_IMAGE_CLASS = 'jp-dragImage';\n\n/**\n * The class name added to a filled circle.\n */\nconst FILLED_CIRCLE_CLASS = 'jp-filledCircle';\n\n/**\n * The mimetype used for Jupyter cell data.\n */\nconst JUPYTER_CELL_MIME: string = 'application/vnd.jupyter.cells';\n\n/**\n * The threshold in pixels to start a drag event.\n */\nconst DRAG_THRESHOLD = 5;\n\n/**\n * The number of new entries to render upon loading a\n * new page of the chatlog.\n */\nconst PAGE_LENGTH = 20;\n\n/**\n * The scroll position at which to request a new page.\n */\nconst NEW_PAGE_POSITION = 300;\n\n/**\n * Throttle for scrolling for a new page.\n */\nconst SCROLL_THROTTLE = 1000;\n\n\n/**\n * A widget containing a Jupyter chatbox.\n *\n * #### Notes\n * The Chatbox class is intended to be used within a ChatboxPanel\n * instance. 
Under most circumstances, it is not instantiated by user code.\n */\nexport\nclass Chatbox extends Widget {\n /**\n * Construct a chatbox widget.\n */\n constructor(options: Chatbox.IOptions) {\n super();\n this.addClass(CHATBOX_CLASS);\n\n // Create the panels that hold the content and input.\n let layout = this.layout = new PanelLayout();\n this._content = new Panel();\n this._input = new Panel();\n\n this.contentFactory = options.contentFactory;\n this._rendermime = options.rendermime;\n\n // Add top-level CSS classes.\n this._content.addClass(CONTENT_CLASS);\n this._input.addClass(INPUT_CLASS);\n\n // Insert the content and input panes into the widget.\n layout.addWidget(this._content);\n layout.addWidget(this._input);\n\n // Throttle the scroll paging of the widget.\n this._monitor = new ActivityMonitor({\n signal: this._scrollSignal,\n timeout: SCROLL_THROTTLE\n });\n this._monitor.activityStopped.connect(this._handleScroll, this);\n }\n\n /**\n * The content factory used by the chatbox.\n */\n readonly contentFactory: Chatbox.IContentFactory;\n\n /**\n * Whether the chatbox has been disposed.\n */\n get isDisposed(): boolean {\n return this._disposables === null;\n }\n\n /*\n * The chatbox input prompt.\n */\n get prompt(): MarkdownCell | null {\n let inputLayout = (this._input.layout as PanelLayout);\n return inputLayout.widgets[0] as MarkdownCell || null;\n }\n\n /**\n * The document model associated with the chatbox.\n */\n get model(): DocumentRegistry.IModel {\n return this._model;\n }\n set model(model: DocumentRegistry.IModel) {\n // Do nothing if it is the same model.\n if (model === this._model) {\n return;\n }\n // Clean up after the previous model.\n if (this._log) {\n this._log.changed.disconnect(this._onLogChanged, this);\n }\n this.clear();\n\n // Set the new model.\n this._model = model;\n if (!model) {\n this._log = null;\n return;\n }\n\n // Populate with the new model values.\n let modelDB = this._model.modelDB;\n modelDB.connected.then(() 
=> {\n // Update the chatlog vector.\n modelDB.createList('internal:chat');\n this._log = modelDB.get('internal:chat') as IObservableList<ChatEntry.IModel>;\n this._log.changed.connect(this._onLogChanged, this);\n this._start = this._log.length;\n\n if (this.isVisible) {\n this._scrollGuard = true;\n this._addPage(PAGE_LENGTH);\n Private.scrollToBottom(this._content.node);\n this._scrollGuard = false;\n }\n });\n }\n\n /**\n * The log of chat entries for the current document model.\n */\n get log(): IObservableList<ChatEntry.IModel> {\n return this._log;\n }\n\n /**\n * The list of currently rendered widgets for the chatbox.\n */\n get widgets(): ReadonlyArray<Widget> {\n return this._content.widgets;\n }\n\n /**\n * Clear the chat entries.\n */\n clear(): void {\n // Dispose all the content cells.\n let entries = this._content.widgets;\n while (entries.length) {\n entries[0].dispose();\n }\n }\n\n /**\n * Dispose of the resources held by the widget.\n */\n dispose() {\n // Do nothing if already disposed.\n if (this._disposables === null) {\n return;\n }\n let disposables = this._disposables;\n this._disposables = null;\n disposables.dispose();\n this._log = null;\n Signal.clearData(this);\n\n super.dispose();\n }\n\n /**\n * Post the current text in the prompt to the chat.\n */\n post(): void {\n if (!this._model) {\n return;\n }\n let prompt = this.prompt;\n\n if (prompt.model.value.text.trim() !== '') {\n this._post();\n this._newPrompt();\n } else {\n return;\n }\n }\n\n /**\n * Insert a line break in the prompt.\n */\n insertLinebreak(): void {\n let prompt = this.prompt;\n let model = prompt.model;\n let editor = prompt.editor;\n // Insert the line break at the cursor position, and move cursor forward.\n let pos = editor.getCursorPosition();\n let offset = editor.getOffsetAt(pos);\n let text = model.value.text;\n model.value.text = text.substr(0, offset) + '\\n' + text.substr(offset);\n pos = editor.getPositionAt(offset + 1);\n 
editor.setCursorPosition(pos);\n }\n\n /**\n * Handle the DOM events for the widget.\n *\n * @param event - The DOM event sent to the widget.\n *\n * #### Notes\n * This method implements the DOM `EventListener` interface and is\n * called in response to events on the notebook panel's node. It should\n * not be called directly by user code.\n */\n handleEvent(event: Event): void {\n switch (event.type) {\n case 'keydown':\n this._evtKeyDown(event as KeyboardEvent);\n break;\n case 'mousedown':\n this._evtMouseDown(event as MouseEvent);\n break;\n case 'mouseup':\n this._evtMouseup(event as MouseEvent);\n break;\n case 'mousemove':\n this._evtMousemove(event as MouseEvent);\n break;\n case 'scroll':\n this._scrollSignal.emit(void 0);\n break;\n default:\n break;\n }\n }\n\n /**\n * Handle `after_show` messages for the widget.\n */\n protected onAfterShow(msg: Message): void {\n // Put entries on the screen if we have\n // not yet done that.\n if (this._log && this._start === this._log.length) {\n this._scrollGuard = true;\n // Remove any existing widgets.\n this.clear();\n // Add a page.\n this._addPage(PAGE_LENGTH);\n // Scroll to bottom.\n Private.scrollToBottom(this._content.node);\n this._scrollGuard = false;\n }\n }\n\n /**\n * Handle `after_attach` messages for the widget.\n */\n protected onAfterAttach(msg: Message): void {\n let node = this.node;\n node.addEventListener('keydown', this, true);\n node.addEventListener('mousedown', this);\n this._content.node.addEventListener('scroll', this);\n\n // Create a prompt if necessary.\n if (!this.prompt) {\n this._newPrompt();\n } else {\n this.prompt.editor.focus();\n this.update();\n }\n }\n\n /**\n * Handle `before-detach` messages for the widget.\n */\n protected onBeforeDetach(msg: Message): void {\n let node = this.node;\n node.removeEventListener('keydown', this, true);\n node.removeEventListener('mousedown', this);\n this._content.node.removeEventListener('scroll', this);\n }\n\n /**\n * Handle 
`'activate-request'` messages.\n */\n protected onActivateRequest(msg: Message): void {\n this.prompt.editor.focus();\n this.update();\n }\n\n /**\n * Handle `update-request` messages.\n */\n protected onUpdateRequest(msg: Message): void {\n Private.scrollToBottom(this._content.node);\n }\n\n /**\n * Make a new prompt.\n */\n private _newPrompt(): void {\n let prompt = this.prompt;\n\n // Create the new prompt.\n let factory = this.contentFactory;\n let options = this._createMarkdownCellOptions();\n prompt = factory.createCell(options);\n prompt.model.mimeType = this._mimetype;\n prompt.addClass(PROMPT_CLASS);\n prompt.rendered = false;\n this._input.addWidget(prompt);\n\n if (this.isAttached) {\n this.activate();\n }\n }\n\n /**\n * Add another page of entries.\n */\n private _addPage(count: number): void {\n // Add `count` widgets to the panel.\n let index = this._start - 1;\n let numAdded = 0;\n while (index >= 0 && numAdded < count) {\n let entryWidget = this._entryWidgetFromModel(this._log.get(index--));\n this._content.insertWidget(0, entryWidget);\n numAdded++;\n }\n this._start = index + 1;\n }\n\n\n /**\n * Handle a `'scroll'` event for the content panel.\n */\n private _handleScroll(): void {\n // If we are adding entries right now,\n // ignore any scroll event.\n if (this._scrollGuard) {\n return;\n }\n // Only page if we hit the top.\n if (this._content.node.scrollTop <= NEW_PAGE_POSITION && this._start > 0) {\n let startingHeight = this._content.node.scrollHeight;\n let startingPosition = this._content.node.scrollTop;\n this._addPage(PAGE_LENGTH);\n // Attempt to place the scroll position at\n // same entry where we started.\n this._content.node.scrollTop =\n this._content.node.scrollHeight - startingHeight + startingPosition;\n }\n }\n\n /**\n * Handle the `'keydown'` event for the widget.\n */\n private _evtKeyDown(event: KeyboardEvent): void {\n let editor = this.prompt.editor;\n if (event.keyCode === 13 && !editor.hasFocus()) {\n 
event.preventDefault();\n editor.focus();\n }\n }\n\n /**\n * Find the chat entry containing the target html element.\n *\n * #### Notes\n * Returns -1 if the entry is not found.\n */\n private _findEntry(node: HTMLElement): number {\n // Trace up the DOM hierarchy to find the root cell node.\n // Then find the corresponding child and select it.\n while (node && node !== this.node) {\n if (node.classList.contains(CHAT_ENTRY_CLASS)) {\n let i = ArrayExt.findFirstIndex(this._content.widgets, widget => widget.node === node);\n if (i !== -1) {\n return i;\n }\n break;\n }\n node = node.parentElement;\n }\n return -1;\n }\n\n /**\n * Handle `mousedown` events for the widget.\n */\n private _evtMouseDown(event: MouseEvent): void {\n let target = event.target as HTMLElement;\n let i = this._findEntry(target);\n\n // Left mouse press for drag start.\n if (event.button === 0 && i !== -1) {\n this._dragData = { pressX: event.clientX, pressY: event.clientY, index: i};\n document.addEventListener('mouseup', this, true);\n document.addEventListener('mousemove', this, true);\n event.preventDefault();\n }\n }\n\n /**\n * Handle the `'mouseup'` event for the widget.\n */\n private _evtMouseup(event: MouseEvent): void {\n if (event.button !== 0 || !this._drag) {\n document.removeEventListener('mousemove', this, true);\n document.removeEventListener('mouseup', this, true);\n return;\n }\n event.preventDefault();\n event.stopPropagation();\n }\n\n /**\n * Handle the `'mousemove'` event for the widget.\n */\n private _evtMousemove(event: MouseEvent): void {\n event.preventDefault();\n event.stopPropagation();\n\n // Bail if we are the one dragging.\n if (this._drag) {\n return;\n }\n\n // Check for a drag initialization.\n let data = this._dragData;\n let dx = Math.abs(event.clientX - data.pressX);\n let dy = Math.abs(event.clientY - data.pressY);\n if (dx < DRAG_THRESHOLD && dy < DRAG_THRESHOLD) {\n return;\n }\n\n this._startDrag(data.index, event.clientX, event.clientY);\n }\n\n 
/**\n * Start a drag event.\n */\n private _startDrag(index: number, clientX: number, clientY: number): void {\n let toCopy = this._content.widgets[index] as ChatEntry;\n let data = [toCopy.cell.model.toJSON()];\n\n // Create the drag image.\n let dragImage = Private.createDragImage();\n\n // Set up the drag event.\n this._drag = new Drag({\n mimeData: new MimeData(),\n supportedActions: 'copy',\n proposedAction: 'copy',\n dragImage,\n source: this\n });\n this._drag.mimeData.setData(JUPYTER_CELL_MIME, data);\n\n // Remove mousemove and mouseup listeners and start the drag.\n document.removeEventListener('mousemove', this, true);\n document.removeEventListener('mouseup', this, true);\n this._drag.start(clientX, clientY).then(action => {\n if (this.isDisposed) {\n return;\n }\n this._drag = null;\n });\n }\n\n /**\n * Update the chat view after a change in the log vector.\n */\n private _onLogChanged(log: IObservableList<ChatEntry.IModel>, args: IObservableList.IChangedArgs<ChatEntry.IModel>) {\n let index = 0;\n let layout = this._content.layout as PanelLayout;\n switch (args.type) {\n case 'add':\n index = args.newIndex;\n if (index < this._start) {\n // If it is inserted before the view,\n // just update the `_start` index.\n this._start += args.newValues.length;\n } else {\n // Otherwise insert the widgets into the view.\n each(args.newValues, entry => {\n let entryWidget = this._entryWidgetFromModel(entry);\n layout.insertWidget(index++, entryWidget);\n });\n }\n break;\n case 'remove':\n index = args.oldIndex;\n if (index < this._start) {\n // If the removal is before the view,\n // just update the `_start` index.\n this._start -= args.oldValues.length;\n } else {\n // Otherwise remove the widgets from the view.\n each(args.oldValues, entry => {\n let widget = layout.widgets[args.oldIndex];\n widget.parent = null;\n widget.dispose();\n });\n }\n break;\n case 'move':\n if (args.newIndex >= this._start && args.oldIndex >= this._start) {\n // If both are in the 
view, it is a straightforward move.\n let fromIndex = args.oldIndex - this._start;\n let toIndex = args.newIndex - this._start;\n layout.insertWidget(toIndex, layout.widgets[fromIndex]);\n } else if (args.newIndex >= this._start) {\n // If it is moving into the view, create the widget and\n // update the `_start` index.\n let entry = args.oldValues[0];\n let entryWidget = this._entryWidgetFromModel(entry);\n layout.insertWidget(args.newIndex - this._start, entryWidget);\n this._start--;\n } else if (args.oldIndex >= this._start) {\n // If it is moving out of the view, remove the widget\n // and update the `_start index.`\n let widget = layout.widgets[args.oldIndex - this._start];\n widget.parent = null;\n this._start++;\n }\n // If both are before `_start`, this is a no-op.\n break;\n case 'set':\n index = args.newIndex;\n if (index >= this._start) {\n // Only need to update the widgets if they are in the view.\n each(args.newValues, entry => {\n let entryWidget = this._entryWidgetFromModel(entry);\n layout.insertWidget(index, entryWidget);\n let toRemove = layout.widgets[index+1];\n toRemove.parent = null;\n index++;\n });\n }\n break;\n }\n this.update();\n }\n\n /**\n * Post the text current prompt.\n */\n private _post(): void {\n // Dispose of the current input widget.\n let prompt = this.prompt;\n (this._input.layout as PanelLayout).widgets[0].parent = null;\n\n // Add the chat entry to the log.\n let collaborators = this._model.modelDB.collaborators;\n if (!collaborators) {\n throw Error('Cannot post chat entry to non-collaborative document.');\n }\n this._log.push({\n text: prompt.model.value.text,\n author: collaborators.localCollaborator\n });\n prompt.dispose();\n }\n\n\n /**\n * Given a chat entry model, create a new entry widget.\n */\n private _entryWidgetFromModel(entry: ChatEntry.IModel): ChatEntry {\n let options = this._createMarkdownCellOptions(entry.text);\n let cellWidget = this.contentFactory.createCell(options);\n 
this._disposables.add(cellWidget);\n cellWidget.readOnly = true;\n cellWidget.rendered = true;\n let entryWidget = new ChatEntry({\n model: entry,\n cell: cellWidget,\n isMe: entry.author.userId ===\n this._model.modelDB.collaborators.localCollaborator.userId\n });\n return entryWidget;\n }\n\n /**\n * Create the options used to initialize markdown cell widget.\n */\n private _createMarkdownCellOptions(text: string = ''): MarkdownCell.IOptions {\n let contentFactory = this.contentFactory.markdownCellContentFactory;\n let model = new MarkdownCellModel({ });\n this._disposables.add(model);\n let rendermime = this._rendermime;\n model.value.text = text || '';\n return { model, rendermime, contentFactory };\n }\n\n private _rendermime: IRenderMime = null;\n private _content: Panel = null;\n private _log: IObservableList<ChatEntry.IModel> = null;\n private _start: number = null;\n private _scrollGuard: boolean = true;\n private _monitor: ActivityMonitor<any, any> = null;\n private _scrollSignal = new Signal<this, void>(this);\n private _input: Panel = null;\n private _mimetype = 'text/x-ipythongfm';\n private _model: DocumentRegistry.IModel = null;\n private _disposables = new DisposableSet();\n private _drag: Drag = null;\n private _dragData: { pressX: number, pressY: number, index: number } = null;\n}\n\n\n/**\n * A namespace for Chatbox statics.\n */\nexport\nnamespace Chatbox {\n /**\n * The initialization options for a chatbox widget.\n */\n export\n interface IOptions {\n /**\n * The content factory for the chatbox widget.\n */\n contentFactory: IContentFactory;\n\n /**\n * The mime renderer for the chatbox widget.\n */\n rendermime: IRenderMime;\n }\n\n /**\n * A content factory for chatbox children.\n */\n export\n interface IContentFactory {\n /**\n * The editor factory.\n */\n readonly editorFactory: CodeEditor.Factory;\n\n /**\n * The factory for a markdown cell widget.\n */\n readonly markdownCellContentFactory: Cell.IContentFactory;\n\n /**\n * Create a new 
cell widget.\n */\n createCell(options: MarkdownCell.IOptions): MarkdownCell;\n\n }\n\n /**\n * Default implementation of `IContentFactory`.\n */\n export\n class ContentFactory implements IContentFactory {\n /**\n * Create a new content factory.\n */\n constructor(options: IContentFactoryOptions) {\n this.editorFactory = options.editorFactory;\n\n this.markdownCellContentFactory = new MarkdownCell.ContentFactory({\n editorFactory: this.editorFactory,\n });\n }\n\n /**\n * The editor factory.\n */\n readonly editorFactory: CodeEditor.Factory;\n\n /**\n * The factory for a markdown cell widget.\n */\n readonly markdownCellContentFactory: Cell.IContentFactory;\n\n /**\n * Create a new prompt widget.\n */\n createCell(options: MarkdownCell.IOptions): MarkdownCell {\n return new MarkdownCell(options);\n }\n }\n /**\n * An initialize options for `ContentFactory`.\n */\n export\n interface IContentFactoryOptions {\n /**\n * The editor factory.\n */\n editorFactory: CodeEditor.Factory;\n }\n}\n\n\n/**\n * A namespace for chatbox widget private data.\n */\nnamespace Private {\n /**\n * Jump to the bottom of a node.\n *\n * @param node - The scrollable element.\n */\n export\n function scrollToBottom(node: HTMLElement): void {\n node.scrollTop = node.scrollHeight - node.clientHeight;\n }\n}\n\n/**\n * A namespace for private data.\n */\nnamespace Private {\n /**\n * Create a chat entry drag image.\n */\n export\n function createDragImage(): HTMLElement {\n let node = document.createElement('div');\n let span = document.createElement('span');\n span.textContent = '1';\n span.className = FILLED_CIRCLE_CLASS;\n node.appendChild(span);\n node.className = DRAG_IMAGE_CLASS;\n return node;\n }\n}\n" }, { "alpha_fraction": 0.5874519944190979, "alphanum_fraction": 0.5908109545707703, "avg_line_length": 25.977346420288086, "blob_id": "396b2bdecad60b19b0da1a5da92a85a868283cb2", "content_id": "78db6ef10f818534cb14528d7eb741327605a236", "detected_licenses": [ "BSD-3-Clause" ], 
"is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 8336, "license_type": "permissive", "max_line_length": 77, "num_lines": 309, "path": "/test/src/chatbox/chatbox.spec.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport expect = require('expect.js');\n\nimport {\n JSONExt\n} from '@phosphor/coreutils';\n\nimport {\n Message\n} from '@phosphor/messaging';\n\nimport {\n Widget\n} from '@phosphor/widgets';\n\nimport {\n Chatbox, ChatEntry \n} from '@jupyterlab/chatbox';\n\nimport {\n editorServices\n} from '@jupyterlab/codemirror';\n\nimport {\n MarkdownCell, MarkdownCellModel\n} from '@jupyterlab/cells';\n\nimport {\n DocumentModel\n} from '@jupyterlab/docregistry';\n\nimport {\n ModelDB, ObservableMap, ObservableList, ICollaborator\n} from '@jupyterlab/coreutils';\n\nimport {\n defaultRenderMime\n} from '../utils';\n\n\n/**\n * Factory stuff.\n */\nconst editorFactory = editorServices.factoryService.newInlineEditor.bind(\n editorServices.factoryService);\nconst contentFactory = new Chatbox.ContentFactory({ editorFactory });\nconst rendermime = defaultRenderMime();\n\n/**\n * Create a dummy collaborator map.\n */\nclass DummyCollaboratorMap extends ObservableMap<ICollaborator> {\n type: 'Map';\n\n readonly localCollaborator: ICollaborator = {\n userId: '1234',\n sessionId: '5678',\n displayName: 'A. U. 
Thor',\n color: '#00FF33',\n shortName: 'AU'\n }\n}\n\n/**\n * Create a dummy collaborative ModelDB.\n */\nclass DummyCollaborativeDB extends ModelDB {\n readonly isCollaborative: boolean = true;\n\n readonly collaborators = new DummyCollaboratorMap();\n} \n\n\ndescribe('chatbox/chatbox', () => {\n\n describe('Chatbox', () => {\n\n let chatbox: Chatbox;\n let docmodel: DocumentModel;\n let modelDB: DummyCollaborativeDB;\n\n beforeEach(() => {\n chatbox = new Chatbox({\n rendermime, contentFactory\n });\n modelDB = new DummyCollaborativeDB();\n docmodel = new DocumentModel('', modelDB);\n chatbox.model = docmodel;\n });\n\n afterEach(() => {\n chatbox.dispose();\n docmodel.dispose();\n modelDB.dispose();\n });\n\n describe('#constructor()', () => {\n\n it('should create a new chatbox content widget', () => {\n Widget.attach(chatbox, document.body);\n expect(chatbox).to.be.a(Chatbox);\n expect(chatbox.node.classList).to.contain('jp-Chatbox');\n });\n\n });\n\n describe('#prompt', () => {\n\n it('should be a markdown cell widget', () => {\n Widget.attach(chatbox, document.body);\n expect(chatbox.prompt).to.be.a(MarkdownCell);\n });\n\n it('should be replaced after posting', () => {\n Widget.attach(chatbox, document.body);\n let old = chatbox.prompt;\n expect(old).to.be.a(MarkdownCell);\n old.model.value.text = 'An entry';\n chatbox.post();\n expect(chatbox.prompt).to.be.a(MarkdownCell);\n expect(chatbox.prompt).to.not.be(old);\n\n });\n\n });\n\n describe('#contentFactory', () => {\n\n it('should be the content factory used by the widget', () => {\n expect(chatbox.contentFactory).to.be.a(Chatbox.ContentFactory);\n });\n\n });\n\n describe('#log', () => {\n\n it('should get the log of chat entries', () => {\n expect(chatbox.log).to.be.a(ObservableList);\n });\n\n });\n\n describe('#widgets', () => {\n\n it('should get the array of rendered chat widgets', () => {\n Widget.attach(chatbox, document.body);\n chatbox.prompt.model.value.text = 'An entry';\n chatbox.post();\n 
expect(chatbox.widgets[0]).to.be.a(Widget);\n });\n\n });\n\n describe('#model', () => {\n\n it('should get the current model of the chatbox', () => {\n Widget.attach(chatbox, document.body);\n expect(chatbox.model).to.be(docmodel);\n });\n\n it('should set the current model of the chatbox', () => {\n Widget.attach(chatbox, document.body);\n let newModel = new DocumentModel('', modelDB);\n chatbox.model = newModel;\n expect(chatbox.model).to.be(newModel);\n });\n\n it('should clear the chatbox if given an invalid model', () => {\n Widget.attach(chatbox, document.body);\n chatbox.model = null;\n expect(chatbox.model).to.be(null);\n expect(chatbox.log).to.be(null);\n expect(chatbox.widgets.length).to.be(0);\n });\n\n it('should be able to recall chat logs of other models', () => {\n Widget.attach(chatbox, document.body);\n let newModelDB = new DummyCollaborativeDB();\n let newModel = new DocumentModel('', newModelDB);\n chatbox.prompt.model.value.text = 'A: 1';\n chatbox.post();\n chatbox.prompt.model.value.text = 'A: 2';\n chatbox.post();\n chatbox.prompt.model.value.text = 'A: 3';\n chatbox.post();\n chatbox.model = newModel;\n chatbox.prompt.model.value.text = 'B: 1';\n chatbox.post();\n chatbox.prompt.model.value.text = 'B: 2';\n chatbox.post();\n chatbox.model = docmodel;\n expect(chatbox.log.length).to.be(3);\n expect(chatbox.log.back.text).to.be('A: 3');\n chatbox.model = newModel;\n expect(chatbox.log.length).to.be(2);\n expect(chatbox.log.back.text).to.be('B: 2');\n });\n\n });\n\n describe('#post()', () => {\n\n it('should add a new entry to the log', () => {\n Widget.attach(chatbox, document.body);\n chatbox.prompt.model.value.text = 'An entry';\n chatbox.post();\n let entry = chatbox.log.back;\n expect(entry.text).to.be('An entry');\n expect(JSONExt.deepEqual(entry.author,\n modelDB.collaborators.localCollaborator)).to.be(true);\n });\n\n it('should add a new entry widget to the panel', () => {\n Widget.attach(chatbox, document.body);\n 
chatbox.prompt.model.value.text = 'An entry';\n chatbox.post();\n let widget = chatbox.widgets[chatbox.widgets.length-1] as ChatEntry;\n expect(widget.model.text).to.be('An entry');\n expect(JSONExt.deepEqual(widget.model.author,\n modelDB.collaborators.localCollaborator)).to.be(true);\n });\n\n it('should not add an entry if the prompt has only whitespace', () => {\n Widget.attach(chatbox, document.body);\n chatbox.prompt.model.value.text = ' \\n ';\n chatbox.post();\n expect(chatbox.log.length).to.be(0);\n expect(chatbox.widgets.length).to.be(0);\n });\n\n });\n\n describe('#insertLineBreak()', () => {\n\n it('should insert a line break into the prompt', () => {\n Widget.attach(chatbox, document.body);\n\n let model = chatbox.prompt.model;\n expect(model.value.text).to.be.empty();\n chatbox.insertLinebreak();\n expect(model.value.text).to.be('\\n');\n });\n\n });\n\n describe('#clear()', () => {\n\n it('should clear all of the content cells', () => {\n Widget.attach(chatbox, document.body);\n chatbox.prompt.model.value.text = 'An entry';\n chatbox.post();\n expect(chatbox.widgets.length).to.be.greaterThan(0);\n chatbox.clear();\n expect(chatbox.widgets.length).to.be(0);\n expect(chatbox.prompt.model.value.text).to.be('');\n });\n\n });\n\n describe('#dispose()', () => {\n\n it('should dispose the content widget', () => {\n Widget.attach(chatbox, document.body);\n expect(chatbox.isDisposed).to.be(false);\n chatbox.dispose();\n expect(chatbox.isDisposed).to.be(true);\n });\n\n it('should be safe to dispose multiple times', () => {\n Widget.attach(chatbox, document.body);\n expect(chatbox.isDisposed).to.be(false);\n chatbox.dispose();\n chatbox.dispose();\n expect(chatbox.isDisposed).to.be(true);\n });\n\n });\n\n describe('#onActivateRequest()', () => {\n\n it('should focus the prompt editor', done => {\n expect(chatbox.prompt).to.not.be.ok();\n Widget.attach(chatbox, document.body);\n requestAnimationFrame(() => {\n chatbox.activate();\n requestAnimationFrame(() 
=> {\n expect(chatbox.prompt.editor.hasFocus()).to.be(true);\n done();\n });\n });\n });\n\n });\n\n describe('#onAfterAttach()', () => {\n\n it('should be called after attach, creating a prompt', () => {\n expect(chatbox.prompt).to.not.be.ok();\n Widget.attach(chatbox, document.body);\n expect(chatbox.prompt).to.be.ok();\n });\n\n });\n\n });\n\n});\n" }, { "alpha_fraction": 0.6667770147323608, "alphanum_fraction": 0.6671081185340881, "avg_line_length": 21.969581604003906, "blob_id": "0fc9a45d83360479951a758f893c22e1cf99563e", "content_id": "e92ab0d63a3d8d6af4e72e42c353d52030d2293e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 6041, "license_type": "permissive", "max_line_length": 106, "num_lines": 263, "path": "/packages/chatbox/src/panel.ts", "repo_name": "bluetyson/jupyterlab", "src_encoding": "UTF-8", "text": "// Copyright (c) Jupyter Development Team.\n// Distributed under the terms of the Modified BSD License.\n\nimport {\n Panel, Widget\n} from '@phosphor/widgets';\n\nimport {\n Message\n} from '@phosphor/messaging';\n\nimport {\n CodeEditor\n} from '@jupyterlab/codeeditor';\n\nimport {\n PathExt\n} from '@jupyterlab/coreutils';\n\nimport {\n IRenderMime\n} from '@jupyterlab/rendermime';\n\nimport {\n DocumentRegistry\n} from '@jupyterlab/docregistry';\n\nimport {\n Chatbox\n} from './chatbox';\n\n\n/**\n * The class name added to chatbox panels.\n */\nconst PANEL_CLASS = 'jp-ChatboxPanel';\n\n/**\n * The class name added to the document info widget.\n */\nconst DOCUMENT_INFO_CLASS = 'jp-ChatboxDocumentInfo';\n\n/**\n * The class name added to a button icon node.\n */\nconst ICON_CLASS = 'jp-FileButtons-buttonIcon';\n\n/**\n * The class name added to a material icon button.\n */\nconst MATERIAL_CLASS = 'jp-MaterialIcon';\n\n/**\n * The class name added to the add button.\n */\nconst CHAT_ICON = 'jp-ChatIcon';\n\n\n/**\n * A panel which contains a chatbox and the ability to add 
other children.\n */\nexport\nclass ChatboxPanel extends Panel {\n /**\n * Construct a chatbox panel.\n */\n constructor(options: ChatboxPanel.IOptions) {\n super();\n this.addClass(PANEL_CLASS);\n let factory = options.contentFactory;\n let rendermime = options.rendermime;\n let contentFactory = factory.chatboxContentFactory;\n\n this._documentInfo = new ChatboxDocumentInfo();\n this.addWidget(this._documentInfo);\n\n this.chatbox = new Chatbox({\n rendermime, contentFactory\n });\n this.addWidget(this.chatbox);\n this.id = 'chatbox';\n }\n\n /**\n * The chatbox widget used by the panel.\n */\n readonly chatbox: Chatbox;\n\n /**\n * The current document context for the chat.\n */\n get context(): DocumentRegistry.IContext<DocumentRegistry.IModel> {\n return this._context;\n }\n set context(value: DocumentRegistry.IContext<DocumentRegistry.IModel>) {\n if (this._context === value) {\n return;\n }\n this._context = value;\n this.chatbox.model = value.model;\n this._documentInfo.context = value;\n }\n\n /**\n * Dispose of the resources held by the widget.\n */\n dispose(): void {\n this.chatbox.dispose();\n this._documentInfo.dispose();\n super.dispose();\n }\n\n /**\n * Handle `'activate-request'` messages.\n */\n protected onActivateRequest(msg: Message): void {\n this.chatbox.prompt.editor.focus();\n }\n\n /**\n * Handle `'close-request'` messages.\n */\n protected onCloseRequest(msg: Message): void {\n super.onCloseRequest(msg);\n this.dispose();\n }\n\n private _documentInfo: ChatboxDocumentInfo;\n private _context: DocumentRegistry.IContext<DocumentRegistry.IModel> = null;\n}\n\n/**\n * A class representing a widget displaying document information\n * for the chatbox.\n */\nexport\nclass ChatboxDocumentInfo extends Widget {\n constructor() {\n super();\n this.addClass(DOCUMENT_INFO_CLASS);\n let chatIcon = document.createElement('div');\n chatIcon.className = ICON_CLASS + ' ' + MATERIAL_CLASS + ' ' + CHAT_ICON;\n let fileName = document.createElement('div');\n 
fileName.className = 'jp-ChatboxDocumentInfo-name';\n this.node.appendChild(chatIcon);\n this.node.appendChild(fileName);\n }\n\n /**\n * The current document context for the chat.\n */\n get context(): DocumentRegistry.IContext<DocumentRegistry.IModel> {\n return this._context;\n }\n set context(value: DocumentRegistry.IContext<DocumentRegistry.IModel>) {\n if (this._context) {\n this._context.pathChanged.disconnect(this._onPathChanged, this);\n }\n this._context = value;\n this._context.pathChanged.connect(this._onPathChanged, this);\n this.node.children[1].textContent = PathExt.basename(value.path);\n }\n\n /**\n * Handle a file moving/renaming.\n */\n private _onPathChanged(sender: DocumentRegistry.IContext<DocumentRegistry.IModel>, path: string): void {\n this.node.children[1].textContent = PathExt.basename(path);\n }\n\n private _context: DocumentRegistry.IContext<DocumentRegistry.IModel> = null;\n}\n\n\n/**\n * A namespace for ChatboxPanel statics.\n */\nexport\nnamespace ChatboxPanel {\n /**\n * The initialization options for a chatbox panel.\n */\n export\n interface IOptions {\n /**\n * The rendermime instance used by the panel.\n */\n rendermime: IRenderMime;\n\n /**\n * The content factory for the panel.\n */\n contentFactory: IContentFactory;\n }\n\n /**\n * The chatbox panel renderer.\n */\n export\n interface IContentFactory {\n /**\n * The editor factory used by the content factory.\n */\n readonly editorFactory: CodeEditor.Factory;\n\n /**\n * The factory for chatbox content.\n */\n readonly chatboxContentFactory: Chatbox.IContentFactory;\n }\n\n /**\n * Default implementation of `IContentFactory`.\n */\n export\n class ContentFactory implements IContentFactory {\n /**\n * Create a new content factory.\n */\n constructor(options: ContentFactory.IOptions) {\n this.editorFactory = options.editorFactory;\n this.chatboxContentFactory = (options.chatboxContentFactory ||\n new Chatbox.ContentFactory({\n editorFactory: this.editorFactory\n })\n );\n }\n\n 
/**\n * The editor factory used by the content factory.\n */\n readonly editorFactory: CodeEditor.Factory;\n\n /**\n * The factory for code chatbox content.\n */\n readonly chatboxContentFactory: Chatbox.IContentFactory;\n }\n\n /**\n * The namespace for `ContentFactory`.\n */\n export\n namespace ContentFactory {\n /**\n * An initialization options for a chatbox panel factory.\n */\n export\n interface IOptions {\n /**\n * The editor factory. This will be used to create a\n * chatboxContentFactory if none is given.\n */\n editorFactory: CodeEditor.Factory;\n\n /**\n * The factory for chatbox widget content. If given, this will\n * take precedence over the output area and cell factories.\n */\n chatboxContentFactory?: Chatbox.IContentFactory;\n }\n }\n}\n" } ]
10
zhixingheyi666/tsgSql
https://github.com/zhixingheyi666/tsgSql
a39431fa0430e7c0b4c71a0bd0b5d701caf07a88
acf027a1dac97237180c4ac89a3ea671b6dbc4b7
521405d3bb63087f3d2c963ec093f755c50f354e
refs/heads/master
2021-05-12T10:07:37.208486
2018-12-17T14:40:18
2018-12-17T14:40:18
117,345,609
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5571444630622864, "alphanum_fraction": 0.5832481980323792, "avg_line_length": 40.37089157104492, "blob_id": "679d7ec252edd25c9224ecb600eb2e8ba2edd924", "content_id": "5777b5358b70c61e3fe95459e25b469243605445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11053, "license_type": "no_license", "max_line_length": 237, "num_lines": 213, "path": "/GenerLendWork/RidBidDate.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "#_*_coding:utf-8_*_\n\n__author__ = 'Master Wang'\n\n\nfrom fc.LOG_sf import logger\n\nfrom fc.conn_SQL import *\ncursor, mkconn = mkcon('mic')\n\nbidInuse = set()\n\n# --同一时间段内,为防止同一本书同时被不同的人借阅,全部读者对应的bid不应重复,已经用过的bid放入集合bidInuse\n\ndef Bid(grade):\n # bStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n # --选取grade年级曾经借过的书\n # ??缺少把普通的借阅记录转换到abklendwork中去的sql语句和规范,abklendwork怎样产生的?gu\n global bidInuse\n sql = 'SELECT DISTINCT bid, bcid FROM abklendwork a WHERE left(a.loperator,1) = \\'' + grade + '\\''\n cursor.execute(sql)\n rows = cursor.fetchall()\n tSet = set()\n lBid = []\n for row in rows:\n tSet.add(row[0])\n lBid.append(list(row))\n tSet = tSet - bidInuse\n bidInuse = bidInuse | tSet\n return lBid\n\n\ndef Rid(grade):\n # rStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n # --选取当前时间grade年级读者 \t\t下面语句的基础是部门名称第一个字符是年级 如 ‘一年级3班2022’\n sql = 'SELECT rid FROM reader r, department d WHERE left(d.depname,1) = \\'' + grade + '\\' AND r.depid = d.depid'\n cursor.execute(sql)\n rows = cursor.fetchall()\n tSet = set()\n for row in rows:\n tSet.add(row[0])\n return list(tSet)\n\n\ndef lDay(beginDate, endDate, Holiday, Workday, tmp, temp=False):\n # temp 是一个bool值,当其为真,表示生成的借阅记录仅仅为了应对学期中间的临时检查\n \"\"\"\n #先写一个生成整个学期借阅记录日期池的代码\n #本程序适应的情况:半学年半学年的生成记录\n #beginDate 日期池开始时间 endDate 日期池结束时间 采用的是'yyyy-mm-dd'格式的字符串\n #holiday 起始时间范围内的法定假期,或者其他异常导致的非工作日 workday 起始时间内因调休导致的工作日\n # 以上连个变量都是list,列表中的每项采用的是'mm-dd'格式的字符\n #ldStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n 
#--if(beginDate,endDate格式错误),ldStat=[5,'格式错误']\n #本程序是以Abklendwork中五年的日期为蓝本,生成的借阅日期,用那几年为蓝本,也可以设计成传参数控制。\n #--@ny,借阅记录生成时的年份,@hisy,将要选取的原记录的年份\n #--@begindate,需要生成记录的起始时间,一般为学期的开始, @enddate,需要生成记录的结束时间,一般为学期的结束\n #--tt--貌似declare语句声明的变量如@wk,在后面的语句如set语句和后面用到这个变量的语句中无法使用,会返回42000\n # 错误,如果我把他们放在一个execute中执行,才有效。看看是否跟begin 和end有关。\n #2016-2017上半学年\n #2016-09-01 2011-01-14\n #holiday\n #09-15 09-16 09-17 10-01 10-02 10-03 10-04 10-05 10-06 10-07 12-31 01-01 01-02\n #workday\n #09-18 10-08 10-09\n #\n #\n #日期筛选规则:\n # 先筛选bakcdate为周末或者在holiday数组中的记录,将其删除\n # 筛选lenddate为周末且不在workday中,将其日期加入数组holiday,然后为workday生成记录,然后删除holiday记录\n #workday补:\n # workday记录只从lenddate在workday之后的记录中转移(原则不超过30天)\n # workday记录优先从有记录的holiday中转移,至少要有一天的记录,\n # 如果不存在符合条件且有记录的holiday,则workday从lendate在workday之后的记录最多的三天中每天转移三分之一\n \"\"\"\n ny = beginDate[:4]\n cursor.execute('SELECT DISTINCT year(lenddate) hisy FROM Abklendwork ORDER BY hisy')\n hisys = cursor.fetchall()\n hisy = []\n # 清空临时表\n cursor.execute('TRUNCATE TABLE AttLendWork')\n for hi in hisys:\n hisy.append(hi[0])\n for hi in hisy:\n if hi < int(ny):\n # cursor.execute('set @hisy =' + str(hi))\n # cursor.execute('set @wk = datediff(wk, @hisy, @ny)*7')\n cursor.execute('select datediff(wk, ?, ?)*7', str(hi), ny)\n wk = cursor.fetchall()[0][0]\n logger.info(wk)\n \"\"\"\n # backdate < endDate 说明本程序没有考虑学期中间有临时检查的来,学期中间可以有未归还的书籍\n # 未归还的书籍backdate应当为NULL,boperator也应当为NULL\n # 上面的要求不难实现,首先更改下面语句的时间选择范围,更改 backdate < endDate 为 lenddate < endDate\n # 最后将生成的日期池中的backdate > endDate 的backdate 置为 NULL ,同时将boperator也置为NULL \n #pdb.set_trace()\n #进行下面语句之前,数据库中已经建好一个临时数据表AttLendWork\n \"\"\"\n # temp 是一个bool值,当其为真,表示生成的借阅记录仅仅为了应对学期中间的临时检查\n if not temp:\n sql = \"\"\"INSERT INTO AttLendWork(lenddate, backdate, returndate) \n SELECT lenddate + ?, backdate + ?, returndate + ? FROM abklendwork\\\n WHERE lenddate + ? > ? AND backdate + ? 
< ?;\\\n INSERT INTO attlendwork(lenddate, backdate, returndate) \\\n SELECT lenddate + ?-7, backdate + ?-7, returndate + ?-7 FROM abklendwork\\\n WHERE lenddate + ?-7 > ? AND backdate + ?-7 < ?;\"\"\"\n elif temp:\n sql = \"\"\"INSERT INTO AttLendWork(lenddate, backdate, returndate) \n SELECT lenddate + ?, backdate + ?, returndate + ? FROM abklendwork\\\n WHERE lenddate + ? > ? AND lenddate+ ? < ?;\\\n INSERT INTO attlendwork(lenddate, backdate, returndate) \\\n SELECT lenddate + ?-7, backdate + ?-7, returndate + ?-7 FROM abklendwork\\\n WHERE lenddate + ?-7 > ? AND lenddate + ?-7 < ?;\"\"\"\n\n cursor.execute(sql, wk, wk, wk, wk, beginDate, wk, endDate, wk, wk, wk, wk, beginDate, wk, endDate)\n # 日期筛选整理\n holiday = []\n workday = []\n # yHalf:1,下半学年,0,上半学年,_,未设置\n yHalf = '_'\n for hi in Holiday:\n # 如果开学日期为下半年的日期,跨过元旦,年份应该加一年。\n # 这里突出了日期检查的重要,‘2016-09-02’能够正常工作,其他的诸如‘2016-9-2’,‘09-02-2016’都将不能正常运行,月份格式必须为‘09-02’\n if int(beginDate[5:7]) > 7 and (hi[:2] in ['01', '02']):\n holiday.append(str(int(ny) + 1) + '-' + hi)\n yHalf = '1'\n else:\n holiday.append(str(int(ny)) + '-' + hi)\n yHalf = '0'\n for wi in Workday:\n if int(beginDate[5:7]) > 7 and (wi[:2] in ['01', '02']):\n workday.append(str(int(ny) + 1) + '-' + wi)\n else:\n workday.append(str(int(ny)) + '-' + wi)\n # 先筛选bakcdate为周末或者在holiday数组中的记录,将其删除\n sql = 'DELETE FROM AttLendWork WHERE datepart(DW, backdate) IN (1,7)'\n cursor.execute(sql)\n rowCount = cursor.rowcount\n for hi in holiday:\n sql = 'DELETE FROM AttLendWork WHERE CONVERT(VARCHAR(100), backdate, 23) = ?'\n cursor.execute(sql, hi)\n logger.info(rowCount)\n rowCount = rowCount + cursor.rowcount\n tt = 'Delete backdate Effect rows:' + str(rowCount)\n logger.info(tt)\n # 筛选lenddate为周末且不在workday中,将其日期加入数组holiday\n sql = 'SELECT DISTINCT CONVERT(VARCHAR(100), lenddate, 23) FROM AttLendWork WHERE datepart(DW, lenddate) IN (1,7)'\n cursor.execute(sql)\n for li in cursor:\n if li[0] not in workday and li[0] not in holiday:\n holiday.append(li[0])\n 
logger.info(holiday)\n # 为workday生成记录,然后删除holiday记录\n for wi in workday:\n t = 0\n wcnt = 0\n for hi in holiday:\n cursor.execute('select datediff(dd,?,?)', wi, hi)\n diff = cursor.fetchone()[0]\n if diff < 30 and diff > 0:\n # 执行日期替换\n sql = 'UPDATE AttLendWork SET lenddate = ? WHERE CONVERT(VARCHAR(100),lenddate,23) = ?'\n cursor.execute(sql, wi, hi)\n wcnt = wcnt + cursor.rowcount\n logger.debug('本次替换:')\n logger.debug(wcnt)\n holiday.remove(hi)\n if wcnt > 0:\n t = 1\n if wcnt > 30:\n logger.debug(\"替换超过30条,即可结束替换\")\n logger.debug(wcnt)\n break\n if t == 1:\n cursor.commit()\n if t == 0:\n try:\n logger.debug(\"holiday中不存在合格的替换日期,从普通日期中替换\")\n # sql = 'update AttLendWork set lenddate = ? where did in (select top 30 did from AttLendWork where lenddate = (select top 1 lenddate from AttLendWork where lenddate > ? group by lenddate order by count(lenddate) desc ))'\n # pdb.set_trace()\n sql = 'SELECT TOP 1 CONVERT( VARCHAR(100), lenddate, 23) FROM AttLendWork WHERE lenddate > ? GROUP BY lenddate ORDER BY count(lenddate) DESC'\n cursor.execute(sql, wi)\n tli = cursor.fetchone()[0]\n sql = 'UPDATE AttLendWork SET lenddate = ? WHERE did IN (SELECT TOP 30 did FROM AttLendWork WHERE lenddate = ?)'\n cursor.execute(sql, wi, tli)\n logger.debug('本次替换:')\n wcnt = cursor.rowcount\n logger.debug(wcnt)\n if wcnt > 0:\n cursor.commit()\n except:\n logger.warn('日期 %s 没有合适的替换日期 ' % wi)\n logger.debug('删除lenddate在holiday中的记录')\n sql = 'DELETE FROM AttLendWork WHERE CONVERT(VARCHAR(100), lenddate, 23) = ?'\n for hi in holiday:\n cursor.execute(sql, hi)\n logger.debug(cursor.rowcount)\n cursor.commit()\n if tmp:\n toperator = '王' + '_00'\n else:\n toperator = '王' + ny[2:4] + yHalf\n # pdb.set_trace()\n cursor.execute('UPDATE AttLendWork SET loperator = ?, boperator = ? 
WHERE 1=1', toperator, toperator)\n if temp:\n # temp 是一个bool值,当其为真,表示生成的借阅记录仅仅为了应对学期中间的临时检查\n # 将生成的日期池中的backdate > endDate 的backdate 置为 NULL ,同时将boperator也置为NULL\n cursor.execute('UPDATE AttLendWork SET backdate = NULL ,boperator = NULL WHERE backdate >= ?', endDate)\n logger.debug(cursor.rowcount)\n cursor.commit()\n logger.info(hisy)\n # return(hisy)\n # for hi in hisy:" }, { "alpha_fraction": 0.5481881499290466, "alphanum_fraction": 0.5535851716995239, "avg_line_length": 27.77777862548828, "blob_id": "d408148d4a2da02222cc463b140c7f9c57eeabc0", "content_id": "931a02b4b5e2db1554080053ef50b863cf6733bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 147, "num_lines": 45, "path": "/InOutSync/待整理/mic_to_my_bookclass.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# --coding: utf-8--\n#已经运行过,此为存档文件,若仍需运行,请重新检查参数。并把下面两行退注释\n#import mysql.connector\n#import pyodbc\n\n#MYsql configure\nmy_config = {\n 'user':'root',\n 'password':'sa',\n 'database':'sql-learn'\n }\nbookclass_wmy = 'insert into bookclass( Bcid, Bid, State, Clerk, EnterDate, price, sk, inForm, Ddid ) values( %s, %s, %s, %s, %s, %s, %s, %s, %s )'\n\nMy_conn = mysql.connector.connect( **my_config )\nmy_cursor = My_conn.cursor()\n\n#Microsql configure\nMic_config = r'driver={SQL Server};server=localhost;uid=sa;pwd=sa;database=easybook'\n\nmic_conn = pyodbc.connect( Mic_config )\nmic_cursor = mic_conn.cursor()\nloop = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', \n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', \n 'y', 'z']\nsql0 = 'select * from bookclass where bcid like \\''\nsql1 = '%\\''\n\nfor yy in loop:\n temp = input(\"press N to quit:\\n\")\n if temp == 'N':\n break\n sql_e = sql0 + yy + sql1\n mic_cursor.execute( sql_e )\n mic_rows = mic_cursor.fetchall()\n count_t = 0\n for row in mic_rows:\n# print(row)\n arg_wmy = []\n for 
row_mem in row:\n arg_wmy.append( row_mem )\n my_cursor.execute( bookclass_wmy, arg_wmy )\n count_t += 1\n My_conn.commit()\n print( count_t )\n print( '\\n' )\n\n\n" }, { "alpha_fraction": 0.5400696992874146, "alphanum_fraction": 0.5902438759803772, "avg_line_length": 30.19565200805664, "blob_id": "b5ca917f04144d52de158d5beb8a36eddcf062c8", "content_id": "b7416d197d846e4d4bb551b0b3d254b206b00bf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1736, "license_type": "no_license", "max_line_length": 139, "num_lines": 46, "path": "/GenerLendWork/Book Insertion/gener_Books.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nimport random, pdb, datetime\nfrom itertools import zip_longest as myzip\n\nfrom fc.conn_SQL import *\nfrom fc.LOG_sf import logger\n\ncursor, mkconn = mkcon('mic')\n\n# from RidBidDate import Rid, Bid, lDay\n\ndef gener_books( num='',Bcid=\"I999/9999\", Clerk='王_00', State=0, EnterDate='', price=11, sk=2, inForm='', Ddid=None,temp=False, tempNum=0):\n # # 临时指定必要的参数,每次程序运行前,根据需要修改,并反注释\n # # ·考虑将以下参数随机化,以便增加真实性\n # # Bcid必须是Booklist表中存在的,否则会报错\n # Bcid ='I267/234'\n # #··sql数据库中类型为datetime的列,可以用时间字符串的格式插入,例如'2018-10-17 08:26:28.297'\n # EnterDate = '2018-10-17 08:26:28.297'\n # Bid = ['0'+str(i) for i in range(60000, 60000+num)]\n # #相应的总括登记号\n # inForm = 8\n # 生成插入是的数据\n books = [(Bcid, bid, State, Clerk, EnterDate, price, sk, inForm) for bid in Bid]\n\n # 如果没有指定必须的参数,程序将退出\n if enterDate == '' or inForm == '' or Ddid != None:\n num = ''\n if num == '':\n logger.warn(\"-------------------程序缺少必要的参数!!------------------\\n\")\n return\n\n # Values后面括号了的?必须用逗号分隔,不然报错\n sql = \"\"\"insert into bookclass(Bcid, Bid, State, Clerk, EnterDate, price, sk, inForm) VALUES(?, ?, ?, ?, ?, ?, ?, ?)\"\"\"\n cursor.executemany(sql, books)\n # cursor.execute(sql)\n mkconn.commit()\n # sql = \"select * from bookclass where Clerk = '王_00'\"\n # 
cursor.execute(sql)\n # for ci in cursor.fetchall():\n # print(ci)\n\n\nif __name__ == '__main__':\n gener_books(num=175)\n" }, { "alpha_fraction": 0.6532769799232483, "alphanum_fraction": 0.6553910970687866, "avg_line_length": 20.363636016845703, "blob_id": "a4667b25e3a2abc14544d4afbe42bbbfa8b635f1", "content_id": "c529e3a35a0f43de9f62a268e80f222ed15c29b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/GenerLendWork/connection.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# --coding: utf-8--\n\nimport mysql.connector\nimport pyodbc\n\n#MYsql configure\nmy_config = {\n 'user':'root',\n 'password':'sa',\n 'database':'sql-learn'\n }\n\nMy_conn = mysql.connector.connect( **my_config )\nmy_cursor = My_conn.cursor()\n\n#Microsql configure\nMic_config = r'driver={SQL Server};server=localhost;uid=sa;pwd=sa;database=easybook'\n\nmic_conn = pyodbc.connect( Mic_config )\nmic_cursor = mic_conn.cursor()\n\nprint('import connection OK!!')\n\n\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5623652338981628, "avg_line_length": 40.17289733886719, "blob_id": "071c413799ebca509c5c2832708b00ff96d3ffff", "content_id": "e9d15ca9e3482cd96766fcea868fe58ef9860942", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21246, "license_type": "no_license", "max_line_length": 237, "num_lines": 428, "path": "/GenerLendWork/backUp/gener_LendWorkBK2.0.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n-->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n--生成2016.9至2017.2(上半学年)的借阅记录\n--每行开头有四个注释符号,如“----”的,为废弃语句\n--同一时间段内,为防止同一本书同时被不同的人借阅,全部读者对应的bid不应重复\n--对每个年级分别生成,以一年级为例\n--对于法定节假日以及其他工作日的调整,尤其异常调整,如大雪天停课,平时应当记录\n--生成日期时,用holiday数组传入\n\n--选取一年级曾经借过的书\n--select * from abklendwork a where 
left(a.loperator,1) = '一';\n--select distinct bid from abklendwork a where left(a.loperator,1) = '一';\n--选取当前时间一年级读者 \t\t下面语句的基础是部门名称第一个字符是年级 如 ‘一年级3班2022’\n--select rid, depname from reader r, department d where left(d.depname,1) = '一' and r.depid = d.depid; \n--select rid from reader r, department d where left(d.depname,1) = '一' and r.depid = d.depid;\n--生成临时表,读者写入4次,代表借阅4次\n--select rid into AtttRid from reader r, department d where left(d.depname,1) = '一' and r.depid = d.depid;\n--insert into AtttRid select rid from reader r, department d where left(d.depname,1) = '一' and r.depid = d.depid;\n--insert into AtttRid select rid from reader r, department d where left(d.depname,1) = '一' and r.depid = d.depid;\n--insert into AtttRid select rid from reader r, department d where left(d.depname,1) = '一' and r.depid = d.depid;\n--select * from AtttRid;\n--drop table Atttrid;\n--随机选择bid写入临时表\n--select distinct bid , newid() randUid into AtttBid from abklendwork a where left(a.loperator,1) = '一' order by newid();\n--drop table Atttbid;\n--select * from Atttbid;\n--测试合并两个表\n\n\n\n\n\n----rid和bid的笛卡尔积\n----select a.bid, r.rid from abklendwork a, reader r, department d where left(a.loperator,1) = '一' and left(d.depname,1) = '一' and r.depid = d.depid;\n----选取当前时间一年级读者 2016 + 7 - 1 = 2022 ,下面语句的基础是部门名称后四位是用阿拉伯数字表示的学生的毕业年份\n----选取某年级读者公式: 当前学年开始年份 + 7 - 在读年级 = 毕业年份\n----或者获取depname,用python查找子字符串,匹配‘一年级’\n----select rid, depname from reader r, department d where right(d.depname, 4) = right(str((2016 + 7 - 1)),4) and r.depid = d.depid;\n----select len(str((2016 + 7 - 1));--这个字符串的长度是 10 \n----select str(2022);\n\n\n-->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n\"\"\"\n\n# from fc.conn_SQL import mkcon\n\nimport random, pdb\n\nfrom itertools import zip_longest as myzip\nfrom fc.conn_SQL import *\nfrom fc.LOG_sf import logger\n\ncursor, mkconn = mkcon('mic')\n\n\n# --同一时间段内,为防止同一本书同时被不同的人借阅,全部读者对应的bid不应重复,已经用过的bid放入集合bidInuse\n\ndef Bid(grade):\n # 
bStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n # --选取grade年级曾经借过的书\n # ??缺少把普通的借阅记录转换到abklendwork中去的sql语句和规范,abklendwork怎样产生的?gu\n global bidInuse\n sql = 'SELECT DISTINCT bid, bcid FROM abklendwork a WHERE left(a.loperator,1) = \\'' + grade + '\\''\n cursor.execute(sql)\n rows = cursor.fetchall()\n tSet = set()\n lBid = []\n for row in rows:\n tSet.add(row[0])\n lBid.append(list(row))\n tSet = tSet - bidInuse\n bidInuse = bidInuse | tSet\n return lBid\n\n\ndef Rid(grade):\n # rStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n # --选取当前时间grade年级读者 \t\t下面语句的基础是部门名称第一个字符是年级 如 ‘一年级3班2022’\n sql = 'SELECT rid FROM reader r, department d WHERE left(d.depname,1) = \\'' + grade + '\\' AND r.depid = d.depid'\n cursor.execute(sql)\n rows = cursor.fetchall()\n tSet = set()\n for row in rows:\n tSet.add(row[0])\n return list(tSet)\n\n\ndef lDay(beginDate, endDate, Holiday, Workday, tmp):\n \"\"\"\n #先写一个生成整个学期借阅记录日期池的代码 \n #本程序适应的情况:半学年半学年的生成记录\n #beginDate 日期池开始时间 endDate 日期池结束时间 采用的是'yyyy-mm-dd'格式的字符串\n #holiday 起始时间范围内的法定假期,或者其他异常导致的非工作日 workday 起始时间内因调休导致的工作日 \n # 以上连个变量都是list,列表中的每项采用的是'mm-dd'格式的字符\n #ldStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n #--if(beginDate,endDate格式错误),ldStat=[5,'格式错误']\n #本程序是以Abklendwork中五年的日期为蓝本,生成的借阅日期,用那几年为蓝本,也可以设计成传参数控制。\n #--@ny,借阅记录生成时的年份,@hisy,将要选取的原记录的年份 \n #--@begindate,需要生成记录的起始时间,一般为学期的开始, @enddate,需要生成记录的结束时间,一般为学期的结束 \n #--tt--貌似declare语句声明的变量如@wk,在后面的语句如set语句和后面用到这个变量的语句中无法使用,会返回42000\n # 错误,如果我把他们放在一个execute中执行,才有效。看看是否跟begin 和end有关。\n #2016-2017上半学年\n #2016-09-01 2011-01-14\n #holiday\n #09-15 09-16 09-17 10-01 10-02 10-03 10-04 10-05 10-06 10-07 12-31 01-01 01-02\n #workday\n #09-18 10-08 10-09\n #\n #\n #日期筛选规则:\n # 先筛选bakcdate为周末或者在holiday数组中的记录,将其删除\n # 筛选lenddate为周末且不在workday中,将其日期加入数组holiday,然后为workday生成记录,然后删除holiday记录\n #workday补:\n # workday记录只从lenddate在workday之后的记录中转移(原则不超过30天)\n # workday记录优先从有记录的holiday中转移,至少要有一天的记录,\n # 如果不存在符合条件且有记录的holiday,则workday从lendate在workday之后的记录最多的三天中每天转移三分之一\n \"\"\"\n ny = beginDate[:4]\n cursor.execute('SELECT DISTINCT 
year(lenddate) hisy FROM Abklendwork ORDER BY hisy')\n hisys = cursor.fetchall()\n hisy = []\n # 清空临时表\n cursor.execute('TRUNCATE TABLE AttLendWork')\n for hi in hisys:\n hisy.append(hi[0])\n for hi in hisy:\n if hi < int(ny):\n # cursor.execute('set @hisy =' + str(hi))\n # cursor.execute('set @wk = datediff(wk, @hisy, @ny)*7')\n cursor.execute('select datediff(wk, ?, ?)*7', str(hi), ny)\n wk = cursor.fetchall()[0][0]\n logger.info(wk)\n \"\"\"\n # backdate < endDate 说明本程序没有考虑学期中间有检查的来,学期中间可以有未归还的书籍\n # 未归还的书籍backdate应当为NULL,boperator也应当为NULL\n # 上面的要求不难实现,首先更改下面语句的时间选择范围,更改 backdate < endDate 为 lenddate < endDate\n # 最后将生成的日期池中的backdate > endDate 的backdate 置为 NULL ,同时将boperator也置为NULL \n #pdb.set_trace()\n #进行下面语句之前,数据库中已经建好一个临时数据表AttLendWork\n \"\"\"\n sql = \"\"\"INSERT INTO AttLendWork(lenddate, backdate, returndate) \n SELECT lenddate + ?, backdate + ?, returndate + ? FROM abklendwork\\\n WHERE lenddate + ? > ? AND backdate + ? < ?;\\\n INSERT INTO attlendwork(lenddate, backdate, returndate) \\\n SELECT lenddate + ?-7, backdate + ?-7, returndate + ?-7 FROM abklendwork\\\n WHERE lenddate + ?-7 > ? 
AND backdate + ?-7 < ?;\"\"\"\n cursor.execute(sql, wk, wk, wk, wk, beginDate, wk, endDate, wk, wk, wk, wk, beginDate, wk, endDate)\n # 日期筛选整理\n holiday = []\n workday = []\n # yHalf:1,下半学年,0,上半学年,_,未设置\n yHalf = '_'\n for hi in Holiday:\n # 如果开学日期为下半年的日期,跨过元旦,年份应该加一年。\n # 这里突出了日期检查的重要,‘2016-09-02’能够正常工作,其他的诸如‘2016-9-2’,‘09-02-2016’都将不能正常运行,月份格式必须为‘09-02’\n if int(beginDate[5:7]) > 7 and (hi[:2] in ['01', '02']):\n holiday.append(str(int(ny) + 1) + '-' + hi)\n yHalf = '1'\n else:\n holiday.append(str(int(ny)) + '-' + hi)\n yHalf = '0'\n for wi in Workday:\n if int(beginDate[5:7]) > 7 and (wi[:2] in ['01', '02']):\n workday.append(str(int(ny) + 1) + '-' + wi)\n else:\n workday.append(str(int(ny)) + '-' + wi)\n # 先筛选bakcdate为周末或者在holiday数组中的记录,将其删除\n sql = 'DELETE FROM AttLendWork WHERE datepart(DW, backdate) IN (1,7)'\n cursor.execute(sql)\n rowCount = cursor.rowcount\n for hi in holiday:\n sql = 'DELETE FROM AttLendWork WHERE CONVERT(VARCHAR(100), backdate, 23) = ?'\n cursor.execute(sql, hi)\n logger.info(rowCount)\n rowCount = rowCount + cursor.rowcount\n tt = 'Delete backdate Effect rows:' + str(rowCount)\n logger.info(tt)\n # 筛选lenddate为周末且不在workday中,将其日期加入数组holiday\n sql = 'SELECT DISTINCT CONVERT(VARCHAR(100), lenddate, 23) FROM AttLendWork WHERE datepart(DW, lenddate) IN (1,7)'\n cursor.execute(sql)\n for li in cursor:\n if li[0] not in workday and li[0] not in holiday:\n holiday.append(li[0])\n logger.info(holiday)\n # 为workday生成记录,然后删除holiday记录\n for wi in workday:\n t = 0\n wcnt = 0\n for hi in holiday:\n cursor.execute('select datediff(dd,?,?)', wi, hi)\n diff = cursor.fetchone()[0]\n if diff < 30 and diff > 0:\n # 执行日期替换\n sql = 'UPDATE AttLendWork SET lenddate = ? 
WHERE CONVERT(VARCHAR(100),lenddate,23) = ?'\n cursor.execute(sql, wi, hi)\n wcnt = wcnt + cursor.rowcount\n logger.debug('本次替换:')\n logger.debug(wcnt)\n holiday.remove(hi)\n if wcnt > 0:\n t = 1\n if wcnt > 30:\n logger.debug(\"替换超过30条,即可结束替换\")\n logger.debug(wcnt)\n break\n if t == 1:\n cursor.commit()\n if t == 0:\n try:\n logger.debug(\"holiday中不存在合格的替换日期,从普通日期中替换\")\n # sql = 'update AttLendWork set lenddate = ? where did in (select top 30 did from AttLendWork where lenddate = (select top 1 lenddate from AttLendWork where lenddate > ? group by lenddate order by count(lenddate) desc ))'\n # pdb.set_trace()\n sql = 'SELECT TOP 1 CONVERT( VARCHAR(100), lenddate, 23) FROM AttLendWork WHERE lenddate > ? GROUP BY lenddate ORDER BY count(lenddate) DESC'\n cursor.execute(sql, wi)\n tli = cursor.fetchone()[0]\n sql = 'UPDATE AttLendWork SET lenddate = ? WHERE did IN (SELECT TOP 30 did FROM AttLendWork WHERE lenddate = ?)'\n cursor.execute(sql, wi, tli)\n logger.debug('本次替换:')\n wcnt = cursor.rowcount\n logger.debug(wcnt)\n if wcnt > 0:\n cursor.commit()\n except:\n logger.warn('日期 %s 没有合适的替换日期 ' % wi)\n logger.debug('删除lenddate在holiday中的记录')\n sql = 'DELETE FROM AttLendWork WHERE CONVERT(VARCHAR(100), lenddate, 23) = ?'\n for hi in holiday:\n cursor.execute(sql, hi)\n logger.debug(cursor.rowcount)\n cursor.commit()\n if tmp:\n toperator = '王' + '_00'\n else:\n toperator = '王' + ny[2:4] + yHalf\n # pdb.set_trace()\n cursor.execute('UPDATE AttLendWork SET loperator = ?, boperator = ? 
WHERE 1=1', toperator, toperator)\n logger.debug(cursor.rowcount)\n cursor.commit()\n logger.info(hisy)\n # return(hisy)\n # for hi in hisy:\n\n\n# 随机匹配bid和rid,先使得bid的数目超过bid,然后打乱数组顺序,选取与bid相同数目的rid与bid组成匹配借阅条目\ndef gener(beginDate, endDate, Holiday, Workday, yearBefore, times=4, Reader='All', tmp=False):\n # gStat函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n # times: 本时间段内,平均每生的借阅次数\n # yearBefore:如果生成本学年的,值为0,如果生成上个学年的借阅记录,值为1,上上个学年,值为2,依次类推\n # 如果生成从前年份的借阅记录,比如一年以前,那么现在的六年级就是当时的五年级,对应的bid应当选五年级读者借过的\n # 而原先的六年级已经毕业,所以不用生成了\n if yearBefore > 5 or yearBefore < 0 or type(yearBefore) is not int:\n gStat = ['yearBefore:如果生成本学年的,值为0,如果生成上个学年的借阅记录,值为1,上上个学年,值为2,依次类推', 'yearBefore参数错误', False]\n logger.warn(gStat)\n pdb.set_trace()\n return gStat\n tGrade = [['一', 2.5], ['二', 3.5], ['三', 4], ['四', 4.5], ['五', 5.5], ['六', 4]]\n if yearBefore == 0:\n tGrid = tGbid = tGrade\n else:\n tGrid = tGrade[yearBefore:]\n tGbid = tGrade[: -yearBefore]\n ttRidBid = []\n trid_num = 0\n for tgr, tgb in myzip(tGrid, tGbid):\n # pdb.set_trace()\n rid = Rid(tgr[0])\n rL = len(rid)\n trid_num = trid_num + rL\n bid = Bid(tgb[0])\n bL = len(bid)\n # for tg in tGrade:\n # rid = Rid(tg[0])\n # rL = len(rid)\n # trid_num = trid_num + rL\n # bid = Bid(tg[0])\n # bL = len(bid)\n # 这里如果从前曾经借过的书越多,这个年级生成的条目占比就越多,如此循环,最后可致比例失衡,下面加入比例控制因子\n # 一年级生均2.5,二年级3.5,三4,四4.5,五5.5,六4\n # 总记录数 = 一年级人数 * 2.5 + 二年级人数 * 3.5 +三年级人数 * 4 +四年级人数 * 4.5 +五年级人数 * 5.5 +六年级人数 * 4\n if (bL <= rL):\n return False\n rid = rid * (bL // rL + 1)\n random.shuffle(rid)\n random.shuffle(bid)\n rid = rid[:bL]\n if bL > rL * tgb[1]:\n bL = round(rL * tgb[1])\n for i in range(bL):\n ti = bid[i] + [rid[i]]\n ttRidBid.append(ti)\n # pdb.set_trace()\n random.shuffle(ttRidBid)\n logger.debug('生成日期条目:')\n lDay(beginDate, endDate, Holiday, Workday, tmp)\n # 读者、日期匹配规则:\n # 先把所有lenddate出现次数少于50的记录全匹配一遍(避免日期丢失问题),然后剩余读者的按概率随机匹配\n try:\n sql = 'SELECT count(lenddate) cn,CONVERT(VARCHAR(100), lenddate, 23) lenddate INTO #t1 FROM AttLendWork GROUP BY 
lenddate'\n cursor.execute(sql)\n except:\n cursor.execute('TRUNCATE TABLE #t1')\n sql = 'INSERT INTO #t1 SELECT count(lenddate) cn,CONVERT(VARCHAR(100), lenddate, 23) lenddate FROM AttLendWork GROUP BY lenddate'\n cursor.execute(sql)\n sql = 'SELECT did FROM AttLendWork WHERE CONVERT(VARCHAR(100), lenddate, 23) IN (SELECT lenddate FROM #t1 WHERE cn < 50)'\n cursor.execute(sql)\n Adid = set()\n for ci in cursor.fetchall():\n Adid.add(ci[0])\n tnum = len(ttRidBid) - len(Adid)\n if tnum > 0:\n # --从剩余的日期条目随机抽取匹配读者条目,先抽取3倍需要量,再截取\n # pdb.set_trace()\n sql = 'select top %s did from AttLendWork order by checksum(newid())' % (str(tnum * 3))\n cursor.execute(sql)\n Bdid = set()\n for ci in cursor.fetchall():\n Bdid.add(ci[0])\n Cdid = list(Bdid - Adid)\n random.shuffle(Cdid)\n # 汇聚所有用于生成记录的日期条目\n Ddid = Cdid[:tnum] + list(Adid)\n else:\n Ddid = list(Adid)\n Ddid.sort()\n tLendWork = []\n for i in range(len(ttRidBid)):\n ttRidBid[i].append(Ddid[i])\n tLendWork.append(tuple(ttRidBid[i]))\n # pdb.set_trace()\n logger.info('预备有 %s 条记录写入LendWork' % len(tLendWork))\n sql = 'INSERT INTO LendWork( bid, Bcid, Rid, LendDate, ReturnDate, BackDate, loperator, boperator ) SELECT ?, ?, ?, LendDate, ReturnDate, BackDate, loperator, boperator FROM AttLendWork WHERE did = ?'\n cursor.executemany(sql, tLendWork)\n # for i in tLendWork:\n # cursor.execute(sql, i)\n # cursor.commit()\n # logger.info(cursor.rowcount)\n cursor.commit()\n # pdb.set_trace()\n logger.info('Success!')\n\n\nif __name__ == '__main__':\n o1 = 2\n o2 = 2\n if o1 > 1:\n bidInuse = set()\n beginDate = '2017-02-14'\n endDate = '2017-06-11'\n holiday = ['04-02', '04-03', '04-04', '04-29', '04-30', '05-01', '05-28', '05-29', '05-30']\n workday = ['04-01', '05-27']\n gener(beginDate, endDate, holiday, workday, yearBefore=0, tmp=1)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2016-09-02'\n endDate = '2017-01-12'\n holiday = ['09-15', '09-16', '09-17', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', 
'12-31',\n '01-01', '01-02']\n workday = ['09-18', '10-08', '10-09']\n gener(beginDate, endDate, holiday, workday, yearBefore=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2016-02-25'\n endDate = '2016-07-05'\n holiday = ['04-02', '04-03', '04-04', '04-30', '05-01', '05-02', '06-09', '06-10', '06-11']\n workday = ['06-12']\n gener(beginDate, endDate, holiday, workday, yearBefore=1, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2015-09-01'\n endDate = '2016-01-25'\n holiday = ['09-03', '09-04', '09-05', '09-26', '09-27', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06',\n '10-07', '01-01', '01-02', '01-03']\n workday = ['09-06', '10-10']\n gener(beginDate, endDate, holiday, workday, yearBefore=1, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2015-03-09'\n endDate = '2015-07-03'\n holiday = ['04-04', '04-05', '04-06', '05-01', '05-02', '05-03', '06-20', '06-21', '06-22']\n workday = []\n gener(beginDate, endDate, holiday, workday, yearBefore=2, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2014-09-01'\n endDate = '2015-02-05'\n holiday = ['09-05', '09-06', '09-07', '09-26', '09-27', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06',\n '10-07', '01-01', '01-02', '01-03']\n workday = ['09-28', '10-11', '01-04']\n gener(beginDate, endDate, holiday, workday, yearBefore=2, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2014-02-17'\n endDate = '2014-07-03'\n holiday = ['04-05', '04-06', '04-07', '05-01', '05-02', '05-03', '05-31', '06-01', '06-02']\n workday = ['05-04']\n gener(beginDate, endDate, holiday, workday, yearBefore=3, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2013-09-02'\n endDate = '2014-01-15'\n holiday = ['09-19', '09-20', '09-21', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', '01-01']\n workday = ['09-22', '09-29', '10-12']\n gener(beginDate, endDate, holiday, workday, yearBefore=3, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2013-02-18'\n endDate = '2013-07-04'\n holiday = 
['04-04', '04-05', '04-29', '04-30', '05-01', '06-10', '06-11', '06-12']\n workday = ['04-07', '04-27', '04-28', '06-08', '06-09']\n gener(beginDate, endDate, holiday, workday, yearBefore=4, tmp=0)\n if o1 > 1:\n bidInuse = set()\n beginDate = '2012-09-03'\n endDate = '2013-01-15'\n holiday = ['09-30', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', '01-01', '01-02', '01-03']\n workday = ['09-29', '10-08', '01-05', '01-06']\n gener(beginDate, endDate, holiday, workday, yearBefore=4, tmp=0)\n # 测试用句>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # 测试用句>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # 测试用句>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # 测试用句>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # hisy = lDay( beginDate, endDate, holiday, workday )\n # Lw = gener('2015')\n # rows = Bid( '一' )\n # rid = Rid( '一' )\n" }, { "alpha_fraction": 0.5727455019950867, "alphanum_fraction": 0.6008015871047974, "avg_line_length": 33.17808151245117, "blob_id": "a29f7cae21235a0d23adb21185bdc16dc51f8dc8", "content_id": "1f506ab7050a672e6771c237ad7baa81340ea4ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2851, "license_type": "no_license", "max_line_length": 127, "num_lines": 73, "path": "/InOutSync/待整理/inReader15ji.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n 本程序用于向金典图书管理程序的数据库导入读者数据\n 读者的部门需要提前在图书管理程序中建立,并记录下DepID(部门id),Rgid(读者权限id)\n password可以用randrom函数自动生成,idDisp可以用程序生成(部门内部唯一即可),这两项其实可以为NULL\n \n 读者Name和Rid则需要事先按行对应存入datasource文本文件\n \n 下面是micsql查询时用到的相关语句,去掉双短横线注释即可应用\n --select * from Reader order by Rid desc;\n --select count(*) from Reader;\n --insert into Reader \n --select rgid from Reader group by rgid;\n --select count(idDisp) as cnt, idDisp from Reader group by idDisp order by iddisp;\n --select * from Reader where rgid = 19;\n --insert into Reader ( 
Name, Rid, DepID, Rgid) values ( '测试2', '00031', 45, 19);\n --select *, DepID, idDisp from Reader where DepID = 52 order by Rid;\n --delete from Reader where rid = '000300';\n --select count(distinct DepID) from Reader;\n --select * from department;\n --2015级一、二、三班 DepID 49,50,51\t\n --包含idDisp列的表有--->\t;BorrowRightList\t;skList\t\t;FromList\t;jcff\t;jcpc\t;jcrk\t;Reader\n --在Read中,idDisp的规则是从本部门第一个读者开始按照1,2,3...的规律增加guestguest\n \n \n\"\"\"\n\n\nimport pdb, random, re, random\n\nfrom fc.conn_SQL import mkcon\nfrom fc.LOG_sf import logger\n\ncursor, miconn = mkcon( s = 'mic' )\n\ndef importReader( fname, DepID, Rgid, cursor = cursor, miconn = miconn ):\n logger.info('Func ----> importReader( fname, DepID, Rgid, cursor = cursor, miconn = miconn )')\n fdata = open( fname, 'r' )\n flines = fdata.readlines()\n try:\n cursor.execute( 'select idDisp from Reader where DepID = ? order by idDisp', DepID )\n tidsp = cursor.fetchall()\n except:\n #tidsp = None \n pass\n if tidsp:\n ti0 = set( range( 1, len(flines) + tidsp[-1][0] + 1 ) )\n ti1 = set()\n for tii in tidsp:\n ti1.add( tii[0] )\n idsp = list( ti0 - ti1 )\n idsp.sort\n else:\n idsp = list( range( 1, len(flines) + 1 ) )\n logger.info(idsp)\n ts = []\n cnt = 0\n for ifl in flines:\n tn = re.split('\\t|\\n',ifl)\n ttn = tn[:2] + [ DepID, Rgid , idsp[ cnt ], str(random.randint( 100000, 999999 )) ]\n ts.append( tuple(ttn) )\n cnt += 1\n logger.debug(ts)\n cursor.executemany( 'insert into Reader ( Name, Rid, DepID, Rgid, idDisp, GuestPassWord ) values ( ?, ?, ?, ?, ?, ?)', ts )\n miconn.commit()\n \nif __name__ == '__main__':\n #fname = 'd:\\python_learn\\sql\\Data\\\\15test.txt'\n # fname = None\n fname = 'd:\\python_learn\\sql\\Data\\\\17_1.txt'\n DepID = 61\n Rgid = 19\n importReader(fname,DepID,Rgid)\n" }, { "alpha_fraction": 0.5884160399436951, "alphanum_fraction": 0.6080811023712158, "avg_line_length": 36.19428634643555, "blob_id": "20a80a3eab9078c0d49bd051b579cd786e0d1417", "content_id": 
"a4d03e36228141cdfc0562668ca2966cca326ab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9432, "license_type": "no_license", "max_line_length": 204, "num_lines": 175, "path": "/GenerLendWork/gener_LendWork2.0.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nimport random, pdb\nfrom itertools import zip_longest as myzip\n\nfrom fc.conn_SQL import *\nfrom fc.LOG_sf import logger\n\ncursor, mkconn = mkcon('mic')\n\nfrom RidBidDate import Rid, Bid, lDay\n\n\"\"\"\nrid:读者id,也就是借书证号,数据库中每一个id号和一个读者一一对应\nbid:书籍id,每一个id号和唯一的一本书一一对应\nlDay: lendDay,生成借阅记录中日期部分的函数\n 内容包括借书操作员(loperator),还书操作员(boperator)\n 借出日期(lenddate),还书日期(backdate),应还日期(returndate)\n\"\"\"\n\n\ndef gener(beginDate, endDate, Holiday, Workday, yearBefore, times=5, Reader='All', tmp=False, temp=False, tempNum=0):\n \"\"\"\n :param beginDate: 要生成借阅记录的起始时间,一般为学期开始 格式为: yyyy-mm-dd\n :param endDate: 要生成的借阅记录的结束时间 格式为: yyyy-mm-dd\n :param Holiday: 在 beginDate 和 endDate 之间的法定节假日,例如十一假期,中秋节等\n 格式为: [mm-dd,mm-dd,...] 是一个list,每年不尽相同,要根据实际调整\n :param Workday: 在 beginDate 和 endDate 之间法定节假日调休而产生的工作日\n 格式为: [mm-dd,mm-dd,...] 
是一个list,每年不尽相同,要根据实际调整\n :param yearBefore: 如果为0,表示生成本学年度的借阅记录.\n (一个学年度两个学期,从一个暑假到另一个暑假之间的时间\n 同一学年度内,学生的年级不会发生变化,如果进入下一学年度,学生的年级将\n 升高一级,例如从五年级升入六年级)\n 如果值为n,表示生成从当前年份向前数第n年的借阅记录.\n :param times: 典型值是一个学期4次,表示在起始和结束日期之间,平均为每个学生生成times条借阅记录.\n :param tempNum: 参数用于决定需要生成多少条借阅记录,可以在配置文件config.py中设定\n 例如:用于20171016借阅记录生成条目总数的限制,开学仅仅一个半月,限制在960条吧\n 此时可以设置tempNum = 960,那么仅仅会有960条生成的记录写入数据库\n :param Reader: 留用\n :param tmp: 测试程序时,或者学期中间检查,生成一些临时记录,将其值置为1.此时生成的借阅记录的loperator为<王_00>.\n 测试完毕,可用 \"delete from LendWork where loperator='王_00'\" 语句仅将测试数据删除.\n ·注意在sql中,字符串是用单引号包围起来的部分,如果用双引号或者反引号包围,\n 那么表示的是列名,例如同样是上面的句子,如果这样写\n delete from LendWork where loperator=\"王_00\"\n 就会报找不到列名 王_00 的错误\n :param temp: (会生成部分未归还记录)通常是按整个学期生成借阅记录,如果学期还未结束,有检查的来,需要临时生成一些借阅\n 记录,将此参数置为Ture,则可以生成一些<未归还>的书籍,让生成的结果更逼真.\n bug:有学生会在重叠的时间段内借阅两本书,以后可以在最后再加一个筛选函数解决\n :gStat: 函数状态标志,以后每个函数都应包含名为Stat的函数状态标志\n :return: None or gStat(函数状态标志,以后每个函数都应包含名为Stat的函数状态标志)\n \"\"\"\n # 参数范围校验\n if yearBefore > 5 or yearBefore < 0 or type(yearBefore) is not int:\n gStat = ['yearBefore:如果生成本学年的,值为0,如果生成上个学年的借阅记录,值为1,上上个学年,值为2,依次类推', 'yearBefore参数错误', False]\n logger.warn(gStat)\n return gStat\n \"\"\"\n tGrade: 各年级的times,平均借阅次数\n 这里如果从前曾经借过的书越多,这个年级生成的条目占比就越多,如此循环,最后可致比例失衡,下面加入比例控制因子\n 一年级生均2.5,二年级3.5,三4,四4.5,五5.5,六4\n 总记录数 = 一年级人数 * 2.5 + 二年级人数 * 3.5 +三年级人数 * 4 +四年级人数 * 4.5 +五年级人数 * 5.5 +六年级人数 * 4\n \"\"\"\n bidInuse = set()\n times = times/4\n tGrade = [['一', 2.5*times], ['二', 3.5*times], ['三', 4*times], ['四', 4.5*times], ['五', 5.5*times], ['六', 4*times]]\n if yearBefore == 0:\n tGrid = tGbid = tGrade\n else:\n \"\"\"\n 这里的巧妙写法,应该添加详细注释\n 下面的语句中,会把tGrid和tGbid对应起来,加入yearBefore的值为1\n 则tGrid的序列是二三四五六,\n tGbid的序列是一二三四五,\n rid表示的是读者目前分别就读于二三四五六年级,那么1年以前,\n 他们所在的年级应当是一二三四五年级,所以那个时间,他们应当\n 借阅的书籍应该分别是从前一二三四五年级学生借阅过的书籍,所以\n tGbid就选一二三四五,\n \"\"\"\n tGrid = tGrade[yearBefore:]\n tGbid = tGrade[: -yearBefore]\n ttRidBid = []\n trid_num = 0\n for tgR, tgB in myzip(tGrid, tGbid):\n # 选取需要生成借阅记录的读者(当前数据库内的真实读者)\n rid = 
Rid(tgR[0])\n rL = len(rid)\n if rL == 0:\n logger.warn('@@@未找到任何 %s年级 读者。' % tgR[0])\n continue\n trid_num = trid_num + rL\n # 选取恰当的书籍(bid),如上面选取的读者是在选定的时间期间,就读于五年级,则应选取五年级曾经借阅过的书籍\n bid = Bid(tgB[0])\n bL = len(bid)\n if (bL <= rL):\n return False\n # 让读者(rid)倍增到与bid(bL)数目相等\n rid = rid * (bL // rL + 1)\n # shuffle(rid),对rid进行随机排序(打乱顺序)\n random.shuffle(rid)\n random.shuffle(bid)\n rid = rid[:bL]\n # 如果bid数目大于每个年级预设的平均借阅次数(tgB[i])与读者数(rL)的乘积,重设rL\n # round(x),对x进行四舍五入\n if bL > rL * tgB[1]:\n bL = round(rL * tgB[1])\n for i in range(bL):\n ti = bid[i] + [rid[i]]\n ttRidBid.append(ti)\n random.shuffle(ttRidBid)\n logger.debug('生成日期条目:')\n lDay(beginDate, endDate, Holiday, Workday, tmp, temp)\n \"\"\"\n 读者、日期匹配规则:\n 先把所有lenddate出现次数少于50的记录全匹配一遍(避免日期丢失问题),然后剩余读者的按概率随机匹配\n \"\"\"\n try:\n sql = 'SELECT count(lenddate) cn,CONVERT(VARCHAR(100), lenddate, 23) lenddate INTO #t1 FROM AttLendWork GROUP BY lenddate'\n cursor.execute(sql)\n except:\n cursor.execute('TRUNCATE TABLE #t1')\n sql = 'INSERT INTO #t1 SELECT count(lenddate) cn,CONVERT(VARCHAR(100), lenddate, 23) lenddate FROM AttLendWork GROUP BY lenddate'\n cursor.execute(sql)\n sql = 'SELECT did FROM AttLendWork WHERE CONVERT(VARCHAR(100), lenddate, 23) IN (SELECT lenddate FROM #t1 WHERE cn < 50)'\n cursor.execute(sql)\n Adid = set()\n for ci in cursor.fetchall():\n Adid.add(ci[0])\n tnum = len(ttRidBid) - len(Adid)\n if tnum > 0:\n # --从剩余的日期条目随机抽取匹配读者条目,先抽取3倍需要量,再截取\n # pdb.set_trace()\n sql = 'select top %s did from AttLendWork order by checksum(newid())' % (str(tnum * 3))\n cursor.execute(sql)\n Bdid = set()\n for ci in cursor.fetchall():\n Bdid.add(ci[0])\n Cdid = list(Bdid - Adid)\n random.shuffle(Cdid)\n # 汇聚所有用于生成记录的日期条目\n Ddid = Cdid[:tnum] + list(Adid)\n else:\n Ddid = list(Adid)\n Ddid.sort()\n tLendWork = []\n for i in range(len(ttRidBid)):\n ttRidBid[i].append(Ddid[i])\n tLendWork.append(tuple(ttRidBid[i]))\n # 学期中间检查,前面生成的记录过多,下面语句用于截取部分数据。\n # 例如:用于20171016借阅记录生成条目总数的限制,开学仅仅一个半月,限制在960条吧\n # 
tempNum 参数用于决定需要生成多少条借阅记录,可以在配置文件config.py中设定\n if temp:\n random.shuffle(tLendWork)\n tLendWork = tLendWork[:tempNum]\n # pdb.set_trace()\n logger.info('预备有 %s 条记录写入LendWork' % len(tLendWork))\n sql = 'INSERT INTO LendWork( bid, Bcid, Rid, LendDate, ReturnDate, BackDate, loperator, boperator ) SELECT ?, ?, ?, LendDate, ReturnDate, BackDate, loperator, boperator FROM AttLendWork WHERE did = ?'\n cursor.executemany(sql, tLendWork)\n cursor.commit()\n logger.info('Success!')\n\n\nif __name__ == '__main__':\n \"\"\"\n 1.主程序需要的参数写在config模块中,并且以函数的返回值(kw)的方式传递回主程序\n 2.这里用了一个option,因为每次生成借阅记录,都要重新组织日期参数,\n 传递一个option给getConfig,用来选择相应的日期参数\n \"\"\"\n from config import getConfig\n option = 20181217\n kw = getConfig(option)\n if kw == -1:\n logger.warn('Break in function getConfig!')\n else:\n gener(**kw)\n" }, { "alpha_fraction": 0.649921178817749, "alphanum_fraction": 0.7195314168930054, "avg_line_length": 68.375, "blob_id": "07b184e9663f19010f70764d5c80e986cc1e91fe", "content_id": "b9212bd9393fc111f0557708e3285f64bd3a5143", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 5385, "license_type": "no_license", "max_line_length": 231, "num_lines": 64, "path": "/GraduationUpgrade/@@@毕业班记录备份至abklendwork20171016UTF-8.sql", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "--select rid, depname from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid;\n--select r.rid, depname,bid,bcid,lenddate,loperator from reader r, department d,lendwork ld where left(d.depname,1) = '六' and r.rid = ld.rid and r.depid=d.depid;\n--select * from department;\n--select * from reader;\n--select * from abklendwork where left(loperator,1) = '三' ;\n--select * from lendwork;\n--select * from lendwork where backdate is 
NULL;\n-----------------------------------------------------------------------------------------------------\n--1.保存毕业班的借阅史\n\t--金典系统的会将毕业班的借阅记录完全删除\n\t--我希望保留这部分记录,以便为<使用程序生成借阅记录>提供基础数据\n\t--所以我将这部分借阅记录导入abklendwork中保存\n\t--<借阅记录生成程序>需要知道:对于每条记录,学生借阅这本书的时候正在读几年级\n\t--所以我在将毕业班记录导入abklendwork时,将记录按学生借阅时正在就读的年级分类,并通过在loperator前面加上相应汉字\n\t\t--(如五姓名,表示当时正在就读五年级) 对借阅记录按借阅年级进行分类标记\n\t--下面语句就是将毕业班借阅记录按照如上要求导入abklendwork的详细过程\n--选取毕业班借阅所有借阅记录\n--testI 6395\n--select r.rid, depname,bid,bcid,lenddate,loperator from reader r, department d,lendwork ld where left(d.depname,1) = '六' and r.rid = ld.rid and r.depid=d.depid;\n--select rid, depname from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid;\n--testII 6395\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid);\n--testIII 6393 排除<未归还>的借阅记录\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL;\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL order by lenddate desc;\n--借阅时在读年级的判定 以毕业时间 如2017年7月15日为基准,减去1年,lenddate在2016年7月16至2017年7月15之间的,就是六年级时借阅的,依次类推\n--选取六年级时借过的书 1064\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL and lenddate between '2016-07-16' and '2017-07-15' order by lenddate desc;\n--选取五年级时借过的书 1747\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL and lenddate between '2015-07-16' and '2016-07-15' order by lenddate desc;\n--选取四年级时借过的书 1586\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is 
not NULL and lenddate between '2014-07-16' and '2015-07-15' order by lenddate desc;\n--选取三年级时借过的书 1064\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL and lenddate between '2013-07-16' and '2014-07-15' order by lenddate desc;\n--选取二年级时借过的书 932\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL and lenddate between '2012-07-16' and '2013-07-15' order by lenddate desc;\n--选取一年级时借过的书 0\n--select * from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL and lenddate between '2011-07-16' and '2012-07-15' order by lenddate desc;\n\n----先把毕业班的记录写入一个临时表格,然后修改loperator标记好借阅时就读的年级,然后导入abklendwork\n--select * into #t1 from lendwork where rid in (select rid from reader r, department d where left(d.depname,1) = '六' and r.depid = d.depid) and boperator is not NULL ;\n--select * from #t1;\n--更改loperator标记借阅时所在年级\n--update #t1 set loperator = '六'+loperator where lenddate between '2016-07-16' and '2017-07-15';\n--update #t1 set loperator = '五'+loperator where lenddate between '2015-07-16' and '2016-07-15';\n--update #t1 set loperator = '四'+loperator where lenddate between '2014-07-16' and '2015-07-15';\n--update #t1 set loperator = '三'+loperator where lenddate between '2013-07-16' and '2014-07-15';\n--update #t1 set loperator = '二'+loperator where lenddate between '2012-07-16' and '2013-07-15'; \n\n----drop table #t1;\n\n----把修改好的记录添加进abklendwork\n--查询原有记录数14492\n--select * from abklendwork;\n--查询原有六年级记录数 3811\n--select * from abklendwork where left(loperator,1) = '六';\n----从临时表中把整理好的数据插入abklendwork\n--insert into abklendwork select * from #t1;\n--插入后的验证 14492 + 6393 = 20885\n--select * from abklendwork;\n--select 14492 + 6393;\n--插入后的验证 3811 + 1064 = 4875\n--select * from 
abklendwork where left(loperator,1) = '六'\n--select 3811 + 1064;" }, { "alpha_fraction": 0.5793848633766174, "alphanum_fraction": 0.6034912467002869, "avg_line_length": 26.930233001708984, "blob_id": "ee5b6aa6322f6d29a7097a3cebd132ccbfbab986", "content_id": "1e8c8299fd97ebd763a5bf845e4e174c369f7798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 150, "num_lines": 43, "path": "/InOutSync/待整理/mic_to_my_lendwork.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# --coding: utf-8--\n#已经运行过,此为存档文件,若仍需运行,请重新检查参数。并把下面两行退注释\n#import mysql.connector\n#import pyodbc\n\n#MYsql configure\nmy_config = {\n 'user':'root',\n 'password':'sa',\n 'database':'sql-learn'\n }\nlendwork_wmy = 'insert into lendwork( rid, bcid, lenddate, returndate, backdate, loperator, boperator, bid ) values( %s, %s, %s, %s, %s, %s, %s, %s )'\n\nMy_conn = mysql.connector.connect( **my_config )\nmy_cursor = My_conn.cursor()\n\n#Microsql configure\nMic_config = r'driver={SQL Server};server=localhost;uid=sa;pwd=sa;database=easybook'\n\nmic_conn = pyodbc.connect( Mic_config )\nmic_cursor = mic_conn.cursor()\n\nyearlist = [ '2011', '2012', '2013', '2014', '2015', '2016' ]\nsql0 = 'select * from lendwork where year(lenddate) ='\n\nfor yy in yearlist:\n temp = input(\"press N to quit:\\n\")\n if temp == 'N':\n break\n sql_e = sql0 + yy\n mic_cursor.execute( sql_e )\n mic_rows = mic_cursor.fetchall()\n count_t = 0\n for row in mic_rows:\n# print(row)\n arg_wmy = []\n for row_mem in row:\n arg_wmy.append( row_mem )\n my_cursor.execute( lendwork_wmy, arg_wmy )\n My_conn.commit()\n count_t += 1\n print( count_t )\n print( '\\n' )\n\n\n" }, { "alpha_fraction": 0.6863100528717041, "alphanum_fraction": 0.6899365186691284, "avg_line_length": 23.511110305786133, "blob_id": "a7f71e1341d3e18e84c49d5667b8bd3fc287e0ca", "content_id": "f26fd59e839507235c5f6cb6a872c9212efd1272", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1103, "license_type": "no_license", "max_line_length": 84, "num_lines": 45, "path": "/GenerLendWork/Book Insertion/fc/LOG_sf.py", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom logging import getLogger\nfrom logging import FileHandler \nfrom logging import StreamHandler \nfrom logging import Formatter \n\n#code for log\n#Using: logger.info( 'Some information' ) to replace print( 'Some information' )\nlogger = getLogger( 'SF:' )\nlogger.setLevel( logging.DEBUG)\n#logging.basicConfig( level = logging.INFO )\n\n#LOG_FILE = 'Log_test.log'\n\nfh = FileHandler( 'Log-test.log' )\nfh.setLevel( logging.INFO )\n\nch = StreamHandler()\n#ch.setLevel( logging.WARN)\nch.setLevel( logging.DEBUG)\n\nformatter = Formatter( '%(asctime)s - %(name)s - %(levelname)s:\\n%(message)s\\n' )\nformatter_ch = Formatter( '%(name)s: %(message)s' )\nfh.setFormatter( formatter )\nch.setFormatter( formatter_ch )\n\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\n\"\"\"\nThe code below is a test for module logging.\n\"\"\"\ntts = '0'\nttn = int(tts) \nlogger.info( 'ttn = %d ' % ttn )\nlogger.debug( 'Debug:ttn = %d ' % ttn )\nldebug = logger.debug\nlinfo = logger.info\nldebug( 'LDB\\n\\n rename loggger.debug to ldebug!!' 
)\ntry:\n print( 10 / ttn )\nexcept Exception as e:\n ldebug( e )\n" }, { "alpha_fraction": 0.6329254508018494, "alphanum_fraction": 0.7141268253326416, "avg_line_length": 44, "blob_id": "0e6fd2c432affcdfa79ed995ccd8162114fa712d", "content_id": "da3d7a104f76db29bb5bceff6e6e9e7057707f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 150, "num_lines": 20, "path": "/GenerLendWork/Book Insertion/@@@向数据库直接插入指定数据测试.sql", "repo_name": "zhixingheyi666/tsgSql", "src_encoding": "GB18030", "text": "-- select * from lendwork where loperator = '王_00';\n-- -- delete from lendwork where loperator = '王_00';\n\n\n-- select * from bookclass order by bid desc;\n\n-- 根据查询,inForm大约对应每年的增量,程序中的标志大概是总括登记号\n-- select distinct inForm from bookclass;\n-- select distinct Ddid from bookclass;\n-- select * from bookclass where inForm in (3,4,5);\n-- select * from bookclass where inForm in (6);\n-- select * from bookclass where inForm in (8);\n-- select * from bookclass where Clerk = '王_00'\n-- --delete from bookclass where Clerk = '王_00';\n-- 写一段语句,插入数据用于增加图书册数,等检查完毕,可以利用约定标记,轻易删除插入的数据\n-- 约定插入的图书的Bcid为(# # Bcid必须是Booklist表中存在的,否则会报错),\n-- \tbid为06****,EnterDate视情况而定(可以写时间字符串格式,如2018-10-17 08:26:28.297),\n--\tprice为11,sk为2,inForm与同年的inForm相同,Ddid为NULL,Clerk为王_00\n-- 示例:\n-- insert into bookclass(Bcid, Bid, State, Clerk, EnterDate, price, sk, inForm) Values('I267/234','060000',0,'王_00','2018-10-17 08:26:28.297',11,2,8);" }, { "alpha_fraction": 0.26868125796318054, "alphanum_fraction": 0.4451890289783478, "avg_line_length": 37.02404022216797, "blob_id": "0a042d753a0227b285fa2c892fbf6ebca48c78b1", "content_id": "532d2231d1062676f27e07f19bd8dff22197689c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8481, "license_type": "no_license", "max_line_length": 148, "num_lines": 208, "path": "/GenerLendWork/config.py", "repo_name": 
"zhixingheyi666/tsgSql", "src_encoding": "UTF-8", "text": "# _*_coding:utf-8_*_\n\"\"\"\n1.为了主程序简洁清楚,主程序需要的参数写在这个模块中,并且以函数的返回值(kw)的方式传递回主程序\n2.这里用了一个option,因为每次生成借阅记录,都要重新组织日期参数,\n 从主程序传递一个option,用来选择相应的日期参数\n\"\"\"\nfrom fc.LOG_sf import logger\n\n__author__ = 'Master Wang'\n\n\ndef getConfig(option):\n \"\"\"\n :param option: 运行<生成借阅记录程序>的日期 格式为: yyyymmdd\n :return: dict,事先在这里编写好的参数\n :param tempNum: 参数用于决定需要生成多少条借阅记录,可以在配置文件config.py中设定\n 例如:用于20171016借阅记录生成条目总数的限制,开学仅仅一个半月,限制在960条吧\n 此时可以设置tempNum = 960,那么仅仅会有960条生成的记录写入数据库\n :param temp: 由于程序没有添加相应的处理语句,这参数和tempNum两个参数不能省略,\n 当temp设定为False时,tempNum的设定值将不会被程序采用\n ··注意,times参数这里并没有设置接口,如果需要,可以直接修改gener函数对应位置的默认值\n \"\"\"\n if 1 == 1:\n if option == 20181217:\n bidInuse = set()\n beginDate = '2018-09-03'\n endDate = '2018-12-17'\n holiday = ['09-24', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07']\n workday = ['09-29', '09-30']\n yearbefore = 0\n tmp = True\n temp = True\n tempNum = 2200\n\n if option == 20181112:\n bidInuse = set()\n beginDate = '2018-09-03'\n endDate = '2018-11-16'\n holiday = ['09-24', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07']\n workday = ['09-29', '09-30']\n yearbefore = 0\n tmp = True\n temp = True\n tempNum = 1700\n\n if option == 20181105:\n bidInuse = set()\n beginDate = '2018-09-03'\n endDate = '2018-11-04'\n holiday = ['09-24', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07']\n workday = ['09-29', '09-30']\n yearbefore = 0\n tmp = True\n temp = True\n tempNum = 1600\n\n if option == 20180926:\n bidInuse = set()\n beginDate = '2018-09-03'\n endDate = '2018-09-26'\n holiday = ['09-24']\n workday = []\n yearbefore = 0\n tmp = True\n temp = True\n tempNum = 640\n\n if option == 20180925:\n bidInuse = set()\n beginDate = '2018-03-05'\n endDate = '2018-06-22'\n holiday = ['04-05', '04-06', '04-07', '04-29', '04-30', '05-01', '05-17', '05-18', '05-16']\n workday = ['04-08', '04-28']\n yearbefore = 1\n tmp = False\n temp = False\n tempNum = 
1500\n\n if option == 20180625:\n bidInuse = set()\n beginDate = '2018-03-05'\n endDate = '2018-06-22'\n holiday = ['04-05', '04-06', '04-07', '04-29', '04-30', '05-01', '05-17', '05-18', '05-16']\n workday = ['04-08', '04-28']\n yearbefore = 0\n tmp = False\n temp = False\n tempNum = 3200\n\n if option == 20180107:\n bidInuse = set()\n beginDate = '2017-09-04'\n endDate = '2018-01-07'\n holiday = ['10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', '10-08', '12-30', '12-31', '01-01']\n workday = ['09-30']\n tmp = False\n temp = True\n tempNum = 2800\n yearbefore = 0\n\n if option == 20171110:\n bidInuse = set()\n beginDate = '2017-09-04'\n endDate = '2017-11-10'\n holiday = ['10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', '10-08']\n workday = ['09-30']\n tmp = False\n temp = True\n tempNum = 1516\n yearbefore = 0\n try:\n # kw = dict(bidInuse=bidInuse, beginDate=beginDate, endDate=endDate, holiday=holiday, workday=workday, temp=temp)\n kw = dict(beginDate=beginDate, endDate=endDate, Holiday=holiday, Workday=workday, yearBefore=yearbefore, temp=temp, tmp=tmp,tempNum=tempNum)\n return kw\n except BaseException as e:\n print(\"No option matches,Please check the option string!!\")\n logger.warn(e)\n logger.warn(\"No option matches,Please check the option string!!\")\n return -1\n\n\n\"\"\"\n if 1 == 1:\n if option == 20171101:\n bidInuse = set()\n beginDate = '2017-09-04'\n endDate = '2017-11-01'\n holiday = ['10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', '10-08']\n workday = ['09-30', '05-27']\n tmp = False\n temp = True\n tempNum = 960\n yearbefore = 0\n if 1 == 1:\n if option == 20160706:\n bidInuse = set()\n beginDate = '20-17-02-14'\n endDate = '2017-06-11'\n holiday = ['04-02', '04-03', '04-04', '04-29', '04-30', '05-01', '05-28', '05-29', '05-30']\n workday = ['04-01', '05-27']\n if 1 == 1:\n if option == -1:\n bidInuse = set()\n beginDate = '2017-02-14'\n endDate = '2017-06-11'\n holiday = ['04-02', '04-03', '04-04', 
'04-29', '04-30', '05-01', '05-28', '05-29', '05-30']\n workday = ['04-01', '05-27']\n if option == 1:\n bidInuse = set()\n beginDate = '2016-09-02'\n endDate = '2017-01-12'\n holiday = ['09-15', '09-16', '09-17', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07',\n '12-31', '01-01', '01-02']\n workday = ['09-18', '10-08', '10-09']\n if option == 1:\n bidInuse = set()\n beginDate = '2016-02-25'\n endDate = '2016-07-05'\n holiday = ['04-02', '04-03', '04-04', '04-30', '05-01', '05-02', '06-09', '06-10', '06-11']\n workday = ['06-12']\n if option == 1:\n bidInuse = set()\n beginDate = '2015-09-01'\n endDate = '2016-01-25'\n holiday = ['09-03', '09-04', '09-05', '09-26', '09-27', '10-01', '10-02', '10-03', '10-04', '10-05',\n '10-06', '10-07', '01-01', '01-02', '01-03']\n workday = ['09-06', '10-10']\n if option == 1:\n bidInuse = set()\n beginDate = '2015-03-09'\n endDate = '2015-07-03'\n holiday = ['04-04', '04-05', '04-06', '05-01', '05-02', '05-03', '06-20', '06-21', '06-22']\n workday = []\n if option == 1:\n bidInuse = set()\n beginDate = '2014-09-01'\n endDate = '2015-02-05'\n holiday = ['09-05', '09-06', '09-07', '09-26', '09-27', '10-01', '10-02', '10-03', '10-04', '10-05',\n '10-06', '10-07', '01-01', '01-02', '01-03']\n workday = ['09-28', '10-11', '01-04']\n gener(beginDate, endDate, holiday, workday, yearBefore=2, tmp=0)\n if option == 1:\n bidInuse = set()\n beginDate = '2014-02-17'\n endDate = '2014-07-03'\n holiday = ['04-05', '04-06', '04-07', '05-01', '05-02', '05-03', '05-31', '06-01', '06-02']\n workday = ['05-04']\n if option == 1:\n bidInuse = set()\n beginDate = '2013-09-02'\n endDate = '2014-01-15'\n holiday = ['09-19', '09-20', '09-21', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07',\n '01-01']\n workday = ['09-22', '09-29', '10-12']\n if option == 1:\n bidInuse = set()\n beginDate = '2013-02-18'\n endDate = '2013-07-04'\n holiday = ['04-04', '04-05', '04-29', '04-30', '05-01', '06-10', '06-11', '06-12']\n workday = 
['04-07', '04-27', '04-28', '06-08', '06-09']\n if option == 1:\n bidInuse = set()\n beginDate = '2012-09-03'\n endDate = '2013-01-15'\n holiday = ['09-30', '10-01', '10-02', '10-03', '10-04', '10-05', '10-06', '10-07', '01-01', '01-02',\n '01-03']\n workday = ['09-29', '10-08', '01-05', '01-06']\n\"\"\"\n" } ]
12
nissan997/root_finding_py
https://github.com/nissan997/root_finding_py
2e175a186766a6a70cea0e949f0bfcb65bf073a4
06fcc0f19b456201a2a5207c67dfc8830ae8c9d2
7490a6d26c7cd71aecd786aa1b40e32cc957c031
refs/heads/master
2020-07-04T15:14:23.914980
2019-08-14T09:52:44
2019-08-14T09:52:44
202,322,056
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5301724076271057, "alphanum_fraction": 0.5495689511299133, "avg_line_length": 21.095237731933594, "blob_id": "af9f493ae122c2665c2c05edf5253f6a095895c2", "content_id": "ac1db147cbbb27b7cc3b648babf353ba37a6c114", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 56, "num_lines": 21, "path": "/iteration.py", "repo_name": "nissan997/root_finding_py", "src_encoding": "UTF-8", "text": "import math\n#g(x) of the function f(x)\ndef g(x):\n result=(math.cos(x)+1)/3\n return (result)\ndef iteration(guess,tol):\n result=0\n a=g(guess)\n b=g(a)\n n=100\n for i in range(0,n):\n if(round(a,tol)==round(b,tol)):\n result=a\n break\n else:\n a=b\n b=g(b)\n return result\ntol=int(input(\"Enter the tolarence in decimal places:\"))\nanswer = iteration(3.2,tol)\nprint('answer:',round(answer,tol))\n" }, { "alpha_fraction": 0.5091241002082825, "alphanum_fraction": 0.5510948896408081, "avg_line_length": 22.826086044311523, "blob_id": "3f8ce8a087b082d464cd584eb617f48f7bf08cb0", "content_id": "98f6630626f770eeaf78a30c13b4c446d2120527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/newtonraphson.py", "repo_name": "nissan997/root_finding_py", "src_encoding": "UTF-8", "text": "import math\ndef f(x):\n return(3*x -math.cos(x)-1) #the function\ndef fp(x):\n return(3+math.sin(x))\ndef midfunc(x0):\n x1=x0-(f(x0)/fp(x0))\n return(x1)\ndef newrap_method(a,b,tol):\n if f(a)*f(b)>0:\n print(\"No root found.\")\n else:\n x0=a\n x1=midfunc(x0)\n if(round(x0,tol)==round(x1,tol)):\n return x1\n else:\n x0=x1\n x1=midfunc(x1)\n return(x1)\ntol=int(input(\"Enter decimal places for tolerance:\"))\nanswer=newrap_method(0,1,tol)\nprint('answer:',round(answer,tol))\n" }, { "alpha_fraction": 0.45026177167892456, "alphanum_fraction": 
0.47294938564300537, "avg_line_length": 26.285715103149414, "blob_id": "0ba1e2bba492c28af696ee4f399a1a3c2845deee", "content_id": "50f83bf1a0d8a20c976c4f64cf4d759ef94a9615", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 49, "num_lines": 21, "path": "/falsi.py", "repo_name": "nissan997/root_finding_py", "src_encoding": "UTF-8", "text": "import math\ndef f(x):\n return(3*x -math.cos(x)-1) #the function\ndef midpoint(a,b,fa,fb):\n result=((a*fb)-(b*fa))/(fb-fa)\n return(result)\ndef falsi_method(a,b,tol):\n if f(a)*f(b)>0:\n print(\"No root found.\")\n else:\n while (b-a)/2.0 > tol :\n mid = midpoint(a,b,f(a),f(b))\n if f(mid) == 0:\n return (mid) #root\n elif f(a)*f(mid)<0:\n b= mid\n else :\n a= mid\n return(mid)\nanswer = falsi_method(-1,5,0.01) #(a,b,tolerance)\nprint(\"Answer: \",round(answer,3))\n" }, { "alpha_fraction": 0.4647619128227234, "alphanum_fraction": 0.49714285135269165, "avg_line_length": 28.16666603088379, "blob_id": "63665c996991be6491eede85885257a9202fb42b", "content_id": "c4be847a50da5f9d908613b11b45ba5a256ccb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 55, "num_lines": 18, "path": "/bisect.py", "repo_name": "nissan997/root_finding_py", "src_encoding": "UTF-8", "text": "import math\ndef f(x):\n return(3*x -math.cos(x)-1) #the function\ndef bisection_method(a,b,tol):\n if f(a)*f(b)>0:\n print(\"No root found.\")\n else:\n while (b-a)/2.0 > tol :\n midpoint = (a+b)/2.0\n if f(midpoint) == 0:\n return (midpoint) #root\n elif f(a)*f(midpoint)<0:\n b= midpoint\n else :\n a= midpoint\n return(midpoint)\nanswer = bisection_method(-1,5,0.0001) #(a,b,tolerance)\nprint(\"Answer: \",round(answer,3))\n" }, { "alpha_fraction": 0.7896679043769836, "alphanum_fraction": 0.8118081092834473, "avg_line_length": 26, "blob_id": 
"5cf9cdcab96a21346ec834b1e0feede846d7a9e1", "content_id": "0981bad900a133b968d6c8d4f424ec9fa2b1ee48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 271, "license_type": "no_license", "max_line_length": 60, "num_lines": 10, "path": "/Readme.txt", "repo_name": "nissan997/root_finding_py", "src_encoding": "UTF-8", "text": "Various root finding methods are implemented using python.\nCurrently implemented methods are :\n1.Bisection Method\n2.Regular Falsi Method\n3.One point iteration Method\n4.Newton Raphson Method\n\nall these methods are implemented using the example function\n\nf(x)=3x-cos(x)-1\n\n" } ]
5
pooneh-nb/temporal_clustering
https://github.com/pooneh-nb/temporal_clustering
5640067c86d3906b1ed8b12ca04a2ecdf02fabf1
9f77163389c705bea67629c1fae9e0081379577d
c1e8278747941e2c44dc24f781b967bf9bb3971e
refs/heads/master
2023-02-21T03:35:42.069546
2021-01-22T08:12:15
2021-01-22T08:12:15
322,684,292
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5590062141418457, "alphanum_fraction": 0.6149068474769592, "avg_line_length": 25.91666603088379, "blob_id": "310edc8c9d3bd62a023ff6ed16c5bfd6895b6223", "content_id": "e2d0622ecd1dcec1a3797d91b43553d943c23ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/test.py", "repo_name": "pooneh-nb/temporal_clustering", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n\nd = {'col1': [1, 4, 3, 4, 5], 'col2': [4, 5, 6, 7, 8], 'col3': [7, 8, 9, 0, 1]}\ndf = pd.DataFrame(data=d)\nprint(\"Original DataFrame\")\nprint(df)\nprint('Data from new_file.csv file:')\ndf.to_csv('new_file.csv', sep='\\t', index=False)\nnew_df = pd.read_csv('new_file.csv')\nprint(new_df)" }, { "alpha_fraction": 0.45485585927963257, "alphanum_fraction": 0.4610100984573364, "avg_line_length": 37.62883377075195, "blob_id": "441c655f33a5f98b5449533807a1e15082d27945", "content_id": "f10bbf9fade42455fe18878858a6157bcedcf163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25186, "license_type": "no_license", "max_line_length": 115, "num_lines": 652, "path": "/tpSCAN_A-1-0.5-2.py", "repo_name": "pooneh-nb/temporal_clustering", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport readfile\nimport math\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom time import sleep, time\nimport datetime\nimport json\nfrom statistics import mean\nimport math\nimport gc\nimport itertools\nimport pprint\nimport multiprocessing\nfrom multiprocessing import Pool as ThreadPool\nimport itertools\n\n\nclass tGraph:\n\n def __init__(self, path):\n self.path = path\n # list of (node, degree) sorted based on degree\n self.rank = []\n # the highest rank of rank list: (92,181) based on node's degree. 
here node 92 with degree 181 has the\n # highest rank\n self.toprank = 0\n self.visited_set = []\n self.visited_node = set() # add nodes which their eligibility for being a core has been tested\n self.visited_edge = set()\n # degree of similarity between two neighbor nodes\n self.eps = 0.3\n # minimum number of similar neighbors\n self.miu = 5\n # minimum number of similar timestamps\n self.tau = 3\n self.theta = 1\n self.sigma = {}\n # calculated sigma for each edge at timestamp t\n self.sigma_t = {}\n self.union_set = [] # list of cores\n self.collections = {}\n # dictionary of each node as key along with their neighbors and degrees as value\n # {u: {v:v_deg, h:h_degree,...}\n # v: {list of v's neighbors along with their degree}\n # }\n self.adj = {}\n self.subgraph = {}\n self.frquent_set = {}\n # create a temporal graph in this format (u,v,t)\n self.G = readfile.tGraph(self.path)\n\n print(len(self.G.nodes()))\n print(len(self.G.edges()))\n temporal_edge = 0\n for nodex in self.G.nodes():\n for nodey in self.G.adj[nodex]:\n temporal_edge += len(self.G.edges[nodex, nodey]['t'])\n print(\"temporal edges:\" + str(temporal_edge / 2))\n\n # dict: {node:degree}\n ranktemp = {}\n for node_temp in self.G.nodes():\n # add two attributes to each node; l and u. 
l=0 and u = number of neighbors(degree)\n self.G.nodes[node_temp]['l'] = 0\n # attr u should be the average weight of u\n #self.G.nodes[node_temp]['u'] = len(self.G.adj[node_temp])\n wt = 0\n for nbr, eattr in self.G.adj[node_temp].items():\n wt = wt + sum(eattr['w'])\n #self.G.nodes[node_temp]['u'] = len(self.G.adj[node_temp])\n self.G.nodes[node_temp]['u'] = wt\n ranktemp[node_temp] = self.G.nodes[node_temp]['u']\n # then sort ranktemp dictionary based on degree of each node\n # the node with largest degree is number one\n self.rank = sorted(ranktemp.items(), key=lambda item: item[1], reverse=True)\n\n for node_temp in ranktemp:\n self.adj[node_temp] = {}\n for item in self.G.adj[node_temp]:\n if item == node_temp:\n print(item)\n else:\n # the adj member is created here\n self.adj[node_temp][item] = ranktemp[item]\n\n # for each node, sort its neighbors based on its degree\n adjtemp = sorted(self.adj[node_temp].items(), key=lambda item: item[1], reverse=True)\n\n self.adj[node_temp] = []\n for item in adjtemp:\n # add neighbors of each node, based on highest degree\n self.adj[node_temp].append(item[0])\n\n ranktemp = []\n # set to the node with the highest rank\n self.toprank = self.rank[0]\n for i in self.rank:\n ranktemp.append(i[0])\n self.rank = ranktemp\n\n del (ranktemp)\n\n def tDistribution(self, tempG):\n timestamps = {}\n for item in tempG.edges.data():\n if item[2]['t'] in timestamps:\n timestamps[item[2]['t']] = timestamps[item[2]['t']] + 1\n else:\n timestamps[item[2]['t']] = 1\n\n min = -1\n max = -1\n x = []\n y = []\n for k, v in timestamps.items():\n if k > max:\n max = k\n if min > k or min == -1:\n min = k\n x.append(k)\n y.append(v)\n print(min, max)\n print(time.gmtime(min), time.gmtime(max))\n\n def check_SCANA_core(self, u):\n # this function check whether node u can be a core. A node u E V is called\n # a (miu, tau, epsilon)-stable core if there exist a set of neighbors\n # N~(u) E N(u) of u that satisfies the following conditions:\n # 1. 
|N~(u)| >= miu\n # 2. there are at least tau snapshots containing the star-shaped structure formed by\n # N~(u) of u\n # 3. in each snapshot, sigma(u,v) >= epsilon for any v E N~(u)\n\n # check whether the number of node u's neighbors is larger than miu\n\n # condition 1\n \"\"\"if u in self.visited_node:\n return False\"\"\"\n initial_weight = 0\n if self.G.nodes[u]['l'] < self.miu and self.G.nodes[u]['u'] >= self.miu:\n candidate_set = []\n for v in self.G.adj[u]:\n # check all the neighbors of u. If any of them, (u,v) edge, has been repeated\n # in more tha tau snapshots then add that edge to candidate_set\n #print(self.G.edges[u, v]['t'])\n if len(self.G.edges[u, v]['t']) >= self.tau:\n candidate_set.append(v)\n #initial_weight = initial_weight + round(mean(self.G.edges[u,v]['w']),2)\n initial_weight = initial_weight + sum(self.G.edges[u, v]['w'])\n # condition 2\n\n if initial_weight < self.miu:\n #if len(candidate_set) < self.miu:\n return False\n\n # find what is sum of the weights of candidate neighbors at each timestamp;\n candidate_time = {}\n for v in candidate_set:\n idx = 0\n for time_item in self.G.edges[u, v]['t']:\n if time_item in candidate_time:\n candidate_time[time_item] += self.G.edges[u,v]['w'][idx]\n #candidate_time[time_item] += 1\n else:\n candidate_time[time_item] = self.G.edges[u,v]['w'][idx]\n #candidate_time[time_item] = 1\n idx += 1\n\n # only work on timestamps that their aggregated weight is more than miu\n times_more_than_miu = []\n for key in candidate_time:\n if candidate_time[key] >= self.miu:\n times_more_than_miu.append(key)\n if len(times_more_than_miu) < self.tau:\n return False\n # condition 3\n\n # then check the similarity of each candidate edge at times_more_than_miu.\n # If the similarity is more than eps, add the weight of that edge to miu_calculate. 
Also update\n # the 'l' attribute of node u and its neighbors.\n # At the end of this method, if the self.G.nodes[u]['u'] is more than mui, it is eligible to be a core.\n tau_calculate = 0\n for t in times_more_than_miu:\n if tau_calculate >= self.tau:\n break\n # sum of weights of desired neighbors at each timestamp\n miu_calculate = 0\n # sum of weights of all candidate neighbors regardless of their similarity\n max_miu_calculate = candidate_time[t]\n for v in candidate_set:\n #if tau_calculate >= self.tau:\n #break\n if v <= u:\n edge_set = (u, v, t)\n else:\n edge_set = (v, u, t)\n if edge_set not in self.visited_edge:\n if edge_set not in self.sigma_t:\n # set the similarity of two nodes at time t\n self.sigma_t[edge_set] = self.compute_sigma_at_one_time(u, v, t)\n\n if self.sigma_t[edge_set] >= self.eps:\n idx_t_u = self.G.edges[u, v]['t'].index(t)\n miu_calculate += self.G.edges[u,v]['w'][idx_t_u]\n self.G.nodes[u]['l'] += self.G.edges[u,v]['w'][idx_t_u]\n self.G.nodes[v]['l'] += self.G.edges[u,v]['w'][idx_t_u]\n self.visited_edge.add(edge_set)\n #miu_calculate += 1\n else:\n if t not in self.G.edges[u, v]['t']:\n pass\n else:\n idx_t_u = self.G.edges[u, v]['t'].index(t)\n max_miu_calculate -= self.G.edges[u,v]['w'][idx_t_u]\n #max_miu_calculate -= 1\n #if miu_calculate >= self.miu:\n #tau_calculate += 1\n #continue\n\n #if max_miu_calculate < self.miu:\n #continue\n else:\n if v <= u:\n edge_set = (u, v, t)\n else:\n edge_set = (v, u, t)\n if edge_set not in self.sigma_t:\n # set the similarity of two nodes at time t\n self.sigma_t[edge_set] = self.compute_sigma_at_one_time(u, v, t)\n\n if self.sigma_t[edge_set] >= self.eps:\n idx_t_u = self.G.edges[u, v]['t'].index(t)\n miu_calculate += self.G.edges[u, v]['w'][idx_t_u]\n else:\n if t not in self.G.edges[u, v]['t']:\n pass\n else:\n idx_t_u = self.G.edges[u, v]['t'].index(t)\n max_miu_calculate -= self.G.edges[u, v]['w'][idx_t_u]\n # max_miu_calculate -= 1\n\n if miu_calculate >= self.miu:\n tau_calculate 
+= 1\n break\n #continue\n\n if max_miu_calculate < self.miu:\n break\n #continue\n\n if miu_calculate >= self.miu:\n tau_calculate += 1\n #continue\n\n if tau_calculate < self.tau:\n #self.G.nodes[u]['l'] = 0\n self.visited_node.add(u)\n return False\n\n \"\"\"for v in candidate_set:\n edge_set = (u, v)\n if v >= u:\n edge_set = (u, v)\n else:\n edge_set = (v, u)\n\n if edge_set not in self.sigma:\n self.sigma[edge_set] = self.compute_sigma(u, v)\n # if the similarity of edge_set is more than sigma in tau snapshots\n if self.sigma[edge_set] >= self.tau:\n for t in times_more_than_miu:\n if t in self.G.edges[u, v]['t']:\n idx = self.G.edges[u,v]['t'].index(t)\n self.G.nodes[u]['l'] += round(self.G.edges[u, v]['w'][idx], 2)\n self.G.nodes[v]['l'] += round(self.G.edges[u, v]['w'][idx], 2)\n self.G.nodes[u]['l'] /= len(times_more_than_miu)\n self.G.nodes[v]['l'] /= len(times_more_than_miu)\n #self.G.nodes[u]['l'] += 1\n #self.G.nodes[v]['l'] += 1\n else:\n for t in times_more_than_miu:\n if t in self.G.edges[u, v]['t']:\n idx = self.G.edges[u, v]['t'].index(t)\n self.G.nodes[u]['l'] -= round(self.G.edges[u, v]['w'][idx], 2)\n self.G.nodes[v]['l'] -= round(self.G.edges[u, v]['w'][idx], 2)\n #self.G.nodes[u]['u'] -= 1\n #self.G.nodes[v]['u'] -= 1\n\n if self.G.nodes[u]['l'] >= self.miu or self.G.nodes[u]['u'] < self.miu:\n break\"\"\"\n\n self.visited_node.add(u)\n return False\n\n def cluster_SCANA_core(self, u):\n candidate_set = []\n for v in self.G.adj[u]:\n if len(self.G.edges[u, v]['t']) >= self.tau:\n candidate_set.append(v)\n\n for v in candidate_set:\n #edge_set = (u, v)\n if v <= u:\n edge_set = (u, v)\n else:\n edge_set = (v, u)\n\n if edge_set in self.sigma:\n if self.G.nodes[v]['l'] >= self.miu and self.sigma[edge_set] >= self.tau:\n self.union(u, v)\n else:\n if self.G.nodes[v]['u'] >= self.miu:\n self.sigma[edge_set] = self.compute_sigma(u, v)\n \"\"\"if self.sigma[edge_set] >= self.tau:\n #self.G.nodes[u]['l'] += sum(self.G.edges[u, v]['w'])\n 
self.G.nodes[v]['l'] += sum(self.G.edges[u, v]['w'])\n #self.G.nodes[u]['l'] += round(mean(self.G.edges[u, v]['w']))\n #self.G.nodes[v]['l'] += round(mean(self.G.edges[u, v]['w']))\n #self.G.nodes[u]['l'] += 1\n #self.G.nodes[v]['l'] += 1\n else:\n #self.G.nodes[u]['l'] -= sum(self.G.edges[u, v]['w'])\n self.G.nodes[v]['l'] -= sum(self.G.edges[u, v]['w'])\n #self.G.nodes[u]['u'] -= 1\n #self.G.nodes[v]['u'] -= 1\"\"\"\n\n if self.sigma[edge_set] >= self.tau:\n self.check_SCANA_core(v)\n if self.G.nodes[v]['l'] >= self.miu:\n self.union(u, v)\n #self.visited_node.add(u)\n\n def add_node_set(self, u):\n if len(self.union_set):\n for set in self.union_set:\n if u in set:\n return 0\n if u not in set:\n pass\n self.union_set.append([u])\n print(self.union_set)\n else:\n self.union_set.append([u])\n print(self.union_set)\n\n def union(self, u, v):\n if len(self.union_set):\n flag = 0\n set1 = []\n set2 = []\n for set in self.union_set:\n if u in set and v in set:\n flag = -1 # no need to change\n break\n if u in set and v not in set:\n set1 = set\n flag = flag + 1\n if v in set and u not in set:\n set2 = set\n flag = flag + 1\n if flag == 0:\n temp = [u, v]\n self.union_set.append(temp)\n if flag == 1:\n if set1:\n index_temp = self.union_set.index(set1)\n self.union_set[index_temp].append(v)\n if set2:\n index_temp = self.union_set.index(set2)\n self.union_set[index_temp].append(u)\n if flag == 2:\n self.union_set.remove(set1)\n self.union_set.remove(set2)\n union_temp = set1 + set2\n self.union_set.append(union_temp)\n if flag > 2:\n print(\"unnion error\")\n\n else:\n temp = [u, v]\n self.union_set.append(temp)\n\n def compute_sigma_at_one_time(self, u, v, t):\n if t not in self.G.edges[u, v]['t']:\n return 0\n # ~~~~~~~~~~cosine similarity~~~~~~~~~~~~~~~\n w_adju = 0\n adju = []\n #idx_t_u = None\n for vertex in self.adj[u]:\n if t in self.G.edges[u, vertex]['t']:\n adju.append(vertex)\n idx_t_u = self.G.edges[u, vertex]['t'].index(t)\n w_adju += pow(self.G.edges[u, 
vertex]['w'][idx_t_u],2)\n\n w_adjv = 0\n adjv = []\n #idx_t_v = None\n for vertex in self.adj[v]:\n if t in self.G.edges[v, vertex]['t']:\n adjv.append(vertex)\n idx_t_v = self.G.edges[v, vertex]['t'].index(t)\n w_adjv += pow(self.G.edges[v, vertex]['w'][idx_t_v], 2)\n\n lenuadj = len(adju) + 1\n lenvadj = len(adjv) + 1\n if lenuadj < self.eps * self.eps * lenvadj or lenvadj < self.eps * self.eps * lenuadj:\n return 0\n\n common_nbr = set(adju).intersection(set(adjv))\n similarity_w = 0\n if len(common_nbr) == 0 and t in self.G.edges[u, v]['t']:\n idx_t_u = self.G.edges[u, v]['t'].index(t)\n similarity_w = pow(self.G.edges[u, v]['w'][idx_t_u], 2)\n else:\n idx_t_ = self.G.edges[u, v]['t'].index(t)\n similarity_w = pow(self.G.edges[u, v]['w'][idx_t_], 2)\n for nbr in common_nbr:\n idx_t_u = self.G.edges[u, nbr]['t'].index(t)\n idx_t_v = self.G.edges[v, nbr]['t'].index(t)\n similarity_w += self.G.edges[u, nbr]['w'][idx_t_u] * self.G.edges[v, nbr]['w'][idx_t_v]\n\n sigma = similarity_w / math.sqrt(w_adju*w_adjv)\n\n if sigma < self.eps:\n return 0\n else:\n return self.eps + 0.1\n \"\"\"\n # list of neighbors of u at time t\n adju = []\n for vertex in self.adj[u]:\n if t in self.G.edges[u, vertex]['t']:\n adju.append(vertex)\n\n # list of neighbors of v at time t\n adjv = []\n for vertex in self.adj[v]:\n if t in self.G.edges[v, vertex]['t']:\n adjv.append(vertex)\n\n\n #lenuadj = len(adju) + 1\n lenuadj = w_adju\n #lenvadj = len(adjv) + 1\n lenvadj = w_adjv\n if lenuadj < self.eps * self.eps * lenvadj or lenvadj < self.eps * self.eps * lenuadj:\n # print(u,v,lenuadj,lenuadj)\n return 0\n\n len_v_u = len(set(adju) & set(adjv)) + 2\n if len_v_u < self.eps * math.sqrt(lenuadj * lenvadj):\n # print(u, v, len_v_u, lenuadj, lenuadj)\n return 0\n else:\n return self.eps + 0.1\"\"\"\n\n def compute_sigma(self, u, v):\n tau = 0\n\n if len(self.G.edges[u, v]['t']) < self.tau:\n return 0\n\n for t in self.G.edges[u, v]['t']:\n #edge_set = (u, v, t)\n if v <= u:\n edge_set = 
(u, v, t)\n else:\n edge_set = (v, u, t)\n\n if edge_set not in self.sigma_t:\n result = self.compute_sigma_at_one_time(u, v, t)\n # print u,v,t,result\n self.sigma_t[edge_set] = result\n if result > self.eps:\n tau += 1\n else:\n if self.sigma_t[edge_set] > self.eps:\n tau += 1\n\n if tau >= self.tau:\n return tau\n return 0\n\n def compute_sigma2(self, u, v):\n tau = 0\n if len(self.G.edges[u, v]['t']) < self.tau:\n return 0\n\n for t in self.G.edges[u, v]['t']:\n if t in self.subgraph:\n pass\n else:\n self.subgraph[t] = nx.subgraph.subgraph(self.path, t, self.theta)\n print(t)\n if self.subgraph[t].compute_sigma(u, v, self.eps) > self.eps:\n tau = tau + 1\n if tau >= self.tau:\n return tau\n return 0\n\n def SCANA(self, miu, tau, eps):\n self.eps = eps\n self.miu = miu\n self.tau = tau\n self.union_set = []\n #for u in self.G.nodes():\n #### this part has done already in tGraph\n #wt = 0\n #self.G.nodes[u]['l'] = 0\n #wt = 0\n #for nbr, eattr in self.G.adj[u].items():\n # wt = wt + round(mean(eattr['w']), 2)\n #self.G.nodes[u]['u'] = len(self.G.adj[u])\n ####\n counter = 0\n starttime = datetime.datetime.now()\n \"\"\"cpu_to_relax = 3\n #pool = ThreadPool(processes=multiprocessing.cpu_count() - cpu_to_relax)\n pool = ThreadPool(processes=1)\n results = pool.starmap(self.SCANA_multiprocess, zip(itertools.repeat(self), self.rank))\n pool.close()\n pool.join()\"\"\"\n for u in self.rank:\n counter += 1\n print(counter)\n value = self.check_SCANA_core(u)\n if self.G.nodes[u]['l'] >= self.miu:\n self.add_node_set(u)\n self.cluster_SCANA_core(u)\n endtime = datetime.datetime.now()\n interval = (endtime - starttime).total_seconds()\n print(\"Runing time of SCANA:\" + str(interval))\n self.write_runtime(interval, sys._getframe().f_code.co_name)\n self.sigma = {}\n self.sigma_t = {}\n self.visited_node = []\n file_name = self.path + '.output-' + str(self.eps) + '-' + str(self.tau) + '-' + str(self.miu) + '_SCANA'\n print(\"Cores output at: \" + file_name)\n 
print(self.union_set)\n file_object = open(file_name, 'w')\n for unit in self.union_set:\n file_object.write(json.dumps(unit))\n file_object.write(\"\\n\")\n file_object.close()\n #self.union_set = []\n\n def cluster_by_cores(self, file_c, flag):\n self.union_set = []\n nodes_set = []\n\n if flag == 1:\n for line in open(file_c):\n number = int(line.strip('\\n'))\n nodes_set.append(number)\n if flag == 2:\n for line in open(file_c):\n new_dict = json.loads(line)\n for item in new_dict:\n nodes_set.append(item)\n\n for u in nodes_set:\n self.add_node_set(u)\n for v in set(nodes_set) & set(self.G.adj[u]):\n if v < u:\n self.add_node_set(v)\n result = self.compute_sigma(u, v)\n if result >= self.tau:\n self.union(u, v)\n\n cluster_ans = []\n for i in range(len(self.union_set)):\n cluster1 = self.union_set[i][:]\n for u in self.union_set[i]:\n for v in set(self.G.adj[u]):\n if v <= u:\n edge_set = (u, v)\n else:\n edge_set = (v, u)\n if edge_set not in self.sigma:\n result = self.compute_sigma(u, v)\n self.sigma[edge_set] = result\n if self.sigma[edge_set] >= self.tau:\n cluster1.append(v)\n cluster_ans.append(set(cluster1))\n # pprint.pprint(self.union_set)\n file_object = open(file_c + '_cluster', 'w')\n for unit in cluster_ans:\n file_object.write(json.dumps(list(unit)))\n file_object.write(\"\\n\")\n file_object.close()\n\n self.union_set = []\n self.sigma = {}\n self.sigma_t = {}\n self.visited_node = []\n\n def run(self, filename):\n self.SCANA(self.miu, self.tau, self.eps)\n\n def cluster(self, filename):\n newname = filename + '.output-' + str(self.eps) + '-' + str(self.tau) + '-' + str(self.miu) + '_SCANA'\n self.cluster_by_cores(newname, 2)\n\n def write_runtime(self, t, module_name):\n file_object = open('running time', 'a')\n time = {\"name\": self.path, \"eps\": self.eps, \"tau\": self.tau, \"miu\": self.miu, \"time\": t,\n \"method\": module_name}\n file_object.write(json.dumps(time))\n file_object.write(\"\\n\")\n file_object.close()\n\n\nif __name__ == 
'__main__':\n # get the dataset\n filename = \"temporal_df\"\n # convert the temporal dataset separated by \\t to a networkx. calculate the degree of nodes and find each\n # node's adjacency\n # !call the instructor of the class to initiate members\n # !in this method, these members will be initiated: rank; adj\n # !rank: list of nodes sorted based on degree\n # !adj: dict {node: [list of neighbors sorted based on degree].. }\n G = tGraph(filename)\n #fig, axs = plt.subplots(1, 1, sharey=True, tight_layout=True)\n # We can set the number of bins with the `bins` kwarg\n #degree_list = []\n \"\"\"for node, deg in G.adj.items():\n degree_list.append(len(deg))\n axs.hist(degree_list, bins=10)\"\"\"\n #plt.show()\n\n # set parameters\n G.eps = 0.5\n G.tau = 2\n G.miu = 1\n\n #print(filename, G.eps, G.tau, G.miu)\n\n #G.run(filename)\n G.cluster(filename)\n\n #G.nodes_distribution_by_year()\n #G.degree_distribution_of_nodes_detemporal()\n #G.degree_distribution_of_nodes()\n\n #G.evauluation(filename, \"0.5-3-3500\")\n # G.evaluaition_by_year(filename, \"0.5-3-5\")\n\n #G.analyse()\n" }, { "alpha_fraction": 0.5793103575706482, "alphanum_fraction": 0.5793103575706482, "avg_line_length": 13.5, "blob_id": "0e9478fc6bf94158e167155910532c0cdbd532a2", "content_id": "1f86328c10ae851f07b2339122a0d11951ee7a1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 43, "num_lines": 10, "path": "/dbpl/create_temporal_graph.py", "repo_name": "pooneh-nb/temporal_clustering", "src_encoding": "UTF-8", "text": "import xml.dom.minidom\n\n\ndef main():\n doc = xml.dom.minidom.parse(\"dblp.xml\")\n print(doc.nodeName)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6048780679702759, "alphanum_fraction": 0.6162601709365845, "avg_line_length": 33.11111068725586, "blob_id": "7e824f550a9b150b40f3548319b35b5f0fce3f84", "content_id": 
"f229f75eafeddf271b445f01fa10f9ca437e5470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/create_df.py", "repo_name": "pooneh-nb/temporal_clustering", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport csv\nfrom random import randint\n#df = pd.read_csv(\"chess_year.csv\", sep=' ')\n\nkeyDict = {\"u\", \"v\", \"t\", \"w\"}\ntemporal_dict = dict([(key, []) for key in keyDict])\n\nwith open('chess_year', newline='') as games:\n game_reader = csv.reader(games, delimiter='\\t')\n for game in game_reader:\n temporal_dict['u'].append(game[0])\n temporal_dict['v'].append(game[1])\n temporal_dict['t'].append(game[2])\n temporal_dict['w'].append(randint(0,500))\n\ndf = pd.DataFrame(data=temporal_dict, columns=['u', 'v', 't','w'])\ndf.to_csv('weighted_chess', sep='\\t', index=False)\n\n" }, { "alpha_fraction": 0.506147563457489, "alphanum_fraction": 0.5404713153839111, "avg_line_length": 29.046154022216797, "blob_id": "0fea6763189e97e4ff964e41b432325afee6b226", "content_id": "6cf9762aa19042ff1dca8900fbbe9c121d9ecb54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1952, "license_type": "no_license", "max_line_length": 92, "num_lines": 65, "path": "/test_graph.py", "repo_name": "pooneh-nb/temporal_clustering", "src_encoding": "UTF-8", "text": "import operator\n\nimport networkx as nx\nimport json\nfrom statistics import mean\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\ndef create_graph(path):\n G = nx.Graph()\n for line in open(path):\n if line.find('#') and line.find('%') < 0 and line != '\\n':\n line = line.strip('\\n').split('\\t')\n if int(line[0]) == int(line[1]):\n continue\n\n if G.has_edge(int(line[0]), int(line[1])):\n if int(line[2]) not in G.edges[int(line[0]), int(line[1])]['t']:\n G.edges[int(line[0]), 
int(line[1])]['t'].append(int(line[2]))\n G.edges[int(line[0]), int(line[1])]['w'].append(int(line[3]))\n # print(int(line[0]), int(line[1]) ,G.edges[int(line[0]), int(line[1])])\n else:\n G.add_edge(int(line[0]), int(line[1]), t=[int(line[2])], w=[int(line[3])])\n return G\n\ndef masure_avg_weight():\n path = \"temporal_df_2016\"\n FG = create_graph(path)\n #FG = nx.Graph()\n #FG.add_weighted_edges_from([(1, 2, 0.125), (1, 3, 0.75), (2, 4, 1.2), (3, 4, 0.375)])\n degree_sequence = sorted([d for n, d in FG.degree()], reverse=True)\n dmax = max(degree_sequence)\n print(dmax)\n\n wt = {}\n for n, nbrs in FG.adj.items():\n wt[n] = 0\n for nbr, eattr in nbrs.items():\n wt[n] = wt[n] + round(mean(eattr['w']), 2)\n\n weight_max = max(wt.items(), key=operator.itemgetter(1))[0]\n print(wt[weight_max])\n\n #with open('weights.json', 'wt') as fp:\n #json.dump(wt, fp, indent=4)\n\n\ndef weight_histogram():\n with open('weights.json', 'rt') as ww:\n weights = json.load(ww)\n weight_data = []\n for key, weight in weights.items():\n if 5000<weight < 100000:\n weight_data.append(round(weight,2))\n print(len(weight_data))\n\n fig, axs = plt.subplots(1, 1)\n axs.hist(weight_data, bins=10)\n plt.show()\n\n\n\n#masure_avg_weight()\n#weight_histogram()" } ]
5
brianjohnhaas/indrops
https://github.com/brianjohnhaas/indrops
2fe2b105034e09c8d72a22e2a309b23c687a59f9
9554c9a385aa63fd8a793adc1d6dc00648c821f0
1592f0afed48f0ea1fb088bd271d89155a0df28d
refs/heads/master
2021-01-19T23:48:20.618178
2019-09-26T14:32:32
2019-09-26T14:32:32
89,029,845
4
6
null
2017-04-21T22:58:02
2017-02-27T16:56:13
2017-03-07T22:49:52
null
[ { "alpha_fraction": 0.6507353186607361, "alphanum_fraction": 0.654411792755127, "avg_line_length": 21.66666603088379, "blob_id": "6f343e8a8def282a9b299ba4830b9c93f2bde005", "content_id": "54fc3bc662c1ce42f4ee4f2405330e3da78abc52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/count_barcode_distribution.py", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "import re\nfrom collections import defaultdict\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nfrom indrops import from_fastq, to_fastq\n\ndef count():\n\n barcode_read_counter = defaultdict(int)\n\n for name, seq, qual in from_fastq(sys.stdin):\n split_name = name.split(':')\n cell_name = split_name[0]\n\n barcode_read_counter[cell_name] += 1\n sys.stdout.write(to_fastq(name, seq, qual))\n pickle.dump(dict(barcode_read_counter), sys.stderr)\n\nif __name__==\"__main__\":\n import sys, argparse\n count()\n" }, { "alpha_fraction": 0.6684255003929138, "alphanum_fraction": 0.7262681126594543, "avg_line_length": 47.64406967163086, "blob_id": "16cb523de4a408a5f0496ce209085ccb802a9aa2", "content_id": "50d775972e15a87f8bc524bd644e909fba610ed6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 20089, "license_type": "no_license", "max_line_length": 343, "num_lines": 413, "path": "/Leigh_et_al_2018_Supplementary_R_code/blastema_URD_analysis.R", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "#Pseudotime analysis of blastema-resident cells\n\n#load required packages\nlibrary(URD)\nlibrary(Seurat)\nlibrary(dplyr)\n#load medium-bud data\nload('medium_bud_blastema.RData')\n\n#pull out all non-immune blastema cells\ndpa23_FDB <- SubsetData(combined.merged, ident.use=c('5','6','10', '13', '14','15', '16','17'), subset.name = 'day', accept.value = 'd1')\n#remove medium-bud 
Seurat object\nrm(combined.merged)\n\n#add time point metadata to Seurat object\[email protected]$time <- \"medium-bud\"\n\n#load in early-bud data\nload('early_bud_blastema.Rdata')\n\n#pull out all non-immune blastema cells\ndpa14_blastema <- SubsetData(merged, ident.use= c('3', '4', '5','6','11'))\n#add time point metadata to Seurat object\[email protected]$time <- \"early-bud\"\n\n#remove excess data\nrm(seurat_14dpa, percent.mito.14dpa, mito.genes.14dpa, merged)\n\n#load in wound healing data\nload('wound_healing.Rdata')\n#pull out all non-immune blastema cells\ndpa3_blastema <- SubsetData(merged, ident.use = c('2','21','24','25'))\n\n#add time point metadata to Seurat object\[email protected]$time <- 'wound_healing'\n\n#remove excess data\nrm(seurat_3dpa, mito.genes.3dpa, percent.mito.3dpa, node.scores, merged, all.markers, nodes.merge, nodes.to.merge, n)\n\n#combine above Seurat object so we can make a raw data file and metadata file with only the cells we want to put into URD\ndpa14.23 <- MergeSeurat(dpa14_blastema, dpa23_FDB)\ncombined <- MergeSeurat(dpa14.23,dpa3_blastema)\n\n#pull out raw data\nraw.data <- as.matrix([email protected])\n\n#pull out metadata\nmeta.data <- as.matrix([email protected])\n\n#remove excess Seurat objects\nrm(dpa14.23, dpa3_blastema, dpa14_blastema, dpa23_FDB)\n\n#create URD object with all of the cells we selected above\nblastema <- createURD(count.data = raw.data, meta = meta.data, min.cells=3, min.counts=3)\n\n#now remove raw data and Seurat objects\nrm(raw.data, meta.data, combined)\n\n#add time point info to URD stage slot\[email protected]$stage <- as.character(blastema@meta[rownames([email protected]),\"time\"])\n\n#find variable genes for each time point\nstages <- sort(unique([email protected]$stage))\nvar.3dpa <- findVariableGenes(blastema, cells.fit=cellsInCluster(blastema, \"stage\", 'wound_healing'), set.object.var.genes=F, diffCV.cutoff=0.3, mean.min=.005, mean.max=100, do.plot = T)\nvar.14dpa <- 
findVariableGenes(blastema, cells.fit=cellsInCluster(blastema, \"stage\", 'early-bud'), set.object.var.genes=F, diffCV.cutoff=0.3, mean.min=.005, mean.max=100, do.plot = T)\nvar.23dpa <- findVariableGenes(blastema, cells.fit=cellsInCluster(blastema, \"stage\", 'medium-bud'), set.object.var.genes=F, diffCV.cutoff=0.3, mean.min=.005, mean.max=100, do.plot = T)\n\n#combine into one \nvar.genes <- sort(unique(unlist(var.3dpa, var.14dpa, var.23dpa)))\n\n#add to URD object\[email protected] <- var.genes\n\n#calculate PCA\nblastema <- calcPCA(blastema, mp.factor = 2)\n\npcSDPlot(blastema)\n\n# Calculate tSNE\nset.seed(19)\nblastema <- calcTsne(object = blastema)\n\n#visualize tSNE by time point\nplotDim(blastema, \"time\", plot.title = \"tSNE: DPA\")\n\n#create an URD object with only medium-bud cells\nblastema.23dpa <- urdSubset(blastema, cells.keep=cellsInCluster(blastema, \"stage\", \"medium-bud\"))\n\n#use variable genes that were calculated only for medium-bud\[email protected] <- var.23dpa\n\n#calculate PCA and tSNE\nblastema.23dpa <- calcPCA(blastema.23dpa, mp.factor = 1.5)\npcSDPlot(blastema.23dpa)\nset.seed(20)\nblastema.23dpa <- calcTsne(blastema.23dpa)\n\n#perform graph based clustering\nblastema.23dpa <- graphClustering(blastema.23dpa, num.nn = c(30, 40, 50, 70), method = 'Louvain', do.jaccard=T)\nblastema.23dpa <- graphClustering(blastema.23dpa, num.nn = c(50, 60, 70, 80, 100), method = 'Infomap', do.jaccard=T)\nclusterings <- c(paste0('Louvain-',c(30, 40, 50, 70)), paste0('Infomap-', c(50, 60, 70, 80, 100)))\nfor (c in clusterings) {plot(plotDim(blastema.23dpa, c, legend=F))}\n\n#we chose to go forward with Infomap-100\nclusters <- unique([email protected]$'Infomap-100')\n\n#find markers for these populations to get an idea of what they are\npr.markers <- lapply(clusters, function(c) markersAUCPR(blastema.23dpa, clust.1 = c, clustering = 'Infomap-100', genes.use= [email protected]))\n\n#you can export each table like below, or if R studio look at top markers 
in viewer\n#note that cluster number and number in pr.markers[[]] are not necessarily equal!\n#cluster number can be found in headings of the marker lists\nwrite.table(pr.markers[[1]], 'clus1.markers.txt', sep = '\\t')\n\n#looking at cluster genes to see if any cells we don't want to include may have made it through prior clustering\nhead(pr.markers[[3]], 20)\n\n#we will not use clusters 7 (Myeloid cells), 10 (Wound epidermis), 13 (Erythrocytes) so these aren't included in trajectory analysis\n#get cell names for the cells we will use in URD\ndpa23_good_cells <- cellsInCluster(blastema.23dpa, clustering = 'Infomap-100', cluster = c('1','2','3','4','5','6','8','9','11','12'))\n\n#let's clean up the other two time points since it's clear that WE, erythrocytes, etc. may be contaminating the clusters\n#create URD object with just early-bud sample\nblastema.14dpa <- urdSubset(blastema, cells.keep=cellsInCluster(blastema, \"stage\", \"early-bud\"))\n\n#use variable genes found for early-bud\[email protected] <- var.14dpa\n\n#calculate PCA and tSNE\nblastema.14dpa <- calcPCA(blastema.14dpa, mp.factor = 1.5)\npcSDPlot(blastema.14dpa)\nset.seed(20)\nblastema.14dpa <- calcTsne(blastema.14dpa)\n\n#perform graph-based clustering\nblastema.14dpa <- graphClustering(blastema.14dpa, num.nn = c(30, 40, 50, 70), method = 'Louvain', do.jaccard=T)\nblastema.14dpa <- graphClustering(blastema.14dpa, num.nn = c(50, 60, 70, 80, 100), method = 'Infomap', do.jaccard=T)\nclusterings <- c(paste0('Louvain-',c(30, 40, 50, 70)), paste0('Infomap-', c(50, 60, 70, 80, 100)))\nfor (c in clusterings) {plot(plotDim(blastema.14dpa, c, legend=F))}\n\n#iwe chose to go forward with Infomap-50 based clustering\nclusters <- unique([email protected]$'Infomap-50')\n\n#calculate marker genes\npr.markers_14dpa <- lapply(clusters, function(c) markersAUCPR(blastema.14dpa, clust.1 = c, clustering = 'Infomap-50', genes.use= [email protected]))\n\n#inspect genes that define subsets, remembering that cluster number 
is found in column names\n#either or export to table or inspect in R/Rstudio\nwrite.table(pr.markers_14dpa[[1]], '081218.clus1.txt', sep = '\\t')\n#looking at cluster genes\nhead(pr.markers_14dpa[[10]])\n\n#need to remove clusters 1 (WE), 7 (WE), 11(Erythrocytes), 13(Immune) which would be 217 cells that likely wont participate in this\n#get names of cells we will use in URD\ndpa14_good_cells <- cellsInCluster(blastema.14dpa, clustering = 'Infomap-50', cluster = c('2','3','4','5','6','8','9','10','12','14','15','16')) \n\n#finally, let's clean the wound healing cells\n#create a subsetted object of cells from wound healing\nblastema.3dpa <- urdSubset(blastema, cells.keep=cellsInCluster(blastema, \"stage\", \"wound_healing\"))\n\n#use wound healing variable genes\[email protected] <- var.14dpa\n\n#calculate PCA and tSNE\nblastema.3dpa <- calcPCA(blastema.3dpa, mp.factor = 1.5)\n\npcSDPlot(blastema.3dpa)\nset.seed(20)\nblastema.3dpa <- calcTsne(blastema.3dpa)\n\n#graph-based clustering\nblastema.3dpa <- graphClustering(blastema.3dpa, num.nn = c(30, 40, 50, 70), method = 'Louvain', do.jaccard=T)\nblastema.3dpa <- graphClustering(blastema.3dpa, num.nn = c(50, 60, 70, 80, 100), method = 'Infomap', do.jaccard=T)\nclusterings <- c(paste0('Louvain-',c(30, 40, 50, 70)), paste0('Infomap-', c(50, 60, 70, 80, 100)))\nfor (c in clusterings) {plot(plotDim(blastema.3dpa, c, legend=F))}\n\n\n#we chose to go forward with Infomap-60\nclusters <- unique([email protected]$'Infomap-60')\n\n#find marker genes\npr.markers_3dpa <- lapply(clusters, function(c) markersAUCPR(blastema.3dpa, clust.1 = c, clustering = 'Infomap-60', genes.use= [email protected]))\n\n#inspect marker genes to determine what to remove\nhead(pr.markers_3dpa[[8]])\n\n#we will remove 3 (WE) and 12 (erythrocytes)\ndpa3_good_cells <- cellsInCluster(blastema.3dpa, clustering = 'Infomap-60', cluster = c('1','2','4','5','6','7','8','9','10','11','13'))\n\n#collect all cells to use in URD\npaste(dpa23_good_cells, 
dpa14_good_cells) -> cells_to_use\npaste(cells_to_use, dpa3_good_cells) -> cells_for_urd\n\n\n#make an URD object with the cells we want to use\ncleaned <- urdSubset(blastema, cells.keep= c(dpa3_good_cells, dpa23_good_cells, dpa14_good_cells))\n\n#re-calculate variable genes for these\nstages <- sort(unique([email protected]$stage))\nvar.3dpa <- findVariableGenes(cleaned, cells.fit=cellsInCluster(cleaned, \"stage\", 'wound_healing'), set.object.var.genes=F, diffCV.cutoff=0.3, mean.min=.005, mean.max=100, do.plot = T)\nvar.14dpa <- findVariableGenes(cleaned, cells.fit=cellsInCluster(cleaned, \"stage\", 'early-bud'), set.object.var.genes=F, diffCV.cutoff=0.3, mean.min=.005, mean.max=100, do.plot = T)\nvar.23dpa <- findVariableGenes(cleaned, cells.fit=cellsInCluster(cleaned, \"stage\", 'medium-bud'), set.object.var.genes=F, diffCV.cutoff=0.3, mean.min=.005, mean.max=100, do.plot = T)\nvar.genes <- sort(unique(unlist(var.3dpa, var.14dpa, var.23dpa)))\[email protected] <- var.genes\n\n#calculate PCA\ncleaned <- calcPCA(cleaned, mp.factor = 2)\npcSDPlot(cleaned)\n\n#calculate tSNE, see Figure 7a\nset.seed(19)\ncleaned <- calcTsne(object = cleaned)\nplotDim(cleaned, \"time\", plot.title = \"tSNE by time point\", legend = F, point.size = 2)\n\n#calculate diffision map, allowing destinty to determine sigma (this value was determined to be 19.538, which we used)\ncleaned <- calcDM(cleaned, knn = 54, sigma = 19.538)\n\n#visualize dim arrays\nplotDimArray(cleaned, reduction.use = \"dm\", dims.to.plot = 1:16, outer.title = \"Diffusion Map (Sigma 19.538, 54 NNs): DPA\", label=\"stage\", plot.title=\"\", legend=F)\n\n#tsne with transitions\nplotDim(cleaned, \"time\", transitions.plot = 10000, plot.title=\"DPA (with transitions)\")\n\n#use cells from wound healing as root\nroot.cells <- cellsInCluster(cleaned, \"stage\", \"wound_healing\")\n\n#run 'flood' simulations\ncleaned.floods <- floodPseudotime(cleaned, root.cells = root.cells, n=50, minimum.cells.flooded = 2, 
verbose=F)\n\n#process simulations into a pseudotime\ncleaned <- floodPseudotimeProcess(cleaned, cleaned.floods, floods.name=\"pseudotime\")\n\n#check for adequate number of simulations (should reach asymptote)\npseudotimePlotStabilityOverall(cleaned)\n\n#visualize tSNE with pseudotime overlaid\nplotDim(cleaned, \"pseudotime\")\n\n#plot pseudtime at each time point, Figure 7b\nplotDists(cleaned, \"pseudotime\", \"time\", plot.title=\"Pseudotime by time point\", legend = F)\n\n#create URD object with just medium-bud cells\ncleaned.23dpa <- urdSubset(cleaned, cells.keep=cellsInCluster(cleaned, \"stage\", \"medium-bud\"))\n\n#use medium-bud variable genes\[email protected] <- var.23dpa\n\n# Calculate PCA and tSNE\ncleaned.23dpa <- calcPCA(cleaned.23dpa, mp.factor = 1.5)\npcSDPlot(cleaned.23dpa)\nset.seed(20)\ncleaned.23dpa <- calcTsne(cleaned.23dpa)\n\n#perform graph-based clustering\ncleaned.23dpa <- graphClustering(cleaned.23dpa, num.nn = c(30, 40, 50, 70), method = 'Louvain', do.jaccard=T)\ncleaned.23dpa <- graphClustering(cleaned.23dpa, num.nn = c(50, 60, 70, 80, 100), method = 'Infomap', do.jaccard=T)\nclusterings <- c(paste0('Louvain-',c(30, 40, 50, 70)), paste0('Infomap-', c(50, 60, 70, 80, 100)))\nfor (c in clusterings) {plot(plotDim(cleaned.23dpa, c, legend=T))}\n\n#tSNE plot Figure 7c\nplotDim(cleaned.23dpa, 'Infomap-100', plot.title = \"Medium-bud blastema populations\", legend = F, point.size = 2)\n\n#we chose to go forward with Infomap-100\nclusters <- unique([email protected]$'Infomap-100')\n\n#find marker genes, see Supplementary Data files for marker lists\npr.markers_23dpa_cleaned <- lapply(clusters, function(c) markersAUCPR(cleaned.23dpa, clust.1 = c, clustering = 'Infomap-100', genes.use= [email protected]))\n#inspect markers in R\nhead(pr.markers_23dpa_cleaned[[3]], 20)\n\n#distal blastema population has some WE markers, let's remove WE cells\nplotDot(cleaned.23dpa, genes = 
c('c1069858_g1_i1^sp|Q90X25|HXA13_CHICK^HoxA13_N','c1070920_g1_i4^sp|Q2VL56|PAX9_SAGOE^PAX^Tm2','c1020768_g1_i2^sp|Q9H2S6|TNMD_HUMAN^BRICHOS^Tm1','c1083312_g1_i2^sp|P70390|SHOX2_MOUSE','c1081900_g1_i4^sp|P25815|S100P_HUMAN','c1091168_g1_i2^sp|Q66S13|NATT4_THANI^DUF946^sigP'), clustering = 'Infomap-100')\ndistal.score <- apply([email protected][c('c1069858_g1_i1^sp|Q90X25|HXA13_CHICK^HoxA13_N','c1070920_g1_i4^sp|Q2VL56|PAX9_SAGOE^PAX^Tm2','c1020768_g1_i2^sp|Q9H2S6|TNMD_HUMAN^BRICHOS^Tm1','c1083312_g1_i2^sp|P70390|SHOX2_MOUSE'), cellsInCluster(cleaned.23dpa, 'Infomap-100', '1')], 2, sum.of.logs)\nnew.distal <- names(which(distal.score > 0))\nremove.distal <- names(which(distal.score <= 0))\n\n#add names for all pops\ni100.n <- length(unique([email protected]$'Infomap-100'))\ni100.cluster.assignments <- data.frame(clusters = 1:i100.n, name = rep(NA, i100.n), tip = rep(NA, i100.n), row.names = 1:i100.n)\ni100.cluster.assignments['1', 'name'] <- \"Distal Blastema\"\ni100.cluster.assignments['2', 'name'] <- \"FAPs\"\ni100.cluster.assignments['3', 'name'] <- \"Synovial Fibroblasts\"\ni100.cluster.assignments['4', 'name'] <- \"Cartilage\"\ni100.cluster.assignments['5', 'name'] <- \"Osteoblast-like\"\ni100.cluster.assignments['6', 'name'] <- \"Joint\"\ni100.cluster.assignments['7', 'name'] <- \"Schwann\" \ni100.cluster.assignments['8', 'name'] <- \"Endothelial\"\ni100.cluster.assignments['9', 'name'] <- \"Myogenic\"\ni100.cluster.assignments['10', 'name'] <- \"Pericytes\" \n\n\ncluster.assignments <- i100.cluster.assignments\n\ncluster.assignments <- cluster.assignments[!is.na(cluster.assignments$name), ]\ncluster.assignments$cluster.new <- 1:nrow(cluster.assignments)\n\n\[email protected]$'23dpa-Infomap-100' <- NA\[email protected][rownames([email protected]), '23dpa-Infomap-100'] <- [email protected]$'Infomap-100'\[email protected]$'23dpa-Cluster' <- NA\[email protected]$clusters.23dpa.name <- NA\[email protected]$clusters.23dpa.num <- NA\n\n#so now for cluster 1 I'll 
just give it the cleaned new.distal \[email protected][new.distal, \"clusters.23dpa.name\"] <- cluster.assignments[1, \"name\"] \[email protected][new.distal, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[1, \"cluster.new\"])\n\ncells_2 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '2') \[email protected][cells_2, \"clusters.23dpa.name\"] <- cluster.assignments[2, \"name\"] \[email protected][cells_2, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[2, \"cluster.new\"])\n\ncells_3 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '3') \[email protected][cells_3, \"clusters.23dpa.name\"] <- cluster.assignments[3, \"name\"] \[email protected][cells_3, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[3, \"cluster.new\"])\n\ncells_4 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '4') \[email protected][cells_4, \"clusters.23dpa.name\"] <- cluster.assignments[4, \"name\"] \[email protected][cells_4, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[4, \"cluster.new\"])\n\ncells_5 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '5') \[email protected][cells_5, \"clusters.23dpa.name\"] <- cluster.assignments[5, \"name\"] \[email protected][cells_5, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[5, \"cluster.new\"])\n\ncells_6 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '6') \[email protected][cells_6, \"clusters.23dpa.name\"] <- cluster.assignments[6, \"name\"] \[email protected][cells_6, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[6, \"cluster.new\"])\n\ncells_7 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '7') \[email protected][cells_7, \"clusters.23dpa.name\"] <- cluster.assignments[7, \"name\"] \[email protected][cells_7, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[7, \"cluster.new\"])\n\ncells_8 <- cellsInCluster(cleaned.23dpa, 
clustering = 'Infomap-100', cluster = '8') \[email protected][cells_8, \"clusters.23dpa.name\"] <- cluster.assignments[8, \"name\"] \[email protected][cells_8, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[8, \"cluster.new\"])\n\ncells_9 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '9') \[email protected][cells_9, \"clusters.23dpa.name\"] <- cluster.assignments[9, \"name\"] \[email protected][cells_9, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[9, \"cluster.new\"])\n\ncells_10 <- cellsInCluster(cleaned.23dpa, clustering = 'Infomap-100', cluster = '10') \[email protected][cells_10, \"clusters.23dpa.name\"] <- cluster.assignments[10, \"name\"] \[email protected][cells_10, \"clusters.23dpa.num\"] <- as.character(cluster.assignments[10, \"cluster.new\"])\n\n\[email protected]$'23dpa-Infomap-100' <- NA\[email protected][rownames([email protected]), '23dpa-Infomap-100'] <- [email protected]$'Infomap-100'\[email protected][rownames([email protected]), '23dpa-Cluster'] <- [email protected]$clusters.23dpa.name\[email protected]$'23dpa-Cluster-Num'<- NA\[email protected][rownames([email protected]), '23dpa-Cluster-Num'] <- [email protected]$clusters.23dpa.num\n\n\[email protected][rownames([email protected]), \"tip.clusters\"] <- [email protected]$clusters.23dpa.num\n\n#determine potential for terminal populations\npotential <- clusterTipPotential(cleaned, 'pseudotime', 'tip.clusters', name.store = 'tip.potential')\npotential \n\n#tips = 3, 4, 5, 6, 7, 8, 9, 10, so everything but \"distal blastema\" and \"FAPs\"\n\nonly.tips <- cellsInCluster(cleaned, clustering= '23dpa-Cluster-Num', cluster = c('3','4','5','6','7','8','9','10'))\ntips <- urdSubset(cleaned, cells.keep= only.tips)\[email protected][rownames([email protected]), \"real.tip.clusters\"] <- [email protected]$'23dpa-Cluster-Num'\n\n#determine parameters of the logistic used to bias the transition probabilities\ncleaned.ptlogistic <- 
pseudotimeDetermineLogistic(cleaned, \"pseudotime\", optimal.cells.forward=20, max.cells.back=40, do.plot = T)\n\n#bias the transition matrix acording to pseudotime\ncleaned.biased.tm <- as.matrix(pseudotimeWeightTransitionMatrix(cleaned, \"pseudotime\", logistic.params=cleaned.ptlogistic))\n\n#simulate the biased random walks from each tip\ncleaned.walks <- simulateRandomWalksFromTips(cleaned, tip.group.id=\"real.tip.clusters\", root.cells=root.cells, transition.matrix = cleaned.biased.tm, n.per.tip = 25000, root.visits = 1, max.steps = 5000, verbose = F)\n\n#process the biased random walks into visitation frequencies\ncleaned <- processRandomWalksFromTips(cleaned, cleaned.walks, verbose = F)\n\n#color only tip clusters on tSNE\nplotDim(cleaned, \"real.tip.clusters\", plot.title=\"Cells in each tip\")\n\n#load tip clusters into tree\ncleaned.tree <- loadTipCells(cleaned, \"real.tip.clusters\")\n\n#build tree\ncleaned.tree <- buildTree(cleaned.tree, pseudotime = \"pseudotime\", tips.use=c('3','4','5','6','7','8','9','10'), divergence.method = \"preference\", cells.per.pseudotime.bin = 25, bins.per.pseudotime.window = 8, save.all.breakpoint.info = T, p.thresh=0.001)\n\n#rename clusters\ncleaned.tree <- nameSegments(cleaned.tree, segments=c('3','4','5','6','7','8','9','10'), segment.names = c(\"Synovial Fibros\", \"Cartilage\", \"Osteoblast-like\", \"Joint\",\"Schwann\", \"Endothelial\", \"Myogenic\", \"Pericyte\"), short.names = c(\"Synovial Fibros\", \"Cartilage\", \"Osteoblast-like\", \"Joint\",\"Schwann\", \"Endothelial\", \"Myogenic\", \"Pericyte\"))\n\n#plot tree with time point info overlaid\nplotTree(cleaned.tree, \"stage\", title=\"DPA\")\n\n#plot tree with medium-bud blastema colors overlaid\nplotTree(cleaned.tree, 'tip.clusters', title = '23dpa_clusters', cell.alpha = 0.5, cell.size = 2, tree.alpha = 0.5, tree.size = .25)\n\njoint.markers <- aucprTestAlongTree(cleaned.tree, pseudotime=\"pseudotime\", tips='Joint', log.effect.size=0.4, auc.factor = 1.25, 
max.auc.threshold = 0.85, frac.must.express = 0.1, frac.min.diff = 0, genes.use=genes.use, only.return.global=F, must.beat.sibs=0.6, report.debug=T)\n\nsynovial.markers <- aucprTestAlongTree(cleaned.tree, pseudotime=\"pseudotime\", tips='Synovial Fibros', log.effect.size=0.4, auc.factor = 1.25, max.auc.threshold = 0.85, frac.must.express = 0.1, frac.min.diff = 0, genes.use=genes.use, only.return.global=F, must.beat.sibs=0.6, report.debug=T)\n\n#visuzalize population specific markers\n#osteoblast-like\n\np1<- plotTree(cleaned.tree, 'c862122_g2_i1^sp|Q6DJ00|OSTCN_XENTR^sp|P40147|OSTCN_XENLA^Gla^sigP', title = 'OSTCN')\np2 <- plotTree(cleaned.tree, 'c1034953_g1_i2^sp|A1YQ92|ODAM_MACMU', title = 'ODAM')\n\n#example of graph in Supplementary Fig7\nplot_grid(p1,p2)\n\n#can continue to overlay markers like shown above\n\n#save\nsave.image('blastema.URD.RData')\n\n#quit\nq()" }, { "alpha_fraction": 0.6266810297966003, "alphanum_fraction": 0.7287520170211792, "avg_line_length": 63.11206817626953, "blob_id": "b8f79b8d0a36370d33c6e2f1e613102b9292d783", "content_id": "dd3de74f0fd22e119d7f07941d1413ac1952bb07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 7436, "license_type": "no_license", "max_line_length": 758, "num_lines": 116, "path": "/Leigh_et_al_2018_Supplementary_R_code/Medium_bud_blastema.R", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "#Medium-bud stage blastema samples\n#This section analyses six samples collected during medium bud stage (23 days post amputation) \n\n#First, load the required packages. 
\nlibrary(Seurat)\nlibrary(dplyr)\n\n#load in first three medium-bud samples\ndata.d1 = read.table(\"early_and_medium_bud.repGene\", header=T, row.names=1, sep='\\t')\n#sample S1, S2, and S4 are medium-bud samples collected on day 1\nS1_S2_S4 = data.d1[,grep(\"^S[124]_\",colnames(data.d1))]\n\n#create Seurat object and make sparse\nseurat_S1_S2_S4 = CreateSeuratObject(raw.data = S1_S2_S4, project = \"23dpa_d1\", min.cells = 8, min.genes = 200)\nseurat_S1_S2_S4 <- MakeSparse(seurat_S1_S2_S4) \n\n#list mito genes medium-bud day 1 matrix\nmito.genes.i4.d1 <- c(\"c1084180_g1_i1^sp|Q8LWP6|CYB_RANSI\", \"c1043008_g2_i1^sp|Q9B205|CYB_CAICR\", \"c1084180_g3_i1^sp|Q8LWP6|CYB_RANSI\", \"c786641_g1_i1^sp|Q9B205|CYB_CAICR\", \"c1060846_g1_i1^sp|Q8WA47|CYB_MUSMA\", \"c1057599_g1_i1^sp|P00018|CYC_DRONO^Cytochrome_CBB3\", \"c1127119_g1_i1^sp|P81280|CYC_ALLMI\", \"c220469_g1_i1^sp|P00397|COX1_MOUSE\", \"c1451851_g1_i1^sp|Q9ZXY2|COX1_PAPHA\", \"c1088733_g1_i1^sp|Q9ZZM6|COX1_SALSA\", \"c934922_g1_i1^sp|Q9ZXX8|COX3_PAPHA\", \"c959712_g1_i1^sp|P00416|COX3_MOUSE\", \"c1049442_g1_i1^sp|Q96133|COX3_CARAU\", \"c1083417_g1_i2^sp|P00419|COX3_XENLA\", \"c1027109_g1_i1^sp|Q35920|ATP6_SALSA\", \"c1083535_g6_i1^sp|Q4JQI7|NU1M_TETNG\", \"c1060846_g2_i1^sp|P03921|NU5M_MOUSE\", \"c1068681_g4_i1^sp|Q9ZZM3|NU5M_SALSA^sp|P82013|VDAC2_MELGA^Porin_3\")\n\n#calculate the percentage mitochondrial RNA for each cell\npercent.mito.i4.d1 <- Matrix::colSums([email protected][mito.genes.i4.d1, ])/Matrix::colSums([email protected])\n#add the percent mitochondrial content of each cell to the Seurat object\nseurat_S1_S2_S4 <- AddMetaData(object = seurat_S1_S2_S4, metadata = percent.mito.i4.d1, col.name = \"percent.mito\") \n\n#remove raw data\nrm(data.d1, S1_S2_S4)\n\n#load in second set of three medium-bud samples collected on day 2\ndata.d2 = read.table('wound_healing_and_medium_bud.repGene', header=T, row.names=1, sep='\\t')\n#samples N1, N2, and N3 are medium-bud stage blastema samples collected on day 
2\nN1_N2_N3 = data.d2[,grep(\"^N[123]\",colnames(data.d2))]\n\n#create seurat object and make sparse\nseurat_N1_N2_N3 = CreateSeuratObject(raw.data = N1_N2_N3, project = \"23dpa_d2\", min.cells = 8, min.genes = 200)\nseurat_N1_N2_N3 <- MakeSparse(seurat_N1_N2_N3)\n\n#list mito genes from day 2 matrix\nmito.genes.i4.d2 <- c(\"c786641_g1_i1^sp|Q9B205|CYB_CAICR\", \"c1084180_g1_i1^sp|Q8LWP6|CYB_RANSI\", \"c1043008_g2_i1^sp|Q9B205|CYB_CAICR\", \"c1060846_g1_i1^sp|Q8WA47|CYB_MUSMA\", \"c1084180_g3_i1^sp|Q8LWP6|CYB_RANSI\", \"c1027109_g1_i1^sp|Q35920|ATP6_SALSA\", \"c1088733_g1_i1^sp|Q9ZZM6|COX1_SALSA\", \"c220469_g1_i1^sp|P00397|COX1_MOUSE\", \"c1451851_g1_i1^sp|Q9ZXY2|COX1_PAPHA\", \"c289614_g1_i1^sp|P05503|COX1_RAT\", \"c959712_g1_i1^sp|P00416|COX3_MOUSE\", \"c1049442_g1_i1^sp|Q96133|COX3_CARAU\", \"c1083417_g1_i2^sp|P00419|COX3_XENLA\", \"c934922_g1_i1^sp|Q9ZXX8|COX3_PAPHA\", \"c1083535_g6_i1^sp|Q4JQI7|NU1M_TETNG\", \"c1060846_g2_i1^sp|P03921|NU5M_MOUSE\", \"c1068681_g4_i4^sp|Q9ZZM3|NU5M_SALSA\")\n\n#calculate the percentage mitochondrial RNA for each cell\npercent.mito.i4.d2 <- Matrix::colSums([email protected][mito.genes.i4.d2, ])/Matrix::colSums([email protected])\n#add the percent mitochondrial content of each cell to the Seurat object\nseurat_N1_N2_N3 <- AddMetaData(object = seurat_N1_N2_N3, metadata = percent.mito.i4.d2, col.name = \"percent.mito\")\n\n#save each sample info in a metadata sample slot\nseurat_N1_N2_N3 <- StashIdent(seurat_N1_N2_N3, save.name = 'sample')\nseurat_S1_S2_S4 <- StashIdent(seurat_S1_S2_S4, save.name = 'sample')\n\n#custom filters\nseurat_S1_S2_S4 <- FilterCells(object = seurat_S1_S2_S4, subset.names = c(\"nGene\", \"percent.mito\"), low.thresholds = c(850, -Inf), high.thresholds = c(6000, 0.1))\nseurat_N1_N2_N3 <- FilterCells(object = seurat_N1_N2_N3, subset.names = c(\"nGene\", \"percent.mito\"), low.thresholds = c(850, -Inf), high.thresholds = c(6000, 0.1))\n\n#normalize data\nseurat_N1_N2_N3 <- NormalizeData(object = 
seurat_N1_N2_N3, normalization.method = \"LogNormalize\", scale.factor= 10000)\nseurat_S1_S2_S4 <- NormalizeData(object = seurat_S1_S2_S4, normalization.method = \"LogNormalize\", scale.factor= 10000)\n\n#find variable genes\nseurat_N1_N2_N3 <- FindVariableGenes(object = seurat_N1_N2_N3, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)\nseurat_S1_S2_S4 <- FindVariableGenes(object = seurat_S1_S2_S4, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)\n\n#scale data and regress out nUMI and percent.mito\nseurat_N1_N2_N3 <- ScaleData(object = seurat_N1_N2_N3, vars.to.regress = c('nUMI', 'percent.mito'))\nseurat_S1_S2_S4 <- ScaleData(object = seurat_S1_S2_S4, vars.to.regress = c('nUMI', 'percent.mito'))\n\n#add day collected information into a metadata column\[email protected]$day <- \"d1\"\[email protected]$day <- \"d2\"\n\n#find highly variable genes\nhvg.d1 <- rownames(x = head(x = [email protected], n = 2000))\nhvg.d2 <- rownames(x = head(x = [email protected], n = 2000))\nhvg.union <- union(x = hvg.d1, y = hvg.d2)\n\n#run canonical correlation analysis and save resulting matrix as a Seurat object called combined\ncombined <- RunCCA(object = seurat_S1_S2_S4, object2= seurat_N1_N2_N3, genes.use = hvg.union, num.cc = 30)\n\n#determine cca dimensions to use in downstream analysis\nDimHeatmap(object = combined, reduction.type = \"cca\", cells.use = 500, dim.use = 1:9, do.balanced = TRUE)\nDimHeatmap(object = combined, reduction.type = \"cca\", cells.use = 500, dim.use = 10:19, do.balanced = TRUE)\nDimHeatmap(object = combined, reduction.type = \"cca\", cells.use = 500, dim.use = 20:29, do.balanced = TRUE)\n\n#we chose first 25 dimensions\ncombined <- AlignSubspace(combined, reduction.type = \"cca\", grouping.var = \"day\", dims.align = 1:25)\n\n#perform integrated analysis on all cells\ncombined <- RunTSNE(combined, reduction.use = 
\"cca.aligned\", dims.use = 1:25, do.fast = T)\ncombined <- FindClusters(combined, reduction.type = \"cca.aligned\", resolution = 1, dims.use = 1:25)\n\n# Build a phylogenetic tree to see how cells are related while simultaneously renaming and reordering cluster names according to their #position on the tree. This will be important to determine when deciding whether similar populations should be merged. \ncombined <- BuildClusterTree(combined, do.reorder=TRUE, reorder.numeric=TRUE)\n\n#assess nodes\nnode.scores <- AssessNodes(combined)\nnode.scores[order(node.scores$oobe, decreasing = TRUE), ] -> node.scores\nnode.scores\n\n#merge first 2 nodes\nnodes.merge <- node.scores[1:2, ] \nnodes.to.merge <- sort(x = nodes.merge$node)\ncombined.merged <- combined \nfor (n in nodes.to.merge) {combined.merged <- MergeNode(object = combined.merged, node.use = n) }\n\n#visualize tSNE by sample, day collected, and assigned cluster \nTSNEPlot(combined.merged, do.return = T, pt.size = 0.5, group.by = \"sample\")\nTSNEPlot(combined.merged, do.return = T, pt.size = 0.5, group.by = \"day\")\nTSNEPlot(combined.merged, do.label = T, do.return = T, pt.size = 0.5)\n\n#find DE genes for each population\nall.markers.merged <- FindAllMarkers(object = combined.merged, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25, logfc.threshold = 0.35, max.cells.per.ident = 2000)\n\n#write DE results to table for inspection\nwrite.table(all.markers.merged, 'all.markers.medium.bud.txt', sep = '\\t') \n\nsave.image('medium_bud_blastema.RData')\n\nq()" }, { "alpha_fraction": 0.7181637287139893, "alphanum_fraction": 0.7626492381095886, "avg_line_length": 57.14876174926758, "blob_id": "d82ec0883be96429f30066027a9c9bb029bb0893", "content_id": "dd990ea58ea5ccb1701c347fa8a093c19e253480", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 7036, "license_type": "no_license", "max_line_length": 554, "num_lines": 121, "path": 
"/Leigh_et_al_2018_Supplementary_R_code/Homeostatic_limb.R", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "# Homeostatic limb\n#This section analyses the homeostatic limb, which includes two samples. \n\n#First, load the required packages. \nlibrary(Seurat)\nlibrary(dplyr)\n\n\n#Now, load the cell by gene matrix that includes the two homeostatic limb samples. This cell by gene matrix has three other samples, that we #will not use right now. So we need to select out only samples 1 and 2. We will use intact to as shorthand for homeostatic limbs. \n\n#load in data\ninDrops3.data = read.table('intact_and_contralateral.repGene', header = T, row.names = 1, sep = '\\t')\n#pull out samples 1 and 2, which are the intact limb samples\ninDrops3.intact = inDrops3.data[,grep('^S[12]_', colnames(inDrops3.data))]\n\n#Remove data matrix with extra samples\nrm(inDrops3.data) \n#Create Seurat object and make sparse\nseurat_inDrops3.intact = CreateSeuratObject(inDrops3.intact, project = 'inDrops3.intact', min.cells = 8, min.genes = 200)\nseurat_inDrops3.intact = MakeSparse(seurat_inDrops3.intact)\n\n#While we did some filtering above, we need to perform further quality control to ensure that the cells we are working with aren't apoptotic #or have a dearth of genes. First, we need to identify the mitochondrial genes present in this matrix. The axolotl mitochondrial genome can #be found here: https://www.ncbi.nlm.nih.gov/nuccore/AJ584639. Remember that the genes are written as protein names when greping for #mitochondrial genes. \n\n#find mitochonrial genes in matrix. 
The protein name should be used and changed for each gene within the mitochondrial genome.\ngrep(pattern = \"*CYB_*\", x = rownames(x = seurat_inDrops3.intact@data), value = TRUE)\n#list of all mitochondrial genes in this intact matrix\nmito.genes.intact <- c(\"c1084180_g3_i1^sp|Q8LWP6|CYB_RANSI\", \"c1060846_g1_i1^sp|Q8WA47|CYB_MUSMA\", \"c1084180_g1_i1^sp|Q8LWP6|CYB_RANSI\", \"c1451851_g1_i1^sp|Q9ZXY2|COX1_PAPHA\", \"c220469_g1_i1^sp|P00397|COX1_MOUSE\", \"c1088733_g1_i1^sp|Q9ZZM6|COX1_SALSA\", \"c1083417_g1_i2^sp|P00419|COX3_XENLA\", \"c1049442_g1_i1^sp|Q96133|COX3_CARAU\", \"c934922_g1_i1^sp|Q9ZXX8|COX3_PAPHA\", \"c1083535_g6_i1^sp|Q4JQI7|NU1M_TETNG\", \"c1025234_g1_i1^sp|O63796|NU2M_ANACA\", \"c1068681_g4_i1^sp|Q9ZZM3|NU5M_SALSA^sp|P82013|VDAC2_MELGA^Porin_3\", \"c1027109_g1_i1^sp|Q35920|ATP6_SALSA\")\n\n#calculate the percentage mitochondrial RNA for each cell\npercent.mito.intact <- Matrix::colSums([email protected][mito.genes.intact, ])/Matrix::colSums([email protected])\n#add the percent mitochondrial content of each cell to the Seurat object\nseurat_inDrops3.intact <- AddMetaData(object = seurat_inDrops3.intact, metadata = percent.mito.intact, col.name = \"percent.mito\")\n\n#Now perform quality control on matrix by filtering out cells with high percent mitochondrial RNA and low and high number of genes. We filter #out cells that by visualize inspection appear to have relatively high mitochondrial RNA content or high or low number of genes. These #numbers can be modified to be more or less inclusive. 
\n\n#visualize number of genes, unique molecular identifiers (UMI), and percent mitochondrial RNA\nVlnPlot(object = seurat_inDrops3.intact, features.plot = c(\"nGene\", \"nUMI\", \"percent.mito\"), nCol = 3)\n\n#filter out cells\nseurat_inDrops3.intact <- FilterCells(object = seurat_inDrops3.intact, subset.names = c(\"nGene\", \"percent.mito\"), low.thresholds = c(850, -Inf), high.thresholds = c(4000, 0.125))\n\n#normalize data\nseurat_inDrops3.intact <- NormalizeData(seurat_inDrops3.intact, normalization.method= \"LogNormalize\", scale.factor= 10000)\n\n#find variable genes\nseurat_inDrops3.intact <- FindVariableGenes(object = seurat_inDrops3.intact, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5, do.plot = FALSE)\n\n#scale data and regress out nUMI and percent.mito\nseurat_inDrops3.intact <- ScaleData(seurat_inDrops3.intact, vars.to.regress = c('nUMI', 'percent.mito'))\n\n\n#Next, we perform linear dimensional reduction and visualize the results in a few different ways. 
\n\nseurat_inDrops3.intact <- RunPCA(object = seurat_inDrops3.intact, pc.genes = [email protected], do.print = TRUE, pcs.print = 1:5, genes.print = 5)\n\n#visualize results\nPrintPCA(object = seurat_inDrops3.intact, pcs.print = 1:5, genes.print = 5, use.full = FALSE)\nVizPCA(object = seurat_inDrops3.intact, pcs.use = 1:2)\nPCAPlot(object = seurat_inDrops3.intact, dim.1 = 1, dim.2 = 2)\nseurat_inDrops3.intact <- ProjectPCA(object = seurat_inDrops3.intact, do.print = FALSE)\nPCHeatmap(object = seurat_inDrops3.intact, pc.use = 1, cells.use = 500, do.balanced = TRUE, label.columns = FALSE)\nPCHeatmap(object = seurat_inDrops3.intact, pc.use = 1:12, cells.use = 500, do.balanced = TRUE, label.columns = FALSE, use.full = FALSE)\nPCHeatmap(object = seurat_inDrops3.intact, pc.use = 13:20, cells.use = 500, do.balanced = TRUE, label.columns = FALSE, use.full = FALSE)\n\n#plot standard deviations to chose PCs to use in downstream analysis, here we chose 18\nPCElbowPlot(object = seurat_inDrops3.intact)\n\n\n#Now we can identify cell populations within the homeostatic limb, vizualize the resulting populations using tSNE, and subsequently find markers that define these different populations \n\n#find clusters using first 18 PCs\nseurat_inDrops3.intact <- FindClusters(object = seurat_inDrops3.intact, reduction.type = \"pca\", dims.use = 1:18, resolution = 1.5, print.output = 0, save.SNN = TRUE)\n\n#run non-linear dimensional reduction\nseurat_inDrops3.intact <- RunTSNE(object = seurat_inDrops3.intact, dims.use = 1:18, do.fast = TRUE)\n\n# Build a phylogenetic tree to see how cells are related while simultaneously renaming and reordering cluster names according to their #position on the tree. This will be important to determine when deciding whether similar populations should be merged. 
\nseurat_inDrops3.intact <- BuildClusterTree(seurat_inDrops3.intact, do.reorder=TRUE, reorder.numeric=TRUE)\n\n#visualize tSNE \nset.seed(5)\nTSNEPlot(object = seurat_inDrops3.intact, do.label = T)\n\n#visulize tSNE based on sample to determine how similar the two samples are to one another\nTSNEPlot(object = seurat_inDrops3.intact, group.by = 'orig.ident')\n\n#assess nodes\nnode.scores <- AssessNodes(seurat_inDrops3.intact)\nnode.scores[order(node.scores$oobe, decreasing = TRUE), ] -> node.scores\nnode.scores\n\n\n#merge first 7 nodes\n#select nodes to merge\nnodes.merge <- node.scores[1:7, ]\nnodes.to.merge <- sort(x = nodes.merge$node)\n\n#create a new Seurat object in which we will merge our selected nodes\nmerged <- seurat_inDrops3.intact\n#merge nodes\nfor (n in nodes.to.merge) {merged <- MergeNode(object = merged, node.use = n)}\n\n\n#re-visualize the tSNE after we have merged the non-distinct nodes\nset.seed(5)\nTSNEPlot(merged, do.label = TRUE)\n\n\n#determine differentially expressed genes for each population\n\n#find markers for each population\nall.markers <- FindAllMarkers(merged, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)\n\n#write DE results to table for inspection\nwrite.table(all.markers, 'intact.only.markers.txt', sep = '\\t')\n\n#save Rdata\nsave.image('intact.Rdata')\n\n#end session\nq()\n" }, { "alpha_fraction": 0.6964285969734192, "alphanum_fraction": 0.7551947832107544, "avg_line_length": 53.522125244140625, "blob_id": "4fad5601c06dca9d0cae21cd18e35e2b78ba8d01", "content_id": "e326655001ca1a1682414306ff409c78c2f453ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6160, "license_type": "no_license", "max_line_length": 589, "num_lines": 113, "path": "/Leigh_et_al_2018_Supplementary_R_code/Early_bud_blastema.R", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "#Early-bud stage blastema samples\n#This section analyses two samples collected during early bud 
stage (14 days post amputation) \n\n#First, load the required packages. \nlibrary(Seurat)\nlibrary(dplyr)\n\n#load in data\ninDrops4.d1 = read.table('early_and_medium_bud.repGene', header = T, row.names = 1, sep = '\\t')\n#pull out samples S3 and S5 which are the early-bud blastema samples\nS3_S5 = inDrops4.d1[,grep('^S[35]_', colnames(inDrops4.d1))]\n\n#Remove data matrix with extra samples\nrm(inDrops4.d1)\n#Create Seurat object and make sparse\nseurat_14dpa = CreateSeuratObject(S3_S5, project = '14dpa', min.cells = 8, min.genes = 200)\nseurat_14dpa = MakeSparse(seurat_14dpa)\n\n#While we did some filtering above, we need to perform further quality control to ensure that the cells we are working with aren't apoptotic #or have a dearth of genes. First, we need to identify the mitochondrial genes present in this matrix. The axolotl mitochondrial genome can #be found here: https://www.ncbi.nlm.nih.gov/nuccore/AJ584639. Remember that the genes are written as protein names when greping for #mitochondrial genes. 
\n\n#list of all mitochondrial genes in this early-bud blastema matrix\nmito.genes.14dpa <- c(\"c220469_g1_i1^sp|P00397|COX1_MOUSE\", \"c1451851_g1_i1^sp|Q9ZXY2|COX1_PAPHA\", \"c1088733_g1_i1^sp|Q9ZZM6|COX1_SALSA\", \"c934922_g1_i1^sp|Q9ZXX8|COX3_PAPHA\", \"c1049442_g1_i1^sp|Q96133|COX3_CARAU\", \"c1083417_g1_i2^sp|P00419|COX3_XENLA\", \"c1084180_g1_i1^sp|Q8LWP6|CYB_RANSI\", \"c1043008_g2_i1^sp|Q9B205|CYB_CAICR\", \"c1084180_g3_i1^sp|Q8LWP6|CYB_RANSI\", \"c786641_g1_i1^sp|Q9B205|CYB_CAICR\", \"c1060846_g1_i1^sp|Q8WA47|CYB_MUSMA\", \"c1083535_g6_i1^sp|Q4JQI7|NU1M_TETNG\", \"c1068681_g4_i1^sp|Q9ZZM3|NU5M_SALSA^sp|P82013|VDAC2_MELGA^Porin_3\", \"c1027109_g1_i1^sp|Q35920|ATP6_SALSA\")\n\n#calculate the percentage mitochondrial RNA for each cell\npercent.mito.14dpa <- Matrix::colSums([email protected][mito.genes.14dpa, ])/Matrix::colSums([email protected])\n#add the percent mitochondrial content of each cell to the Seurat object\nseurat_14dpa <- AddMetaData(object = seurat_14dpa, metadata = percent.mito.14dpa, col.name = \"percent.mito\")\n\n#Now perform quality control on matrix by filtering out cells with high percent mitochondrial RNA and low and high number of genes. We filter #out cells that by visualize inspection appear to have relatively high mitochondrial RNA content or high or low number of genes. These #numbers can be modified to be more or less inclusive. 
\n\n#visualize number of genes, unique molecular identifiers (UMI), and percent mitochondrial RNA\nVlnPlot(object = seurat_14dpa, features.plot = c(\"nGene\", \"nUMI\", \"percent.mito\"), nCol = 3)\n\n#filter out cells\nseurat_14dpa <- FilterCells(object = seurat_14dpa, subset.names = c(\"nGene\", \"percent.mito\"), low.thresholds = c(850, -Inf), high.thresholds = c(5000, 0.10))\n\n#normalize data\nseurat_14dpa <- NormalizeData(object = seurat_14dpa, normalization.method = \"LogNormalize\", scale.factor = 10000)\n\n#find variable genes\nseurat_14dpa <- FindVariableGenes(object = seurat_14dpa, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)\n\n#scale data and regress out nUMI and percent.mito\nseurat_14dpa <- ScaleData(object = seurat_14dpa, vars.to.regress = c(\"nUMI\", \"percent.mito\"))\n\n\n#Next, we perform linear dimensional reduction and visualize the results in a few different ways. \nseurat_14dpa <- RunPCA(object = seurat_14dpa, pc.genes = [email protected], do.print = TRUE, pcs.print = 1:5, genes.print = 5)\n\n#visualize results\nVizPCA(object = seurat_14dpa, pcs.use = 1:2)\nPCAPlot(object = seurat_14dpa, dim.1 = 1, dim.2 = 2)\nseurat_14dpa <- ProjectPCA(object = seurat_14dpa, do.print = FALSE)\nPCHeatmap(object = seurat_14dpa, pc.use = 1, cells.use = 500, do.balanced = TRUE, label.columns = FALSE)\nPCHeatmap(object = seurat_14dpa, pc.use = 1:12, cells.use = 500, do.balanced = TRUE, label.columns = FALSE, use.full = FALSE)\nPCElbowPlot(object = seurat_14dpa)\n\n#plot standard deviations to chose PCs to use in downstream analysis, here we chose 19\nseurat_14dpa <- FindClusters(object = seurat_14dpa, reduction.type = \"pca\", dims.use = 1:19, resolution = 1, print.output = 0, save.SNN = TRUE)\n\n\n#Now we can identify cell populations within the homeostatic limb, vizualize the resulting populations using tSNE, and subsequently find markers that define these different populations \n\n#find 
clusters using first 18 PCs\nseurat_inDrops3.intact <- FindClusters(object = seurat_inDrops3.intact, reduction.type = \"pca\", dims.use = 1:18, resolution = 1.5, print.output = 0, save.SNN = TRUE)\n\n#run non-linear dimensional reduction\nseurat_14dpa <- RunTSNE(object = seurat_14dpa, dims.use = 1:19, do.fast = TRUE)\n\n# Build a phylogenetic tree to see how cells are related while simultaneously renaming and reordering cluster names according to their #position on the tree. This will be important to determine when deciding whether similar populations should be merged. \nseurat_14dpa <- BuildClusterTree(seurat_14dpa, do.reorder=TRUE, reorder.numeric=TRUE)\n\n#visualize tSNE \nset.seed(5)\nTSNEPlot(object = seurat_14dpa)\n#visulize tSNE based on sample to determine how similar the two samples are to one another\nTSNEPlot(object = seurat_14dpa, group.by = \"orig.ident\")\n\n#assess nodes\nnode.scores <- AssessNodes(seurat_14dpa)\nnode.scores[order(node.scores$oobe, decreasing = TRUE), ] -> node.scores\nnode.scores\n\n\n#merge first nodes\n#select nodes to merge\nnodes.merge=node.scores[1:1,]\nnodes.to.merge <- sort(x = nodes.merge$node)\n\n#create a new Seurat object in which we will merge our selected nodes\nmerged <- seurat_14dpa\n#merge nodes\nfor (n in nodes.to.merge) {merged <- MergeNode(object = merged, node.use = n)}\n\n\n#re-visualize the tSNE after we have merged the non-distinct nodes\nset.seed(5)\nTSNEPlot(merged, do.label = TRUE)\n\n\n#determine differentially expressed genes for each population\n\n#find markers for each population\nall.markers <- FindAllMarkers(merged, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)\n\n#write DE results to table for inspection\nwrite.table(all.markers, 'early_bud_blastema.markers.txt', sep = '\\t')\n\n#save Rdata\nsave.image('early_bud_blastema.Rdata')\n\n#end session\nq()" }, { "alpha_fraction": 0.5858738422393799, "alphanum_fraction": 0.5921662449836731, "avg_line_length": 44.1541862487793, "blob_id": 
"95fe227958e1fa0c624506ad1fc8b4a776d44a5d", "content_id": "4c0114dda19770b874adbbd9102c9a6944675aaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20501, "license_type": "no_license", "max_line_length": 401, "num_lines": 454, "path": "/quantify_umifm_from_alignments.py", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "import pysam\nfrom collections import defaultdict\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nfrom copy import copy\nfrom itertools import combinations\nfrom numpy import memmap\n# from indrops import load_indexed_memmapped_array\n\ndef print_to_log(msg):\n \"\"\"\n Wrapper to eventually log in smart way, instead of using 'print()'\n \"\"\"\n sys.stderr.write(str(msg)+'\\n')\n\ndef quant(args):\n #Convert arg to more explicit names\n multiple_alignment_threshold = args.m\n distance_from_tx_end = args.d\n split_ambiguities = args.split_ambi\n ambig_count_threshold = args.u\n using_mixed_ref = args.mixed_ref\n\n #Assume that references are named 'transcript_name|gene_name'\n tx_to_gid = lambda tx: tx.split('|')[1] \n\n umis_for_geneset = defaultdict(set)\n sam_input = pysam.AlignmentFile(\"-\", \"r\" )\n\n # Tuple containing lengths of reference sequences\n ref_lengths = copy(sam_input.lengths)\n\n # Bam file to be generated\n if args.bam:\n sam_output = pysam.AlignmentFile(args.bam, \"wb\", template=sam_input)\n\n\n # Load cache of low complexity regions\n soft_masked_regions = None\n if args.soft_masked_regions:\n low_complexity_regions = pickle.load(args.soft_masked_regions)\n soft_masked_regions = defaultdict(set)\n for tx, regions in low_complexity_regions.items():\n if regions:\n soft_masked_regions[tx] = set.union(*[set(range(a,b)) for a,b in regions])\n soft_masked_fraction_threshold = 0.5\n\n def process_read_alignments(alignments):\n \"\"\"input: one-element list of a single alignment from a bam file \n corresponding to a given 
barcode\"\"\"\n\n # Remove any alignments that aren't supported by a certain number of non-poly A bases.\n dependent_on_polyA_tail = False\n if args.min_non_polyA > 0:\n polyA_independent_alignments = []\n for a in alignments:\n start_of_polyA = ref_lengths[a.reference_id] - args.polyA\n if a.reference_end < start_of_polyA:\n # The alignment doesn't overlap the polyA tail. \n polyA_independent_alignments.append(a)\n else:\n non_polyA_part = start_of_polyA - a.reference_start\n if non_polyA_part > args.min_non_polyA:\n polyA_independent_alignments.append(a)\n\n dependent_on_polyA_tail = len(polyA_independent_alignments) == 0\n alignments = polyA_independent_alignments\n\n # Remove any alignments that are mostly to low complexity regions\n if soft_masked_regions:\n for a in alignments:\n tx_id = sam_input.getrname(a.reference_id)\n soft_masked_bases = soft_masked_regions[tx_id].intersection(set(range(a.reference_start, a.reference_end)))\n soft_masked_fraction = float(len(soft_masked_bases))/(a.reference_end - a.reference_start)\n a.setTag('XC', '%.2f' % soft_masked_fraction)\n\n alignments = [a for a in alignments if float(a.opt('XC')) < soft_masked_fraction_threshold]\n\n # We need to obtain Transcript IDs in terms of reference names (Transcrupt_ID|Gene_ID)\n # as opposed to the arbitrary 'a.reference_id' number\n tx_ids = [sam_input.getrname(a.reference_id) for a in alignments]\n\n #Map to Gene IDs\n g_ids = [tx_to_gid(tx_id) for tx_id in tx_ids]\n # finally remove all copies to get a comprehensive unique list of genes\n # found for this barcode\n genes = set(g_ids)\n\n # Does the alignment map to multiple genes or just one?\n unique = True\n # Was the alignment non-unique, but then rescued to being unique?\n rescued_non_unique = False\n # Even after rescue, was the alignment mapping to more than M genes?\n failed_m_threshold = False\n\n # The same read could align to transcripts from different genes. 
\n if 1 < len(genes):\n unique = False\n\n close_alignments = [a for a in alignments if (ref_lengths[a.reference_id] - a.reference_end)<distance_from_tx_end]\n close_tx_ids = [sam_input.getrname(a.reference_id) for a in close_alignments]\n close_g_ids = [tx_to_gid(tx_id) for tx_id in close_tx_ids]\n close_genes = set(close_g_ids)\n\n if 0 < len(close_genes) < len(genes):\n alignments = close_alignments\n genes = close_genes\n if len(close_genes) == 1:\n rescued_non_unique = True\n\n #Choose 1 alignment per gene, that we will write to the output BAM.\n chosen_alignments = {}\n keep_read = 0 < len(genes) <= multiple_alignment_threshold\n\n # We need different logic if we are using a mixed organism reference\n if using_mixed_ref:\n refs = set(g.split(':')[1] for g in genes)\n keep_read = (len(refs) == 1) and (0 < len(genes) <= multiple_alignment_threshold)\n \n\n if keep_read:\n for gene in genes:\n gene_alignments = [a for a in alignments if tx_to_gid(sam_input.getrname(a.reference_id)) == gene]\n chosen_alignment = sorted(gene_alignments, key=lambda a: ref_lengths[a.reference_id], reverse=True)[0]\n chosen_alignments[gene] = chosen_alignment\n \n else:\n failed_m_threshold = True\n\n read_filter_status = (unique, rescued_non_unique, failed_m_threshold, dependent_on_polyA_tail)\n return chosen_alignments, read_filter_status\n\n # --------------------------\n # Process SAM input\n # (we load everything into memory, so if a single barcode has truly very deep sequencing, we could get into trouble\n # --------------------------\n\n uniq_count = 0\n rescued_count = 0\n non_uniq_count = 0\n failed_m_count = 0\n not_aligned_count = 0\n\n current_read = None\n read_alignments = []\n\n reads_by_umi = defaultdict(dict)\n\n rev = 0\n non_rev = 0\n for alignment in sam_input:\n #Skip alignments that failed to align...\n if alignment.reference_id == -1:\n not_aligned_count += 1\n # if args.bam:\n # sam_output.write(alignment)\n continue\n\n # The If statements detects that Bowtie 
is giving info about a different read,\n # so let's process the last one before proceeding\n if not current_read == alignment.query_name: \n #Check that our read has any alignments\n if read_alignments: \n chosen_alignments, processing_stats = process_read_alignments(read_alignments)\n if chosen_alignments:\n split_name = current_read.split(':')\n if len(split_name) == 2:\n umi = split_name[1] #Old Adrian Format\n elif len(split_name) == 3:\n umi = split_name[1] #Adrian format\n else:\n umi = split_name[4] #Old Allon format\n seq = read_alignments[0].seq\n reads_by_umi[umi][alignment.query_name] = chosen_alignments\n\n uniq_count += processing_stats[0]\n non_uniq_count += not(processing_stats[0] or processing_stats[1] or processing_stats[2])\n rescued_count += processing_stats[1]\n failed_m_count += processing_stats[2]\n\n # We reset the current read info\n current_read = alignment.query_name\n read_alignments = []\n\n read_alignments.append(alignment)\n\n # Only runs if preceding for loop terminated without break\n # This is not very DRY...\n else:\n if read_alignments:\n chosen_alignments, processing_stats = process_read_alignments(read_alignments)\n if chosen_alignments:\n split_name = current_read.split(':')\n if len(split_name) == 2:\n umi = split_name[1] #Old Adrian Format\n elif len(split_name) == 3:\n umi = split_name[1] #Adrian format\n else:\n umi = split_name[4] #Allon format\n seq = read_alignments[0].seq\n reads_by_umi[umi][alignment.query_name] = chosen_alignments\n\n uniq_count += processing_stats[0]\n non_uniq_count += not(processing_stats[0] or processing_stats[1] or processing_stats[2])\n rescued_count += processing_stats[1]\n failed_m_count += processing_stats[2]\n\n # -----------------------------\n # Time to filter based on UMIs\n # (and output)\n # --------------------------\n \n umi_counts = defaultdict(float)\n ambig_umi_counts = defaultdict(float)\n ambig_gene_partners = defaultdict(set)\n ambig_clique_count = defaultdict(list)\n\n 
oversequencing = []\n distance_from_transcript_end = []\n\n temp_sam_output = []\n\n for umi, umi_reads in reads_by_umi.items():\n \n #Invert the (read, gene) mapping\n aligns_by_gene = defaultdict(lambda: defaultdict(set))\n for read, read_genes in umi_reads.items():\n for gene, alignment in read_genes.items():\n aligns_by_gene[gene][len(read_genes)].add(alignment)\n\n #Pick the best alignment for each gene:\n # - least other alignments\n # - highest alignment quality \n # - longest read\n best_alignment_for_gene = {}\n\n for gene, alignments in aligns_by_gene.items():\n # min_ambiguity_alignments = alignments[min(alignments.keys())]\n # max_qual = max(a.mapq for a in min_ambiguity_alignments)\n # max_qual_alignments = filter(lambda a: a.mapq==max_qual, min_ambiguity_alignments)\n # best_alignment_for_gene[gene] = max(max_qual_alignments, key=lambda a: a.qlen)\n best_alignment_for_gene[gene] = alignments[min(alignments.keys())]\n\n # Compute hitting set\n g0 = set.union(*(set(gs) for gs in umi_reads.values())) #Union of the gene sets of all reads from that UMI\n r0 = set(umi_reads.keys())\n gene_read_mapping = dict()\n for g in g0:\n for r in r0:\n gene_read_mapping[(g, r)] = float(g in umi_reads[r])/(len(umi_reads[r])**2)\n\n target_genes = dict()\n #Keys are genes, values are the number of ambiguous partner of each gene\n while len(r0) > 0:\n #For each gene in g0, compute how many reads point ot it\n gene_contrib = dict((gi, sum(gene_read_mapping[(gi, r)] for r in r0)) for gi in g0)\n\n #Maximum value of how many reads poitn to any gene\n max_contrib = max(gene_contrib.values())\n\n #Gene with max contrib\n max_contrib_genes = filter(lambda g: gene_contrib[g]==max_contrib, gene_contrib.keys())\n\n #Pick a gene among those with the highest value. 
Which doesn't matter until the last step\n g = max_contrib_genes[0]\n \n read_count_for_umifm = 0\n umifm_assigned_unambiguously = False\n\n\n for r in copy(r0): #Take a copy of r0 doesn't change as we iterate through it\n if gene_read_mapping[(g, r)]: #Remove any reads from r0 that contributed to the picked gene.\n r0.remove(r)\n\n #Count how many reads we are removing (this is the degree of over-sequencing)\n read_count_for_umifm += 1\n # umifm_reads.append(r)\n\n # If we had equivalent picks, \n # and their gene contrib value is now 0\n # they were ambiguity partners\n if len(max_contrib_genes) > 1:\n\n # Update the gene contribs based on the new r0, but on the 'old' g0.\n # That is why we remove g from g0 after this step only\n gene_contrib = dict((gi, sum(gene_read_mapping[(gi, r)] for r in r0)) for gi in g0)\n ambig_partners = filter(lambda g: gene_contrib[g]==0, max_contrib_genes)\n \n \n #Ambig partners will often be a 1-element set. That's ok.\n #Then it will be equivalent to \"target_genes[g] = 1.\"\n if len(ambig_partners) <= ambig_count_threshold:\n if len(ambig_partners) == 1:\n umifm_assigned_unambiguously = True\n ambig_clique_count[0].append(umi)\n \n for g_alt in ambig_partners:\n ambig_gene_partners[g_alt].add(frozenset(ambig_partners))\n target_genes[g_alt] = float(len(ambig_partners))\n ambig_clique_count[len(ambig_partners)].append(umi)\n\n else:\n umifm_assigned_unambiguously = True\n target_genes[g] = 1.\n ambig_clique_count[1].append(umi)\n\n #Remove g here, so that g is part of the updated gene_contrib, when necessary\n g0.remove(g)\n\n #For each target gene, output the best alignment\n #and record umi count\n for gene, ambigs in target_genes.items():\n supporting_alignments = best_alignment_for_gene[gene]\n if args.bam:\n for alignment_for_output in best_alignment_for_gene[gene]:\n # Add the following tags to aligned reads:\n # XB - Library Name\n # XB - Barcode Name\n # XU - UMI sequence\n # XO - Oversequencing number (how many reads with 
the same UMI are assigned to this gene)\n # YG - Gene identity\n # YK - Start of the alignment, relative to the transcriptome\n # YL - End of the alignment, relative to the transcriptome\n # YT - Length of alignment transcript\n alignment_for_output.setTag('XL', args.library)\n alignment_for_output.setTag('XB', args.barcode)\n alignment_for_output.setTag('XU', umi)\n alignment_for_output.setTag('XO', len(supporting_alignments))\n alignment_for_output.setTag('YG', gene)\n alignment_for_output.setTag('YK', int(alignment_for_output.pos))\n alignment_for_output.setTag('YL', int(alignment_for_output.reference_end))\n alignment_for_output.setTag('YT', int(ref_lengths[alignment.reference_id]))\n temp_sam_output.append(alignment_for_output)\n \n split_between = ambigs if split_ambiguities else 1.\n umi_counts[gene] += 1./split_between\n ambig_umi_counts[gene] += (1./split_between if ambigs>1 else 0)\n\n #Output the counts per gene\n all_genes = set()\n for ref in sam_input.references:\n gene = ref.split('|')[1]\n all_genes.add(gene)\n\n\n sorted_all_genes = sorted(all_genes)\n sorted_metric_columns = ['total_input_reads','single_alignment','rescued_single_alignment','non_unique_less_than_m','non_unique_more_than_m','not_aligned','unambiguous_umifm','umifm_degrees_of_ambiguity_2','umifm_degrees_of_ambiguity_3','umifm_degrees_of_ambiguity_>3']\n output_umi_counts = [umi_counts[gene] for gene in sorted_all_genes]\n\n if args.write_header:\n args.counts.write('\\t'.join(['barcode'] + sorted_all_genes) + '\\n')\n args.ambigs.write('\\t'.join(['barcode'] + sorted_all_genes) + '\\n')\n args.metrics.write('\\t'.join([\"Barcode\",\"Reads\",\"Reads with unique alignment\",\"Reads with unique alignment within shorter distance of 3'-end\",\"Reads with less than `m` multiple alignments\",\"Reads with more than than `m` multiple alignments\",\"Reads with no alignments\", \"UMIFM\",\"Ambig UMIFM (between 2 genes)\",\"Ambig UMIFM (between 3 genes)\",\"Ambig UMIFM (between more than 3 
genes)\",]) + '\\n')\n\n\n if sum(output_umi_counts) >= args.min_counts:\n ignored = False\n args.counts.write('\\t'.join([args.barcode] + [str(int(u)) for u in output_umi_counts]) + '\\n')\n\n # Output sam data\n if args.bam:\n for alignment in temp_sam_output:\n sam_output.write(alignment)\n sam_output.close()\n\n # Output ambig data\n output_ambig_counts = [ambig_umi_counts[gene] for gene in sorted_all_genes]\n if sum(output_ambig_counts) > 0:\n\n args.ambigs.write('\\t'.join([args.barcode] + [str(int(u)) for u in output_ambig_counts]) + '\\n') \n output_ambig_partners = {}\n for gene in sorted_all_genes:\n if ambig_gene_partners[gene]:\n gene_partners = frozenset.union(*ambig_gene_partners[gene])-frozenset((gene,))\n if gene_partners:\n output_ambig_partners[gene] = gene_partners\n args.ambig_partners.write(args.barcode + '\\t'+ str(output_ambig_partners) + '\\n')\n else:\n ignored = True\n with open(args.counts.name + '.ignored', 'a') as f:\n f.write(args.barcode + '\\n')\n\n args.counts.close()\n args.ambigs.close()\n args.ambig_partners.close()\n \n\n #Output the fixing metrics\n total_input_reads = uniq_count + rescued_count + non_uniq_count + failed_m_count + not_aligned_count\n metrics_data = {\n 'total_input_reads': total_input_reads,\n 'single_alignment': uniq_count,\n 'rescued_single_alignment': rescued_count,\n 'non_unique_less_than_m': non_uniq_count,\n 'non_unique_more_than_m': failed_m_count,\n 'not_aligned': not_aligned_count,\n 'unambiguous_umifm' : 0,\n 'umifm_degrees_of_ambiguity_2' : 0,\n 'umifm_degrees_of_ambiguity_3' : 0,\n 'umifm_degrees_of_ambiguity_>3' : 0,\n }\n\n for k, v in ambig_clique_count.items():\n if k == 0:\n metrics_data['unambiguous_umifm'] += len(v)\n elif k == 1:\n metrics_data['unambiguous_umifm'] += len(v)\n elif k == 2:\n metrics_data['umifm_degrees_of_ambiguity_2'] += len(v)\n elif k == 3:\n metrics_data['umifm_degrees_of_ambiguity_3'] += len(v)\n elif k > 3:\n metrics_data['umifm_degrees_of_ambiguity_>3'] += 
len(v)\n\n\n args.metrics.write('\\t'.join([args.barcode] + [str(metrics_data[c]) for c in sorted_metric_columns]) + '\\n')\n log_output_line = \"{0:<8d}{1:<8d}{2:<10d}\".format(total_input_reads, metrics_data['unambiguous_umifm'],\n metrics_data['umifm_degrees_of_ambiguity_2']+metrics_data['umifm_degrees_of_ambiguity_3']+metrics_data['umifm_degrees_of_ambiguity_>3'])\n if ignored:\n log_output_line += ' [Ignored from output]'\n print_to_log(log_output_line)\n\nif __name__==\"__main__\":\n import sys, argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', help='Ignore reads with more than M alignments, after filtering on distance from transcript end.', type=int, default=4)\n parser.add_argument('-u', help='Ignore counts from UMI that should be split among more than U genes.', type=int, default=4)\n parser.add_argument('-d', help='Maximal distance from transcript end.', type=int, default=525)\n parser.add_argument('--polyA', help='Length of polyA tail in reference transcriptome.', type=int, default=5)\n parser.add_argument('--split_ambi', help=\"If umi is assigned to m genes, add 1/m to each gene's count (instead of 1)\", action='store_true', default=False)\n parser.add_argument('--mixed_ref', help=\"Reference is mixed, with records named 'gene:ref', should only keep reads that align to one ref.\", action='store_true', default=False)\n parser.add_argument('--min_non_polyA', type=int, default=0)\n\n # parser.add_argument('--counts', type=argparse.FileType('w'))\n # parser.add_argument('--metrics', type=argparse.FileType('w'))\n\n parser.add_argument('--counts', type=argparse.FileType('a'))\n parser.add_argument('--metrics', type=argparse.FileType('a'))\n parser.add_argument('--ambigs', type=argparse.FileType('a'))\n parser.add_argument('--ambig-partners', type=argparse.FileType('a'))\n\n parser.add_argument('--barcode', type=str)\n parser.add_argument('--library', type=str, default='')\n parser.add_argument('--min-counts', type=int, default=0)\n 
parser.add_argument('--write-header', action='store_true')\n \n \n parser.add_argument('--bam', type=str, nargs='?', default='')\n parser.add_argument('--soft-masked-regions', type=argparse.FileType('r'), nargs='?')\n args = parser.parse_args()\n quant(args)\n\n" }, { "alpha_fraction": 0.6740707159042358, "alphanum_fraction": 0.7007961869239807, "avg_line_length": 58.57401657104492, "blob_id": "332637d753618da421de012239d942112db80726", "content_id": "f92065a14487b2c0b14ebad94a475724e569cdc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19723, "license_type": "no_license", "max_line_length": 671, "num_lines": 331, "path": "/README.md", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "** Note, this is forked from [https://github.com/indrops/indrops](https://github.com/indrops/indrops) ** and was modified for our use.\n\n## Project YAML file.\n\nAn inDrops project is composed of a series of sequencing runs, each including one (or several) indrops libraries within it. A sequencing run can further be split into several parts (effectively arbitrary chunks) to parallelize the analysis. Give example of a chunk\n\nThe project yaml file contains the details of all sequencing runs and libraries within a project. \n\nThe same project can contain runs from different versions of the inDrops platform. \n\nA project will be aligned against the same reference genome with the same alignment parameters. \n\nWe have provided the yaml files used for this paper above, though you will need to modify them for your own use (i.e. change paths). They are named \"Early_med_bud.yaml\" and this yaml file should be used for fastq #'s SRR8147022-SRR8147025 (containing biological replicates 1-3 of medium-bud and 1-2 of early-bud blastemas). yaml file wound_healing_med_bud.yaml should be used with SRR8147026-SRR8147029 (containing biological replicates 4-6 of medium-bud blastemas and 1-3 of wound healing stage). 
Finally, intact_and_contralateral.yaml should be used for SRR8147030-SRR8147033 (containing biological replicates 1-2 of intact and 1-3 of contralateral). \n\n## Important information for downloading and using SRA data\n\nAll fastq files associated with Leigh et al. PMID 30514844 can all be found here: https://www.ncbi.nlm.nih.gov/sra?term=SRP167700. To download the files, it's essential that you use the fastq-dump --split-files command so that all 4 Reads are separated. When you download, for example, SRR8147022 (which contains early- and medium-bud samples) this is going to give you 4 Reads. You will also need to download SRR8147023, SRR8147024, and SRR8147025 (again all using --split-files) to yield 16 total fastq files associated with the the early- and medium-bud samples. These 16 fastq files will be four from Read 1, four from Read 2, four from Read 3, and four from Read 4. \n\nIt should be that SRR8147022_1.fastq = Read 1, SRR8147022_2.fastq = Read 2, SRR8147022_3.fastq = Read 3, and SRR8147022_4.fastq = Read 4. However, manual inspection of the SRR downloads should allow for confirmation of appropriate read number. Read structure is as follows:\n\n1. 61bp Read 1 transcript\n2. 8bp Index Read 1 (i7) 8bp part 1 single cell barcode\n3. 8bp Index Read 2 (i5) library index\n4. 14bp Read 2 part 2 cell barcode (8bp) / UMI (6bp) / Poly T\n\nYou can use the library index to determine the difference between i7 and i5 reads. The other two will be clear based on the length information in the fastq file. \n\nFinally, you will need to combine all Read 1 files to make one fastq file for Read 1. The same should be done for Read 2, Read 3, and Read 4. So you'll have gone from 16 fastq files (4 of each Read) to four files one FOR each read. At this point you'll want to rename the files so that they are compatible with your yaml. 
This should be something like:\n\nSRR8147022_{read}.fastq \n\nWhere {read} is either R1, R1, R3, or R4, as appropriate.\n\nA table has been provided above further describing where to find each of the biological replicates within the fastq's deposited on SRA. It is important to note that each SRR number does NOT correlate with one sample. For example, SRR8147022 has reads from early- and medium- bud blastemas (see sample.table.txt above). You will need all fastq's (SRR8147022-SRR8147025) to obatin all 4 Reads for running the pipeline (as described above). When running pipeline the yaml file will demultiplex these samples.\n\n## Supported library versions\n - v1 : original design where R2 is the biological read and R1 is the metadata read. \n - v2 : inversion of v1 where R1 is the biological read and R2 is the metadata read.\n - v3 : summer 2016 redesign requiring manual demultiplexing. R1 is the biological read.\n R2 carries the first half of the gel barcode, R3 carries the library index and R4\n the second half of the gel barcode, the UMI and a fraction of the polyA tail.\n\n## Installation\nThe package requires\n - Python 2.7 (with the packages numpy, scipy, matplotlib, pysam>0.9.0, pyyaml, pyfasta). [See Appendix 2]\n - RSEM (1.2.16+)\n - Bowtie (1.1.1+)\n - samtools (1.3.1+) [See Appendix 3] *This specific version is needed to account for a BAM-format oddity in RSEM output.\n - Java \nThe path to the directories containing these executables should be set in the project YAML.\nIf these executables can be found in the PATH variables, this project YAML paths can be left empty, or not specified.\n\n### March 7th Notice -- PySAM version.\n\nPrevious installation instructions install PySAM version 0.6.0. To install the correct PySAM version, use the following commands:\n\n conda remove pysam\n conda install pip\n pip install pysam==0.9.1\n\n\n## Project YAML file\n\nAn example YAML file is provided in `test/test_project.yaml`. 
It should contain the following information:\n\n project_name : \"project_name\"\n project_dir : \"/path/to/project/dir\" #This dir should be user-owned and writable, all output will go into this dir.\n paths : \n bowtie_index : \"/path/to/index\" #This index will be built automatically\n # The paths below can be omitted if the relevant directories are already on $PATH\n bowtie_dir : \"/path/to/bowtie/dir/\"\n python_dir : \"/path/to/env/bins/\"\n java_dir: \"/path/to/java/dir/\"\n rsem_dir: \"/path/to/rsem/dir/\"\n samtools_dir: \"/path/to/samtools-1.3.1/bin/\" #This needs to be version 1.3.1, 1.3 is not good enough!\n\n sequencing_runs : \n # A list of sequencing runs which form the project. \n # Each run should have:\n - name : \"MyRun\" # The name of the run will be used as a prefix in filenames, so keep it sane.\n version : \"vN\" # Can be 'v1', 'v2' or 'v3'\n\n # For a run with a single 'part', and a single library\n dir : \"/path/to/run_files/\"\n fastq_path : \"{read}.fastq.gz\" # Read with be replaced by R1, R2, R3, R4 as appropriate.\n library_name : \"my_library\"\n\n # This will expect to find the files:\n # /path/to/run_files/R1.fastq.gz (and R2...)\n\n # For a run with several parts, but a single library\n dir : \"/path/to/run_files/\"\n fastq_path : \"{split_affix}_{read}.fastq.gz\" # Read with be replaced by R1, R2, R3, R4 as appropriate.\n split_affixes : [\"L001\", \"L002\"]\n library_name : \"my_library\"\n\n # This will expect to find the files:\n # /path/to/run_files/L001_R1.fastq.gz (and R2...)\n # /path/to/run_files/L002_R1.fastq.gz (and R2...)\n\n # For a run with several parts, several libraries, that have already been demultiplexed\n dir : \"/path/to/run_files/\"\n fastq_path : \"{library_prefix}_{split_affix}_{read}.fastq.gz\" # Read with be replaced by R1, R2, R3, R4 as appropriate.\n split_affixes : [\"L001\", \"L002\"]\n libraries : \n - {library_name: \"test_lib1\", library_prefix: \"lib1\"}\n - {library_name: \"test_lib2\", 
library_prefix: \"lib2\"}\n\n # This will expect to find the files:\n # /path/to/run_files/lib1_L001_R1.fastq.gz (and R2...)\n # /path/to/run_files/lib1_L002_R1.fastq.gz (and R2...)\n # /path/to/run_files/lib2_L001_R1.fastq.gz (and R2...)\n # /path/to/run_files/lib2_L002_R1.fastq.gz (and R2...)\n\n # For a V3 run with several parts, with several libraries that are not already demultiplexed\n dir : \"/path/to/run_files/\"\n fastq_path : \"{library_prefix}_{split_affix}_{read}.fastq.gz\" # Read with be replaced by R1, R2, R3, R4 as appropriate.\n split_affixes : [\"L001\", \"L002\", \"L003\", \"L004\"]\n libraries : # The library index is what the expected index read sequence (on a NextSeq, this is the reverse complement of the index sequence)\n - {library_name: \"test_lib3\", library_index: \"ATAGAG\"}\n - {library_name: \"test_lib4\", library_index: \"AGAGGA\"}\n\n # This will expect to find the files:\n # /path/to/run_files/lib1_L001_R1.fastq.gz (and R2, R3, R4...)\n # /path/to/run_files/lib1_L002_R1.fastq.gz (and R2, R3, R4...)\n # /path/to/run_files/lib1_L003_R1.fastq.gz (and R2, R3, R4...)\n # /path/to/run_files/lib1_L004_R1.fastq.gz (and R2, R3, R4...)\n\n#### Note about v3 runs. \nThe raw BCL files are needed for manual demultiplexing. Move the raw BCL files to a run directory, then use the following command to extract the R1,R2,R3 and R4 files.\n\n cd /run/dir/\n bcl2fastq --use-bases-mask y*,y*,y*,y* --mask-short-adapter-reads 0 --minimum-trimmed-read-length 0\n # The 'dir' used in the project YAML file should then be:\n # /run/dir/Data/Intensities/BaseCalls/\n\n## Analysis steps\n\n### 0. 
Generate bowtie index from axolotl transcriptome\nThe version of the transcriptome we used can be downloaded here:https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE121737&format=file&file=GSE121737%5FAxolotl%2ETrinity%2ECellReports2017%2Efasta%2Egz\n\nTo generate a bowtie index with this transcriptome.\n ```\n bowtie-build Axolotl.Trinity.CellReports2017.fasta Axolotl.Trinity.CellReports2017.fasta\n \n ```\n \n\n### 1. Filter\nThis iterates over sequencing run parts, optionally filtered by a list of sequencing parts, and a list of libraries of interest.\n\n python indrops.py project.yaml filter [--total-workers 1] [--worker-index 0]\n [-r --runs RUNS ] [-l --libraries LIBRARIES ]\n\n # --runs comma-separated list of runs : If specified, step will be restricted to run parts coming # from runs in the list\n # --libraries comma-separated list of libraries : If specified, step will be restricted to run parts that \n # contain reads from a library in the list\n # \n # Resulting workload (a list of run parts), will be split among N --total-workers,\n # where worker with --worker-index i will do steps (i, N+i, 2N+i, ...)\n\nThis step reads the raw FastQ files as input and filters them:\n - For every raw read, it determines if the read has the expected structure (depending on library version). \n - For reads with correct structure, it runs Trimmomatic.\n - For reads surviving Trimmomatic, it finds and trims the polyA tail a maximum length of 4, and checks if the reads are still above MIN_LEN.\n - For surviving reads, it determines which fraction of the read is composed of runs of the same base (only considering runs of 5 or more). \n It rejects reads whose fraction is greater than `low_complexity_filter_arguments:max_low_complexity_fraction`.\n\nAs output, for every input run part, this produces a filtered FastQ file for every library contained in that run. These files are referred to as 'parts of libraries'.\n\nA log is created detailing what happened to every input read. 
An index is created that lists the number of reads found for every barcode. \n\n### 2. Identify abundant barcodes\nThis iterates over libraries, optionally filtered by a list. \n\n python indrops.py project.yaml identify_abundant_barcodes [--total-workers 1] [--worker-index 0]\n [-l --libraries LIBRARIES]\n \n # --libraries comma-separated list of librares : If specified, step will be restricted to libraries in this list.\n # \n # Resulting workload (a list of libraries), will be split among N --total-workers,\n # where worker with --worker-index i will do steps (i, N+i, 2N+i, ...)\n # \n # *Note* This step is fast, it does not need to be dispatched to several workers.\n\nFor each library, this collates the results of filtering all the sequencing run parts that have reads related to this library. It then outputs,\n - Histogram of the distribution barcode abundances\n - Summary table of filtering for that library\n - An index to be used by `sort`. \n\n### This is where we diverge from the original inDrops processing pipeline!\n\n### 3. Sort reads according to their barcode of origin.\nThis iterates over parts of libraries, optionally filtered by a list.\n\n python extract_barcoded_reads.py project.yaml sort [--total-workers 1] [--worker-index 0]\n [-l --libraries LIBRARIES]\n\n # --libraries comma-separated list of libraries : If specified, step will be restricted to library-run-parts\n # that contain reads from a library in the list\n # \n # Resulting workload (a list of library-run-parts), will be split among N --total-workers,\n # where worker with --worker-index i will do steps (i, N+i, 2N+i, ...)\n #\n # *Note* this step is currently memory intensive, as it loads the entire 'library-run-part' in memory. \n\nThis sorts the reads according to the name of their barcode of origin. 
Barcodes with less than 250 total reads (across all library-run-parts) are ignored, and placed at the end of the file.\n\nAs output, this creates a gzipped FastQ file and an index of the byte offsets for every barcode with more than 250 reads.\n\n### 4. Quantify expression\nThis iterates over a list of barcodes, from a list of optionally filtered libraries. \n\n python extract_barcoded_reads.py project.yaml quantify --no-bam > barcoded_reads.fastq \n [--total-workers 1] [--worker-index 0]\n [-l --libraries LIBRARIES] [-r --runs RUNS ]\n [--min-reads 750] [--min-counts 0]\n [--analysis prefix '']\n [--no-bam]\n\n # --min-reads INT : Ignore barcodes with less than specified number of reads.\n # --min-counts INT : Ignore output for barcodes with less than the specified number\n # of UMIFM counts. This significantly speeds up\n # downstream processing.\n # --analysis-prefix STR : Prefix output data files with the specified prefix.\n # (filename --> prefix.filename)\n # --no-bam : If specified, do not output and process BAM files. \n # \n # --libraries comma-separated list of libraries If specified, step will be restricted to libraries\n # in this list.\n # --runs comma-separated list of runs If specified, only align reads coming from these runs\n # [This is an uncommon use case.]\n # \n # \n # The resulting list of barcodes will be split among --total-workers, with worker identified by --worker-index.\n # *Note* This step requires ~2Gb of memory. \n\nThis step is resumable. If the same --analysis-prefix/--total-workers/--worker-index was previously running, another run will only quantify barcodes that were not previously quantified (or whose data was lost). 
To force requantification, delete files in /project_dir/library_dir/quant_dir/[prefix.]worker\\*_[total_workers]\\*\n\n## note, must make the read name unique:\n```\nencode_read_number_in_fastq.pl barcoded_reads.fastq > barcoded_reads.adj.fq\n```\n\n# align reads to target transcripts using bowtie:\n```\nbowtie target.fasta.bowtie -q -p 20 -a --best --strata --chunkmbs 1000 --sam -m 200 -n 1 -l 15 -e 100 barcoded_reads.adj.fq > target.bowtie.adj.sam\n```\n# generate count matrix:\nnote: bam_to_count_matrix.pl can be modified to output more or less cells to the sc.counts.matrix file. This can be done by changing the value for max_top_cells. We expect about 3000 cells per library. \n\n```\nbam_to_count_matrix.pl --bam target.bowtie.adj.sam > sc.counts.matrix\n```\n# annotate count matrix\nTrinity (https://github.com/trinityrnaseq/trinityrnaseq/wiki) has a nice perl script to take care of this for us, so download and install Trinity for these next few steps (we used Trinity v2.5.1). You should run these next two commands from within the Trinity codebase. The script we will use for annotation can be found here: https://github.com/trinityrnaseq/trinityrnaseq/blob/master/Analysis/DifferentialExpression/rename_matrix_feature_identifiers.pl \n\nTo perform the annotation, we need two things 1) the sc.counts.matrix and 2) an annotation mapping file for the transcriptome of interest. The annotation mapping file for the the Axolotl.Trinity.CellReports2017.fasta transcriptome is called Axo.Mar2014.Trinotate.xls.annot_mapping and is available above.\n\n```\nrename_matrix_feature_identifiers.pl sc.counts.matrix Axo.Mar2014.Trinotate.xls.annot_mapping > sc.counts.annotated.matrix\n```\n\n# create representative gene count matrix\nSince the transcriptome we used has a lot of isoforms, we collapsed these down to one representative isoform. 
Perl script can be found here: https://github.com/trinityrnaseq/trinityrnaseq/blob/master/util/misc/trinity_trans_matrix_to_rep_trans_gene_matrix.pl and again run from the Trinity codebase. \n\n```\ntrinity_trans_matrix_to_rep_trans_gene_matrix.pl sc.counts.annotated.matrix > sc.counts.annotated.matrix.repGene\n```\n\nThis count matrix is now ready to go into an analysis software. We used Seurat (https://satijalab.org/seurat/) for our manuscript! \n\n\n## Appendix 1: Parallelizing the analysis steps\n\n### Analyzing only parts of the project.\n\nMost parts of the analysis can be filtered by specifying a list of sequencing runs,\na list of sequencing libraries, or both. When a filter is provided, the analysis will\nonly be carried out on data matching the filter.\n\nEvery part of the analysis can be filtered based on both libraries and sequencing runs.\n\n # Will filter all parts from runs Run5 and Run6:\n python indrops.py test_project.yaml filter --runs Run5,Run6\n\n # Will sort all parts from all runs of libraries test_lib3 and test_lib4:\n python indrops.py test_project.yaml sort --libraries test_lib3,test_lib4\n\n### Dividing the analysis between jobs\n\nMost parts of the analysis can easily be divided for concurrent processing in different jobs,\nby specifying the total number of jobs (--total-workers) and the index of the current worker (--worker-index). 
\n\n # Submitting the 20 commands below would filter all run parts within the project in 20 different parts.\n python indrops.py test_project.yaml filter --total-workers 20 --worker-index [0-19]\n\n## Appendix 2: Using a custom Python environment on the a cluster\n\n### How to install conda and create a new environment\n\nDownload Miniconda (the anaconda package manager, without all the packages)\n\n mkdir -pv /user_owned/path\n cd /user_owned/path\n wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh\n\nInstall Miniconda\n\n bash Miniconda-latest-Linux-x86_64.sh\n # Agree to license with “yes”, and choose to install in a directory that is user owned.\n # I installed it in: /groups/klein/adrian/miniconda\n\nCreate a new Python environment (in this example, in /groups/klein/adrian/pyndrops)\ninstall Python2.7, Numpy, Scipy, Pandas, Matplotlib, PyYaml, PySAM\n\n conda create -p /groups/klein/adrian/pyndrops python numpy scipy pandas pyyaml matplotlib pip\n source activate /groups/klein/adrian/pyndrops\n pip install pyfasta pysam==0.9.1\n\n## Appendix 3: Installing Samtools 1.3.1\n\n mkdir -pv SAMTOOLS_DIR\n cd SAMTOOLS_DIR\n wget https://github.com/samtools/samtools/releases/download/1.3.1/samtools-1.3.1.tar.bz2\n tar xvfj samtools-1.3.1.tar.bz2\n cd samtools-1.3.1\n make\n make prefix=. 
install\n\nNow add `SAMTOOLS_DIR/samtools-1.3.1/bin/` as the `samtools_dir` in your project YAML file.\n" }, { "alpha_fraction": 0.5850436687469482, "alphanum_fraction": 0.5945969820022583, "avg_line_length": 47.36597442626953, "blob_id": "570ccd9a3327380c5ae65598992d5ed3ef3f0efe", "content_id": "43aaa1499dff7c778c8fb5cb2a3f7ed463b3b739", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79030, "license_type": "no_license", "max_line_length": 216, "num_lines": 1634, "path": "/indrops.py", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "import os, subprocess\nimport itertools\nimport operator\nfrom collections import defaultdict, OrderedDict\nimport errno\n\n# cPickle is a faster version of pickle that isn't installed in python3\n# inserted try statement just in case\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nfrom io import BytesIO\n\nimport numpy as np\nimport re\nimport shutil\nimport gzip\n\n# product: product(A, B) returns the same as ((x,y) for x in A for y in B).\n# combination: Return r length subsequences of elements from the input iterable.\nfrom itertools import product, combinations\nimport time\n\nimport yaml\nimport pysam\n\nimport tempfile\nimport string\nfrom contextlib import contextmanager\n\n# -----------------------\n#\n# Helper functions\n#\n# -----------------------\n\ndef string_hamming_distance(str1, str2):\n \"\"\"\n Fast hamming distance over 2 strings known to be of same length.\n In information theory, the Hamming distance between two strings of equal \n length is the number of positions at which the corresponding symbols \n are different.\n\n eg \"karolin\" and \"kathrin\" is 3.\n \"\"\"\n return sum(itertools.imap(operator.ne, str1, str2))\n\ndef rev_comp(seq):\n tbl = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'N':'N'}\n return ''.join(tbl[s] for s in seq[::-1])\n\n\ndef to_fastq(name, seq, qual):\n \"\"\"\n Return string that can be written to 
fastQ file\n \"\"\"\n return '@'+name+'\\n'+seq+'\\n+\\n'+qual+'\\n'\n\ndef to_fastq_lines(bc, umi, seq, qual, read_name=''):\n \"\"\"\n Return string that can be written to fastQ file\n \"\"\"\n reformated_name = read_name.replace(':', '_')\n name = '%s:%s:%s' % (bc, umi, reformated_name)\n return to_fastq(name, seq, qual)\n\ndef from_fastq(handle):\n while True:\n name = next(handle).rstrip()[1:] #Read name\n seq = next(handle).rstrip() #Read seq\n next(handle) #+ line\n qual = next(handle).rstrip() #Read qual\n if not name or not seq or not qual:\n break\n yield name, seq, qual\n\ndef seq_neighborhood(seq, n_subs=1):\n \"\"\"\n Given a sequence, yield all sequences within n_subs substitutions of \n that sequence by looping through each combination of base pairs within\n each combination of positions.\n \"\"\"\n for positions in combinations(range(len(seq)), n_subs):\n # yields all unique combinations of indices for n_subs mutations\n for subs in product(*(\"ATGCN\",)*n_subs):\n # yields all combinations of possible nucleotides for strings of length\n # n_subs\n seq_copy = list(seq)\n for p, s in zip(positions, subs):\n seq_copy[p] = s\n yield ''.join(seq_copy)\n\ndef build_barcode_neighborhoods(barcode_file, expect_reverse_complement=True):\n \"\"\"\n Given a set of barcodes, produce sequences which can unambiguously be\n mapped to these barcodes, within 2 substitutions. If a sequence maps to \n multiple barcodes, get rid of it. 
However, if a sequences maps to a bc1 with \n 1change and another with 2changes, keep the 1change mapping.\n \"\"\"\n\n # contains all mutants that map uniquely to a barcode\n clean_mapping = dict()\n\n # contain single or double mutants \n mapping1 = defaultdict(set)\n mapping2 = defaultdict(set)\n \n #Build the full neighborhood and iterate through barcodes\n with open(barcode_file, 'rU') as f:\n # iterate through each barcode (rstrip cleans string of whitespace)\n for line in f:\n barcode = line.rstrip()\n if expect_reverse_complement:\n barcode = rev_comp(line.rstrip())\n\n # each barcode obviously maps to itself uniquely\n clean_mapping[barcode] = barcode\n\n # for each possible mutated form of a given barcode, either add\n # the origin barcode into the set corresponding to that mutant or \n # create a new entry for a mutant not already in mapping1\n # eg: barcodes CATG and CCTG would be in the set for mutant CTTG\n # but only barcode CATG could generate mutant CANG\n for n in seq_neighborhood(barcode, 1):\n mapping1[n].add(barcode)\n \n # same as above but with double mutants\n for n in seq_neighborhood(barcode, 2):\n mapping2[n].add(barcode) \n \n # take all single-mutants and find those that could only have come from one\n # specific barcode\n for k, v in mapping1.items():\n if k not in clean_mapping:\n if len(v) == 1:\n clean_mapping[k] = list(v)[0]\n \n for k, v in mapping2.items():\n if k not in clean_mapping:\n if len(v) == 1:\n clean_mapping[k] = list(v)[0]\n del mapping1\n del mapping2\n return clean_mapping\n\ndef check_dir(path):\n \"\"\"\n Checks if directory already exists or not and creates it if it doesn't\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\ndef print_to_stderr(msg, newline=True):\n \"\"\"\n Wrapper to eventually write to stderr\n \"\"\"\n sys.stderr.write(str(msg))\n if newline:\n sys.stderr.write('\\n')\n\ndef worker_filter(iterable, worker_index, total_workers):\n 
return (p for i,p in enumerate(iterable) if (i-worker_index)%total_workers==0)\n\nclass FIFO():\n \"\"\"\n A context manager for a named pipe.\n \"\"\"\n def __init__(self, filename=\"\", suffix=\"\", prefix=\"tmp_fifo_dir\", dir=None):\n if filename:\n self.filename = filename\n else:\n self.tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n self.filename = os.path.join(self.tmpdir, 'fifo')\n\n def __enter__(self):\n if os.path.exists(self.filename):\n os.unlink(self.filename)\n os.mkfifo(self.filename)\n return self\n\n def __exit__(self, type, value, traceback):\n os.remove(self.filename)\n if hasattr(self, 'tmpdir'):\n shutil.rmtree(self.tmpdir)\n\n# -----------------------\n#\n# Core objects\n#\n# -----------------------\n\nclass IndropsProject():\n\n def __init__(self, project_yaml_file_handle):\n\n self.yaml = yaml.load(project_yaml_file_handle)\n\n self.name = self.yaml['project_name']\n self.project_dir = self.yaml['project_dir']\n\n self.libraries = OrderedDict()\n self.runs = OrderedDict()\n\n for run in self.yaml['sequencing_runs']:\n \"\"\"\n After filtering, each sequencing run generates between 1 ... X files with filtered reads.\n X = (N x M)\n - N: The run is often split into several files (a typical NextSeq run is split into L001,\n L002, L003, L004 which match different lanes, but this can also be done artificially.\n - M: The same run might contain several libraries. 
The demultiplexing can be handled by the script (or externally).\n If demultiplexing is done externally, there will be a different .fastq file for each library.\n \"\"\"\n version = run['version']\n\n filtered_filename = '{library_name}_{run_name}'\n if run['version'] == 'v3':\n filtered_filename += '_{library_index}'\n # Prepare to iterate over run split into several files\n if 'split_affixes' in run:\n filtered_filename += '_{split_affix}'\n split_affixes = run['split_affixes']\n else:\n split_affixes = ['']\n\n filtered_filename += '.fastq'\n\n # Prepare to iterate over libraries\n if 'libraries' in run:\n run_libraries = run['libraries']\n elif 'library_name' in run:\n run_libraries = [{'library_name' : run['library_name'], 'library_prefix':''}]\n else:\n raise Exception('No library name or libraries specified.')\n\n if run['version']=='v1' or run['version']=='v2':\n for affix in split_affixes:\n for lib in run_libraries:\n lib_name = lib['library_name']\n if lib_name not in self.libraries:\n self.libraries[lib_name] = IndropsLibrary(name=lib_name, project=self, version=run['version'])\n else:\n assert self.libraries[lib_name].version == run['version']\n\n if version == 'v1':\n metaread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R1', library_prefix=lib['library_prefix']))\n bioread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R2', library_prefix=lib['library_prefix']))\n elif version == 'v2':\n metaread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R2', library_prefix=lib['library_prefix']))\n bioread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R1', library_prefix=lib['library_prefix']))\n\n filtered_part_filename = filtered_filename.format(run_name=run['name'], split_affix=affix, library_name=lib_name)\n filtered_part_path = os.path.join(self.project_dir, lib_name, 'filtered_parts', 
filtered_part_filename)\n part = V1V2Filtering(filtered_fastq_filename=filtered_part_path,\n project=self, \n bioread_filename=bioread_filename,\n metaread_filename=metaread_filename,\n run_name=run['name'],\n library_name=lib_name,\n part_name=affix)\n\n if run['name'] not in self.runs:\n self.runs[run['name']] = []\n self.runs[run['name']].append(part)\n self.libraries[lib_name].parts.append(part)\n\n elif run['version'] == 'v3':\n for affix in split_affixes:\n filtered_part_filename = filtered_filename.format(run_name=run['name'], split_affix=affix,\n library_name='{library_name}', library_index='{library_index}')\n part_filename = os.path.join(self.project_dir, '{library_name}', 'filtered_parts', filtered_part_filename)\n\n input_filename = os.path.join(run['dir'], run['fastq_path'].format(split_affix=affix, read='{read}'))\n part = V3Demultiplexer(run['libraries'], project=self, part_filename=part_filename, input_filename=input_filename, run_name=run['name'], part_name=affix)\n\n if run['name'] not in self.runs:\n self.runs[run['name']] = []\n self.runs[run['name']].append(part)\n\n for lib in run_libraries:\n lib_name = lib['library_name']\n lib_index = lib['library_index']\n if lib_name not in self.libraries:\n self.libraries[lib_name] = IndropsLibrary(name=lib_name, project=self, version=run['version'])\n self.libraries[lib_name].parts.append(part.libraries[lib_index])\n\n\n @property\n def paths(self):\n if not hasattr(self, '_paths'):\n script_dir = os.path.dirname(os.path.realpath(__file__))\n #Read defaults\n with open(os.path.join(script_dir, 'default_parameters.yaml'), 'r') as f:\n paths = yaml.load(f)['paths']\n # Update with user provided values\n paths.update(self.yaml['paths'])\n\n paths['python'] = os.path.join(paths['python_dir'], 'python')\n paths['java'] = os.path.join(paths['java_dir'], 'java')\n paths['bowtie'] = os.path.join(paths['bowtie_dir'], 'bowtie')\n paths['samtools'] = os.path.join(paths['samtools_dir'], 'samtools')\n 
paths['trimmomatic_jar'] = os.path.join(script_dir, 'bins', 'trimmomatic-0.33.jar')\n paths['rsem_tbam2gbam'] = os.path.join(paths['rsem_dir'], 'rsem-tbam2gbam')\n paths['rsem_prepare_reference'] = os.path.join(paths['rsem_dir'], 'rsem-prepare-reference')\n\n self._paths = type('Paths_anonymous_object',(object,),paths)()\n self._paths.trim_polyA_and_filter_low_complexity_reads_py = os.path.join(script_dir, 'trim_polyA_and_filter_low_complexity_reads.py')\n self._paths.quantify_umifm_from_alignments_py = os.path.join(script_dir, 'quantify_umifm_from_alignments.py')\n self._paths.count_barcode_distribution_py = os.path.join(script_dir, 'count_barcode_distribution.py')\n self._paths.gel_barcode1_list = os.path.join(script_dir, 'ref/barcode_lists/gel_barcode1_list.txt')\n self._paths.gel_barcode2_list = os.path.join(script_dir, 'ref/barcode_lists/gel_barcode2_list.txt')\n return self._paths\n\n @property\n def parameters(self):\n if not hasattr(self, '_parameters'):\n #Read defaults\n with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_parameters.yaml'), 'r') as f:\n self._parameters = yaml.load(f)['parameters']\n # Update with user provided values\n if 'parameters' in self.yaml:\n for k, d in self.yaml['parameters'].items():\n self._parameters[k].update(d)\n\n return self._parameters\n\n @property\n def gel_barcode1_revcomp_list_neighborhood(self):\n if not hasattr(self, '_gel_barcode1_list_neighborhood'):\n self._gel_barcode1_revcomp_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode1_list, True)\n return self._gel_barcode1_revcomp_list_neighborhood\n \n @property\n def gel_barcode2_revcomp_list_neighborhood(self):\n if not hasattr(self, '_gel_barcode2_revcomp_list_neighborhood'):\n self._gel_barcode2_revcomp_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode2_list, True)\n return self._gel_barcode2_revcomp_list_neighborhood\n\n @property\n def gel_barcode2_list_neighborhood(self):\n if not 
hasattr(self, '_gel_barcode2_list_neighborhood'):\n self._gel_barcode2_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode2_list, False)\n return self._gel_barcode2_list_neighborhood\n\n @property\n def stable_barcode_names(self):\n if not hasattr(self, '_stable_barcode_names'):\n with open(self.paths.gel_barcode1_list) as f:\n rev_bc1s = [rev_comp(line.rstrip()) for line in f]\n with open(self.paths.gel_barcode2_list) as f:\n bc2s = [line.rstrip() for line in f]\n rev_bc2s = [rev_comp(bc2) for bc2 in bc2s]\n\n # V1, V2 names:\n v1v2_names = {}\n barcode_iter = product(rev_bc1s, rev_bc2s)\n name_iter = product(string.ascii_uppercase, repeat=4)\n for barcode, name in zip(barcode_iter, name_iter):\n v1v2_names['-'.join(barcode)] = 'bc' + ''.join(name)\n\n # V3 names:\n v3_names = {}\n barcode_iter = product(bc2s, rev_bc2s)\n name_iter = product(string.ascii_uppercase, repeat=4)\n for barcode, name in zip(barcode_iter, name_iter):\n v3_names['-'.join(barcode)] = 'bc' + ''.join(name)\n\n\n self._stable_barcode_names = {\n 'v1' : v1v2_names,\n 'v2' : v1v2_names,\n 'v3': v3_names,\n }\n return self._stable_barcode_names\n\n def build_transcriptome(self, gzipped_genome_softmasked_fasta_filename, gzipped_transcriptome_gtf):\n import pyfasta\n \n index_dir = os.path.dirname(self.paths.bowtie_index)\n check_dir(index_dir)\n\n genome_filename = os.path.join(index_dir, '.'.join(gzipped_genome_softmasked_fasta_filename.split('.')[:-1]))\n\n gtf_filename = os.path.join(index_dir, gzipped_transcriptome_gtf.split('/')[-1])\n gtf_prefix = '.'.join(gtf_filename.split('.')[:-2])\n gtf_with_genenames_in_transcript_id = gtf_prefix + '.annotated.gtf'\n\n accepted_gene_biotypes_for_NA_transcripts = set([\"protein_coding\",\"IG_V_gene\",\"IG_J_gene\",\"TR_J_gene\",\"TR_D_gene\",\"TR_V_gene\",\"IG_C_gene\",\"IG_D_gene\",\"TR_C_gene\"])\n tsl1_or_tsl2_strings = ['transcript_support_level \"1\"', 'transcript_support_level \"1 ', 'transcript_support_level \"2\"', 
'transcript_support_level \"2 ']\n tsl_NA = 'transcript_support_level \"NA'\n\n print_to_stderr('Filtering GTF')\n output_gtf = open(gtf_with_genenames_in_transcript_id, 'w')\n for line in subprocess.Popen([\"gzip\", \"--stdout\", \"-d\", gzipped_transcriptome_gtf], stdout=subprocess.PIPE).stdout:\n if 'transcript_id' not in line:\n continue\n\n line_valid_for_output = False\n for string in tsl1_or_tsl2_strings:\n if string in line:\n line_valid_for_output = True\n break\n \n if tsl_NA in line:\n gene_biotype = re.search(r'gene_biotype \\\"(.*?)\\\";', line)\n if gene_biotype and gene_biotype.group(1) in accepted_gene_biotypes_for_NA_transcripts:\n line_valid_for_output = True\n\n if line_valid_for_output:\n gene_name = re.search(r'gene_name \\\"(.*?)\\\";', line)\n if gene_name:\n gene_name = gene_name.group(1)\n out_line = re.sub(r'(?<=transcript_id \")(.*?)(?=\";)', r'\\1|'+gene_name, line)\n output_gtf.write(out_line)\n output_gtf.close()\n\n print_to_stderr('Gunzipping Genome')\n p_gzip = subprocess.Popen([\"gzip\", \"-dfc\", gzipped_genome_softmasked_fasta_filename], stdout=open(genome_filename, 'wb'))\n if p_gzip.wait() != 0:\n raise Exception(\" Error in rsem-prepare reference \")\n\n p_rsem = subprocess.Popen([self.paths.rsem_prepare_reference, '--bowtie', '--bowtie-path', self.paths.bowtie_dir,\n '--gtf', gtf_with_genenames_in_transcript_id, \n '--polyA', '--polyA-length', '5', genome_filename, self.paths.bowtie_index])\n\n if p_rsem.wait() != 0:\n raise Exception(\" Error in rsem-prepare reference \")\n\n print_to_stderr('Finding soft masked regions in transcriptome')\n \n transcripts_fasta = pyfasta.Fasta(self.paths.bowtie_index + '.transcripts.fa')\n soft_mask = {}\n for tx, seq in transcripts_fasta.items():\n seq = str(seq)\n soft_mask[tx] = set((m.start(), m.end()) for m in re.finditer(r'[atcgn]+', seq))\n with open(self.paths.bowtie_index + '.soft_masked_regions.pickle', 'w') as out:\n pickle.dump(soft_mask, out)\n\n\nclass IndropsLibrary():\n\n def 
__init__(self, name='', project=None, version=''):\n self.project = project\n self.name = name\n self.parts = []\n self.version = version\n\n self.paths = {}\n for lib_dir in ['filtered_parts', 'quant_dir']:\n dir_path = os.path.join(self.project.project_dir, self.name, lib_dir)\n check_dir(dir_path)\n self.paths[lib_dir] = dir_path\n self.paths = type('Paths_anonymous_object',(object,),self.paths)()\n\n self.paths.abundant_barcodes_names_filename = os.path.join(self.project.project_dir, self.name, 'abundant_barcodes.pickle')\n self.paths.filtering_statistics_filename = os.path.join(self.project.project_dir, self.name, self.name+'.filtering_stats.csv')\n self.paths.barcode_abundance_histogram_filename = os.path.join(self.project.project_dir, self.name, self.name+'.barcode_abundance.png')\n self.paths.missing_quants_filename = os.path.join(self.project.project_dir, self.name, self.name+'.missing_barcodes.pickle')\n\n @property\n def barcode_counts(self):\n if not hasattr(self, '_barcode_counts'):\n self._barcode_counts = defaultdict(int)\n for part in self.parts:\n for k, v in part.part_barcode_counts.items():\n self._barcode_counts[k] += v\n\n return self._barcode_counts\n\n @property\n def abundant_barcodes(self):\n if not hasattr(self, '_abundant_barcodes'):\n with open(self.paths.abundant_barcodes_names_filename) as f:\n self._abundant_barcodes = pickle.load(f)\n return self._abundant_barcodes\n\n def sorted_barcode_names(self, min_reads=0):\n return [name for bc,(name,abun) in sorted(self.abundant_barcodes.items(), key=lambda i:-i[1][1]) if abun>min_reads]\n\n def identify_abundant_barcodes(self, make_histogram=True, absolute_min_reads=250):\n \"\"\"\n Identify which barcodes are above the absolute minimal abundance, \n and make a histogram summarizing the barcode distribution\n \"\"\"\n keep_barcodes = []\n for k, v in self.barcode_counts.items():\n if v > absolute_min_reads:\n keep_barcodes.append(k)\n\n abundant_barcodes = {}\n print_to_stderr(\" %d barcodes 
above absolute minimum threshold\" % len(keep_barcodes))\n for bc in keep_barcodes:\n abundant_barcodes[bc] = (self.project.stable_barcode_names[self.version][bc], self.barcode_counts[bc])\n\n self._abundant_barcodes = abundant_barcodes\n with open(self.paths.abundant_barcodes_names_filename, 'w') as f:\n pickle.dump(abundant_barcodes, f)\n\n # Create table about the filtering process\n with open(self.paths.filtering_statistics_filename, 'w') as filtering_stats:\n\n header = ['Run', 'Part', 'Input Reads', 'Valid Structure', 'Surviving Trimmomatic', 'Surviving polyA trim and complexity filter']\n\n if self.version == 'v1' or self.version == 'v2':\n structure_parts = ['W1_in_R2', 'empty_read', 'No_W1', 'No_polyT', 'BC1', 'BC2', 'Umi_error']\n header += ['W1 in R2', 'empty read', 'No W1 in R1', 'No polyT', 'BC1', 'BC2', 'UMI_contains_N']\n elif self.version == 'v3':\n structure_parts = ['Invalid_BC1', 'Invalid_BC2', 'UMI_contains_N']\n header += ['Invalid BC1', 'Invalid BC2', 'UMI_contains_N']\n\n trimmomatic_parts = ['dropped']\n header += ['Dropped by Trimmomatic']\n\n complexity_filter_parts = ['rejected_because_too_short', 'rejected_because_complexity_too_low']\n header += ['Too short after polyA trim', 'Read complexity too low']\n\n filtering_stats.write(','.join(header)+'\\n')\n\n for part in self.parts:\n with open(part.filtering_metrics_filename) as f:\n part_stats = yaml.load(f)\n line = [part.run_name, part.part_name, part_stats['read_structure']['Total'], part_stats['read_structure']['Valid'], part_stats['trimmomatic']['output'], part_stats['complexity_filter']['output']]\n line += [part_stats['read_structure'][k] for k in structure_parts]\n line += [part_stats['trimmomatic'][k] for k in trimmomatic_parts]\n line += [part_stats['complexity_filter'][k] for k in complexity_filter_parts]\n line = [str(l) for l in line]\n filtering_stats.write(','.join(line)+'\\n')\n\n print_to_stderr(\"Created Library filtering summary:\")\n print_to_stderr(\" \" + 
self.paths.filtering_statistics_filename)\n \n # Make the histogram figure\n if not make_histogram:\n return\n\n count_freq = defaultdict(int)\n for bc, count in self.barcode_counts.items():\n count_freq[count] += 1\n\n x = np.array(count_freq.keys())\n y = np.array(count_freq.values())\n w = x*y\n\n # need to use non-intenactive Agg backend\n import matplotlib\n matplotlib.use('Agg')\n from matplotlib import pyplot as plt\n ax = plt.subplot(111)\n ax.hist(x, bins=np.logspace(0, 6, 50), weights=w)\n ax.set_xscale('log')\n ax.set_xlabel('Reads per barcode')\n ax.set_ylabel('#reads coming from bin')\n plt.savefig(self.paths.barcode_abundance_histogram_filename)\n\n print_to_stderr(\"Created Barcode Abundance Histogram at:\")\n print_to_stderr(\" \" + self.paths.barcode_abundance_histogram_filename)\n\n def sort_reads_by_barcode(self, index=0):\n self.parts[index].sort_reads_by_barcode(self.abundant_barcodes)\n\n def get_reads_for_barcode(self, barcode, run_filter=[]):\n for part in self.parts:\n if (not run_filter) or (part.run_name in run_filter):\n for line in part.get_reads_for_barcode(barcode):\n yield line\n\n def quantify_expression(self, analysis_prefix='', min_reads=750, min_counts=0, total_workers=1, worker_index=0, no_bam=False, run_filter=[]):\n if analysis_prefix:\n analysis_prefix += '.'\n\n sorted_barcode_names = self.sorted_barcode_names(min_reads=min_reads)\n\n # Identify which barcodes belong to this worker\n barcodes_for_this_worker = []\n i = worker_index\n while i < len(sorted_barcode_names):\n barcodes_for_this_worker.append(sorted_barcode_names[i])\n i += total_workers\n\n counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))\n ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))\n ambig_partners_output_filename = os.path.join(self.paths.quant_dir, 
'%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))\n metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))\n ignored_for_output_filename = counts_output_filename+'.ignored'\n\n merged_bam_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.bam'% (analysis_prefix, worker_index, total_workers))\n merged_bam_index_filename = merged_bam_filename + '.bai'\n\n get_barcode_genomic_bam_filename = lambda bc: os.path.join(self.paths.quant_dir, '%s%s.genomic.sorted.bam' % (analysis_prefix, bc))\n\n # If we wanted BAM output, and the merge BAM and merged BAM index are present, then we are done\n if (not no_bam) and (os.path.isfile(merged_bam_filename) and os.path.isfile(merged_bam_index_filename)):\n print_to_stderr('Indexed, merged BAM file detected for this worker. Done.')\n return \n\n # Otherwise, we have to check what we need to quantify\n\n \n \"\"\"\n Function to determine which barcodes this quantification worker might have already quantified.\n This tries to handle interruption during any step of the process.\n\n The worker is assigned some list of barcodes L. For every barcode:\n - It could have been quantified\n - but have less than min_counts ---> so it got written to `ignored` file.\n - and quantification succeeded, meaning\n 1. there is a line (ending in \\n) in the `metrics` file. \n 2. there is a line (ending in \\n) in the `quantification` file.\n 3. there (could) be a line (ending in \\n) in the `ambiguous quantification` file.\n 4. there (could) be a line (ending in \\n) in the `ambiguous quantification partners` file.\n [If any line doesn't end in \\n, then likely the output of that line was interrupted!]\n 5. (If BAM output is desired) There should be a sorted genomic BAM\n 6. 
(If BAM output is desired) There should be a sorted genomic BAM index\n \"\"\"\n succesfully_previously_quantified = set()\n previously_ignored = set()\n header_written = False\n\n if os.path.isfile(counts_output_filename) and os.path.isfile(metrics_output_filename):\n # Load in list of ignored barcodes\n if os.path.isfile(ignored_for_output_filename):\n with open(ignored_for_output_filename, 'r') as f:\n previously_ignored = set([line.rstrip().split('\\t')[0] for line in f])\n\n # Load the metrics data into memory\n # (It should be fairly small, this is fast and safe)\n existing_metrics_data = {}\n with open(metrics_output_filename, 'r') as f:\n existing_metrics_data = dict((line.partition('\\t')[0], line) for line in f if line[-1]=='\\n')\n\n\n # Quantification data could be large, read it line by line and output it back for barcodes that have a matching metrics line.\n with open(counts_output_filename, 'r') as in_counts, \\\n open(counts_output_filename+'.tmp', 'w') as tmp_counts, \\\n open(metrics_output_filename+'.tmp', 'w') as tmp_metrics:\n\n for line in in_counts:\n # The first worker is reponsible for written the header.\n # Make sure we carry that over\n if (not header_written) and (worker_index==0):\n tmp_counts.write(line)\n tmp_metrics.write(existing_metrics_data['Barcode'])\n header_written = True\n continue\n\n # This line has incomplete output, skip it.\n # (This can only happen with the last line)\n if line[-1] != '\\n':\n continue\n\n barcode = line.partition('\\t')[0]\n\n # Skip barcode if we don't have existing metrics data\n if barcode not in existing_metrics_data:\n continue\n\n # Check if we BAM required BAM files exist\n barcode_genomic_bam_filename = get_barcode_genomic_bam_filename(barcode)\n bam_files_required_and_present = no_bam or (os.path.isfile(barcode_genomic_bam_filename) and os.path.isfile(barcode_genomic_bam_filename+'.bai'))\n if not bam_files_required_and_present:\n continue\n\n # This passed all the required checks, write the 
line to the temporary output files\n tmp_counts.write(line)\n tmp_metrics.write(existing_metrics_data[barcode])\n succesfully_previously_quantified.add(barcode)\n\n shutil.move(counts_output_filename+'.tmp', counts_output_filename)\n shutil.move(metrics_output_filename+'.tmp', metrics_output_filename)\n\n # For any 'already quantified' barcode, make sure we also copy over the ambiguity data\n with open(ambig_counts_output_filename, 'r') as in_f, \\\n open(ambig_counts_output_filename+'.tmp', 'w') as tmp_f:\n f_first_line = (worker_index == 0)\n for line in in_f:\n if f_first_line:\n tmp_f.write(line)\n f_first_line = False\n continue\n if (line.partition('\\t')[0] in succesfully_previously_quantified) and (line[-1]=='\\n'):\n tmp_f.write(line)\n shutil.move(ambig_counts_output_filename+'.tmp', ambig_counts_output_filename)\n\n with open(ambig_partners_output_filename, 'r') as in_f, \\\n open(ambig_partners_output_filename+'.tmp', 'w') as tmp_f:\n for line in in_f:\n if (line.partition('\\t')[0] in succesfully_previously_quantified) and (line[-1]=='\\n'):\n tmp_f.write(line)\n shutil.move(ambig_partners_output_filename+'.tmp', ambig_partners_output_filename)\n\n barcodes_to_quantify = [bc for bc in barcodes_for_this_worker if (bc not in succesfully_previously_quantified and bc not in previously_ignored)]\n\n\n print_to_stderr(\"\"\"[%s] This worker assigned %d out of %d total barcodes.\"\"\" % (self.name, len(barcodes_for_this_worker), len(sorted_barcode_names)))\n if len(barcodes_for_this_worker)-len(barcodes_to_quantify) > 0:\n print_to_stderr(\"\"\" %d previously quantified, %d previously ignored, %d left for this run.\"\"\" % (len(succesfully_previously_quantified), len(previously_ignored), len(barcodes_to_quantify)))\n \n\n\n print_to_stderr(('{0:<14.12}'.format('Prefix') if analysis_prefix else '') + '{0:<14.12}{1:<9}'.format(\"Library\", \"Barcode\"), False)\n print_to_stderr(\"{0:<8s}{1:<8s}{2:<10s}\".format(\"Reads\", \"Counts\", \"Ambigs\"))\n for barcode 
in barcodes_to_quantify:\n self.quantify_expression_for_barcode(barcode,\n counts_output_filename, metrics_output_filename,\n ambig_counts_output_filename, ambig_partners_output_filename,\n no_bam=no_bam, write_header=(not header_written) and (worker_index==0), analysis_prefix=analysis_prefix,\n min_counts = min_counts, run_filter=run_filter)\n header_written = True\n print_to_stderr(\"Per barcode quantification completed.\")\n\n if no_bam:\n return\n\n #Gather list of barcodes with output from the metrics file\n genomic_bams = []\n with open(metrics_output_filename, 'r') as f:\n for line in f:\n bc = line.partition('\\t')[0]\n if bc == 'Barcode': #This is the line in the header\n continue\n genomic_bams.append(get_barcode_genomic_bam_filename(bc))\n\n print_to_stderr(\"Merging BAM output.\")\n try:\n subprocess.check_output([self.project.paths.samtools, 'merge', '-f', merged_bam_filename]+genomic_bams, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError, err:\n print_to_stderr(\" CMD: %s\" % str(err.cmd)[:400])\n print_to_stderr(\" stdout/stderr:\")\n print_to_stderr(err.output)\n raise Exception(\" === Error in samtools merge === \")\n\n print_to_stderr(\"Indexing merged BAM output.\")\n try:\n subprocess.check_output([self.project.paths.samtools, 'index', merged_bam_filename], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError, err:\n print_to_stderr(\" CMD: %s\" % str(err.cmd)[:400])\n print_to_stderr(\" stdout/stderr:\")\n print_to_stderr(err.output)\n raise Exception(\" === Error in samtools index === \")\n\n print(genomic_bams)\n for filename in genomic_bams:\n os.remove(filename)\n os.remove(filename + '.bai')\n\n def quantify_expression_for_barcode(self, barcode, counts_output_filename, metrics_output_filename,\n ambig_counts_output_filename, ambig_partners_output_filename,\n min_counts=0, analysis_prefix='', no_bam=False, write_header=False, run_filter=[]):\n print_to_stderr(('{0:<14.12}'.format(analysis_prefix) if 
analysis_prefix else '') + '{0:<14.12}{1:<9}'.format(self.name, barcode), False)\n\n unaligned_reads_output = os.path.join(self.paths.quant_dir, '%s%s.unaligned.fastq' % (analysis_prefix,barcode))\n aligned_bam = os.path.join(self.paths.quant_dir, '%s%s.aligned.bam' % (analysis_prefix,barcode))\n\n # Bowtie command\n bowtie_cmd = [self.project.paths.bowtie, self.project.paths.bowtie_index, '-q', '-',\n '-p', '1', '-a', '--best', '--strata', '--chunkmbs', '1000', '--norc', '--sam',\n '-shmem', #should sometimes reduce memory usage...?\n '-m', str(self.project.parameters['bowtie_arguments']['m']),\n '-n', str(self.project.parameters['bowtie_arguments']['n']),\n '-l', str(self.project.parameters['bowtie_arguments']['l']),\n '-e', str(self.project.parameters['bowtie_arguments']['e']),\n ]\n if self.project.parameters['output_arguments']['output_unaligned_reads_to_other_fastq']:\n bowtie_cmd += ['--un', unaligned_reads_output]\n\n # Quantification command\n script_dir = os.path.dirname(os.path.realpath(__file__))\n quant_cmd = [self.project.paths.python, self.project.paths.quantify_umifm_from_alignments_py,\n '-m', str(self.project.parameters['umi_quantification_arguments']['m']),\n '-u', str(self.project.parameters['umi_quantification_arguments']['u']),\n '-d', str(self.project.parameters['umi_quantification_arguments']['d']),\n '--min_non_polyA', str(self.project.parameters['umi_quantification_arguments']['min_non_polyA']),\n '--library', str(self.name),\n '--barcode', str(barcode),\n '--counts', counts_output_filename,\n '--metrics', metrics_output_filename,\n '--ambigs', ambig_counts_output_filename,\n '--ambig-partners', ambig_partners_output_filename,\n '--min-counts', str(min_counts),\n ]\n if not no_bam:\n quant_cmd += ['--bam', aligned_bam]\n if write_header:\n quant_cmd += ['--write-header']\n\n if self.project.parameters['umi_quantification_arguments']['split-ambigs']:\n quant_cmd.append('--split-ambig')\n if 
self.project.parameters['output_arguments']['filter_alignments_to_softmasked_regions']:\n quant_cmd += ['--soft-masked-regions', self.project.paths.bowtie_index + '.soft_masked_regions.pickle']\n\n # Spawn processes\n\n p1 = subprocess.Popen(bowtie_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p2 = subprocess.Popen(quant_cmd, stdin=p1.stdout, stderr=subprocess.PIPE)\n \n \n for line in self.get_reads_for_barcode(barcode, run_filter=run_filter):\n p1.stdin.write(line)\n\n p1.stdin.close()\n\n if p1.wait() != 0:\n print_to_stderr('\\n')\n print_to_stderr(p1.stderr.read())\n raise Exception('\\n === Error on bowtie ===')\n\n if p2.wait() != 0:\n print_to_stderr(p2.stderr.read())\n raise Exception('\\n === Error on Quantification Script ===')\n print_to_stderr(p2.stderr.read(), False)\n\n if no_bam:\n # We are done here\n return False\n\n if not os.path.isfile(aligned_bam):\n raise Exception(\"\\n === No aligned bam was output for barcode %s ===\" % barcode)\n\n genomic_bam = os.path.join(self.paths.quant_dir, '%s%s.genomic.bam' % (analysis_prefix,barcode))\n sorted_bam = os.path.join(self.paths.quant_dir, '%s%s.genomic.sorted.bam' % (analysis_prefix,barcode))\n try:\n subprocess.check_output([self.project.paths.rsem_tbam2gbam, self.project.paths.bowtie_index, aligned_bam, genomic_bam], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError, err:\n print_to_stderr(\" CMD: %s\" % str(err.cmd)[:100])\n print_to_stderr(\" stdout/stderr:\")\n print_to_stderr(err.output)\n raise Exception(\" === Error in rsem-tbam2gbam === \")\n\n try:\n subprocess.check_output([self.project.paths.samtools, 'sort', '-o', sorted_bam, genomic_bam], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError, err:\n print_to_stderr(\" CMD: %s\" % str(err.cmd)[:100])\n print_to_stderr(\" stdout/stderr:\")\n print_to_stderr(err.output)\n raise Exception(\" === Error in samtools sort === \")\n\n try:\n 
subprocess.check_output([self.project.paths.samtools, 'index', sorted_bam], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError, err:\n print_to_stderr(\" CMD: %s\" % str(err.cmd)[:100])\n print_to_stderr(\" stdout/stderr:\")\n print_to_stderr(err.output)\n raise Exception(\" === Error in samtools index === \")\n\n os.remove(aligned_bam)\n os.remove(genomic_bam)\n\n\n return True\n\n def aggregate_counts(self, analysis_prefix='', process_ambiguity_data=False):\n if analysis_prefix:\n analysis_prefix += '.'\n \n quant_output_files = [fn[len(analysis_prefix):].split('.')[0] for fn in os.listdir(self.paths.quant_dir) if ('worker' in fn and fn[:len(analysis_prefix)]==analysis_prefix)]\n worker_names = [w[6:] for w in quant_output_files]\n worker_indices = set(int(w.split('_')[0]) for w in worker_names)\n\n total_workers = set(int(w.split('_')[1]) for w in worker_names)\n if len(total_workers) > 1:\n raise Exception(\"\"\"Quantification for library %s, prefix '%s' was run with different numbers of total_workers.\"\"\" % (self.name, analysis_prefix))\n total_workers = list(total_workers)[0]\n\n missing_workers = []\n for i in range(total_workers):\n if i not in worker_indices:\n missing_workers.append(i)\n if missing_workers:\n missing_workers = ','.join([str(i) for i in sorted(missing_workers)])\n raise Exception(\"\"\"Output from workers %s (total %d) is missing. 
\"\"\" % (missing_workers, total_workers))\n\n aggregated_counts_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.counts.tsv')\n aggregated_quant_metrics_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.quant_metrics.tsv')\n aggregated_ignored_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ignored_barcodes.txt')\n aggregated_bam_output = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.bam')\n\n aggregated_ambig_counts_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ambig_counts.tsv')\n aggregated_ambig_partners_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ambig_partners.tsv')\n\n agg_counts = open(aggregated_counts_filename, mode='w')\n agg_metrics = open(aggregated_quant_metrics_filename, mode='w')\n agg_ignored = open(aggregated_ignored_filename, mode='w')\n if process_ambiguity_data:\n agg_ambigs = open(aggregated_ambig_counts_filename, mode='w')\n agg_ambig_partners = open(aggregated_ambig_partners_filename, mode='w')\n\n end_of_counts_header = 0\n end_of_metrics_header = 0\n end_of_ambigs_header = 0\n print_to_stderr(' Concatenating output from all workers.')\n for worker_index in range(total_workers):\n counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))\n ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))\n ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))\n metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))\n ignored_for_output_filename = 
counts_output_filename+'.ignored'\n\n # Counts\n with open(counts_output_filename, 'r') as f:\n shutil.copyfileobj(f, agg_counts)\n\n # Metrics\n with open(metrics_output_filename, 'r') as f:\n shutil.copyfileobj(f, agg_metrics)\n\n # Ignored\n if os.path.isfile(counts_output_filename+'.ignored'):\n with open(counts_output_filename+'.ignored', 'r') as f:\n shutil.copyfileobj(f, agg_ignored)\n\n if process_ambiguity_data:\n with open(ambig_counts_output_filename, 'r') as f:\n shutil.copyfileobj(f, agg_ambigs)\n\n with open(ambig_partners_output_filename, 'r') as f:\n shutil.copyfileobj(f, agg_ambig_partners)\n\n print_to_stderr(' GZIPping concatenated output.')\n agg_counts.close()\n subprocess.Popen(['gzip', '-f', aggregated_counts_filename]).wait()\n agg_metrics.close()\n subprocess.Popen(['gzip', '-f', aggregated_quant_metrics_filename]).wait()\n print_to_stderr('Aggregation completed in %s.gz' % aggregated_counts_filename)\n\n if process_ambiguity_data:\n agg_ambigs.close()\n subprocess.Popen(['gzip', '-f', aggregated_ambig_counts_filename]).wait()\n agg_ambig_partners.close()\n subprocess.Popen(['gzip', '-f', aggregated_ambig_partners_filename]).wait()\n\n target_bams = [os.path.join(self.paths.quant_dir, '%sworker%d_%d.bam'% (analysis_prefix, worker_index, total_workers)) for worker_index in range(total_workers)]\n target_bams = [t for t in target_bams if os.path.isfile(t)]\n if target_bams:\n print_to_stderr(' Merging BAM files.')\n p1 = subprocess.Popen([self.project.paths.samtools, 'merge', '-f', aggregated_bam_output]+target_bams, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n if p1.wait() == 0:\n print_to_stderr(' Indexing merged BAM file.')\n p2 = subprocess.Popen([self.project.paths.samtools, 'index', aggregated_bam_output], stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n if p2.wait() == 0:\n for filename in target_bams:\n os.remove(filename)\n os.remove(filename + '.bai')\n else:\n print_to_stderr(\" === Error in samtools index ===\")\n 
print_to_stderr(p2.stderr.read())\n else:\n print_to_stderr(\" === Error in samtools merge ===\")\n print_to_stderr(p1.stderr.read()) \n\n # print_to_stderr('Deleting per-worker counts files.')\n # for worker_index in range(total_workers):\n # counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))\n # os.remove(counts_output_filename)\n\n # ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))\n # os.remove(ambig_counts_output_filename)\n\n # ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))\n # os.remove(ambig_partners_output_filename)\n\n # metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))\n # os.remove(metrics_output_filename)\n\n # ignored_for_output_filename = counts_output_filename+'.ignored'\n # os.remove(ignored_for_output_filename)\n \n\nclass LibrarySequencingPart():\n def __init__(self, filtered_fastq_filename=None, project=None, run_name='', library_name='', part_name=''):\n self.project = project\n self.run_name = run_name\n self.part_name = part_name\n self.library_name = library_name\n self.filtered_fastq_filename = filtered_fastq_filename\n self.barcode_counts_pickle_filename = filtered_fastq_filename + '.counts.pickle'\n self.filtering_metrics_filename = '.'.join(filtered_fastq_filename.split('.')[:-1]) + 'metrics.yaml'\n\n self.sorted_gzipped_fastq_filename = filtered_fastq_filename + '.sorted.fastq.gz'\n self.sorted_gzipped_fastq_index_filename = filtered_fastq_filename + '.sorted.fastq.gz.index.pickle'\n\n @property\n def is_filtered(self):\n if not hasattr(self, '_is_filtered'):\n self._is_filtered = os.path.exists(self.filtered_fastq_filename) and 
os.path.exists(self.barcode_counts_pickle_filename)\n return self._is_filtered\n \n @property\n def is_sorted(self):\n if not hasattr(self, '_is_sorted'):\n self._is_sorted = os.path.exists(self.sorted_gzipped_fastq_filename) and os.path.exists(self.sorted_gzipped_fastq_index_filename)\n return self._is_sorted\n\n @property\n def part_barcode_counts(self):\n if not hasattr(self, '_part_barcode_counts'):\n with open(self.barcode_counts_pickle_filename, 'r') as f:\n self._part_barcode_counts = pickle.load(f)\n return self._part_barcode_counts\n\n @property\n def sorted_index(self):\n if not hasattr(self, '_sorted_index'):\n with open(self.sorted_gzipped_fastq_index_filename, 'r') as f:\n self._sorted_index = pickle.load(f)\n return self._sorted_index\n\n def contains_library_in_query(self, query_libraries):\n return self.library_name in query_libraries\n\n def sort_reads_by_barcode(self, abundant_barcodes={}):\n sorted_barcodes = [j for j,v in sorted(abundant_barcodes.items(), key=lambda i:-i[1][1])]\n sorted_barcodes = [j for j in sorted_barcodes if j in self.part_barcode_counts]\n\n barcode_buffers = {}\n barcode_gzippers = {}\n for bc in sorted_barcodes + ['ignored']:\n barcode_buffers[bc] = BytesIO()\n barcode_gzippers[bc] = gzip.GzipFile(fileobj=barcode_buffers[bc], mode='wb')\n\n total_processed_reads = 0\n total_ignored_reads = 0\n bcs_with_data = set()\n bcs_with_tmp_data = set()\n barcode_tmp_filename = lambda bc: '%s.%s.tmp.gz' % (self.sorted_gzipped_fastq_filename, bc)\n\n\n total_reads = sum(self.part_barcode_counts.values())\n print_to_stderr('Sorting %d reads from %d barcodes above absolute minimum threshold.' 
% (total_reads, len(abundant_barcodes)))\n with open(self.filtered_fastq_filename, 'r') as input_fastq:\n for name, seq, qual in from_fastq(input_fastq):\n total_processed_reads += 1\n bc = name.split(':')[0]\n\n if total_processed_reads%1000000 == 0:\n print_to_stderr('Read in %.02f percent of all reads (%d)' % (100.*total_processed_reads/total_reads, total_processed_reads))\n \n if bc in abundant_barcodes:\n barcode_gzippers[bc].write(to_fastq(name, seq, qual))\n bcs_with_data.add(bc)\n else:\n total_ignored_reads += 1\n barcode_gzippers['ignored'].write(to_fastq(name, seq, qual))\n bcs_with_data.add('ignored')\n\n\n sorted_output_index = {}\n with open(self.sorted_gzipped_fastq_filename, 'wb') as sorted_output:\n for original_bc in sorted_barcodes + ['ignored']:\n if original_bc != 'ignored':\n new_bc_name = abundant_barcodes[original_bc][0]\n barcode_reads_count = self.part_barcode_counts[original_bc]\n else:\n new_bc_name = 'ignored'\n barcode_reads_count = total_ignored_reads\n\n start_pos = sorted_output.tell()\n barcode_gzippers[original_bc].close()\n if original_bc in bcs_with_data:\n barcode_buffers[original_bc].seek(0)\n shutil.copyfileobj(barcode_buffers[original_bc], sorted_output)\n barcode_buffers[original_bc].close()\n end_pos = sorted_output.tell()\n\n if end_pos > start_pos:\n sorted_output_index[new_bc_name] = (original_bc, start_pos, end_pos, end_pos-start_pos, barcode_reads_count)\n\n with open(self.sorted_gzipped_fastq_index_filename, 'w') as f:\n pickle.dump(sorted_output_index, f) \n\n def get_reads_for_barcode(self, barcode):\n if barcode not in self.sorted_index:\n raise StopIteration\n\n original_barcode, start_byte_offset, end_byte_offset, byte_length, barcode_reads = self.sorted_index[barcode]\n\n with open(self.sorted_gzipped_fastq_filename, 'rb') as sorted_output:\n sorted_output.seek(start_byte_offset)\n byte_buffer = BytesIO(sorted_output.read(byte_length))\n ungzipper = gzip.GzipFile(fileobj=byte_buffer, mode='rb')\n while True:\n 
yield next(ungzipper)\n\n @contextmanager\n def trimmomatic_and_low_complexity_filter_process(self):\n \"\"\"\n We start 3 processes that are connected with Unix pipes.\n\n Process 1 - Trimmomatic. Doesn't support stdin/stdout, so we instead use named pipes (FIFOs). It reads from FIFO1, and writes to FIFO2. \n Process 2 - In line complexity filter, a python script. It reads from FIFO2 (Trimmomatic output) and writes to the ouput file. \n Process 3 - Indexer that counts the number of reads for every barcode. This reads from stdin, writes the reads to stdout and writes the index as a pickle to stderr.\n\n When these are done, we start another process to count the results on the FastQ file.\n \"\"\"\n filtered_dir = os.path.dirname(self.filtered_fastq_filename) #We will use the same directory for creating temporary FIFOs, assuming we have write access.\n \n self.filtering_statistics_counter = defaultdict(int)\n with FIFO(dir=filtered_dir) as fifo2, open(self.filtered_fastq_filename, 'w') as filtered_fastq_file, open(self.filtered_fastq_filename+'.counts.pickle', 'w') as filtered_index_file:\n \n low_complexity_filter_cmd = [self.project.paths.python, self.project.paths.trim_polyA_and_filter_low_complexity_reads_py,\n '-input', fifo2.filename, \n '--min-post-trim-length', self.project.parameters['trimmomatic_arguments']['MINLEN'],\n '--max-low-complexity-fraction', str(self.project.parameters['low_complexity_filter_arguments']['max_low_complexity_fraction']),\n ]\n counter_cmd = [self.project.paths.python, self.project.paths.count_barcode_distribution_py]\n\n p2 = subprocess.Popen(low_complexity_filter_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p3 = subprocess.Popen(counter_cmd, stdin=p2.stdout, stdout=filtered_fastq_file, stderr=filtered_index_file)\n\n with FIFO(dir=filtered_dir) as fifo1:\n\n trimmomatic_cmd = [self.project.paths.java, '-Xmx500m', '-jar', self.project.paths.trimmomatic_jar,\n 'SE', '-threads', \"1\", '-phred33', fifo1.filename, 
fifo2.filename]\n for arg in self.project.parameters['trimmomatic_arguments']['argument_order']:\n val = self.project.parameters['trimmomatic_arguments'][arg]\n trimmomatic_cmd.append('%s:%s' % (arg, val))\n\n p1 = subprocess.Popen(trimmomatic_cmd, stderr=subprocess.PIPE)\n\n fifo1_filehandle = open(fifo1.filename, 'w')\n yield fifo1_filehandle\n fifo1_filehandle.close()\n trimmomatic_stderr = p1.stderr.read().splitlines()\n if trimmomatic_stderr[2] != 'TrimmomaticSE: Completed successfully':\n raise Exception('Trimmomatic did not complete succesfully on %s' % filtered_filename)\n trimmomatic_metrics = trimmomatic_stderr[1].split() \n # ['Input', 'Reads:', #READS, 'Surviving:', #SURVIVING, (%SURVIVING), 'Dropped:', #DROPPED, (%DROPPED)]\n trimmomatic_metrics = {'input' : trimmomatic_metrics[2], 'output': trimmomatic_metrics[4], 'dropped': trimmomatic_metrics[7]}\n p1.wait()\n\n complexity_filter_metrics = pickle.load(p2.stderr)\n p2.wait()\n p3.wait()\n\n\n filtering_metrics = {\n 'read_structure' : dict(self.filtering_statistics_counter),\n 'trimmomatic' : trimmomatic_metrics,\n 'complexity_filter': complexity_filter_metrics,\n }\n with open(self.filtering_metrics_filename, 'w') as f:\n yaml.dump(dict(filtering_metrics), f, default_flow_style=False)\n\n\nclass V1V2Filtering(LibrarySequencingPart):\n\n def __init__(self, bioread_filename=None, metaread_filename=None, *args, **kwargs):\n\n self.bioread_filename = bioread_filename\n self.metaread_filename = metaread_filename\n LibrarySequencingPart.__init__(self, *args, **kwargs)\n\n\n def filter_and_count_reads(self):\n \"\"\"\n Input the two raw FastQ files\n Output: \n - A single fastQ file that uses the read name to store the barcoding information\n - A pickle of the number of reads originating from each barcode \n \"\"\"\n # Relevant paths\n r1_filename, r2_filename = self.metaread_filename, self.bioread_filename\n\n #Get barcode neighborhoods\n bc1s = self.project.gel_barcode1_revcomp_list_neighborhood\n bc2s = 
self.project.gel_barcode2_revcomp_list_neighborhood \n\n\n # This starts a Trimmomatic process, a low complexity filter process, and will \n # upon closing, start the barcode distribution counting process.\n last_ping = time.time()\n ping_every_n_reads = 1000000\n ping_header = \"{0:>12}{1:>16}{2:>12}{3:>10}{4:>10}{5:>10}{6:>10}{7:>10}{8:>10}{9:>10}\"\n ping_header = ping_header.format(\"Total Reads\", \"\", \"Valid Reads\", \"W1 in R2\", \"Empty\", \"No W1\", \"No polyT\", \"No BC1\", \"No BC2\", \"No UMI\")\n ping_template = \"{total:12d} {rate:5.1f} sec/M {Valid:12.1%}{W1_in_R2:10.1%}{empty_read:10.1%}{No_W1:10.1%}{No_polyT:10.1%}{BC1:10.1%}{BC2:10.1%}{Umi_error:10.1%}\"\n def print_ping_to_log(last_ping):\n sec_per_mil = (time.time()-last_ping)/(ping_every_n_reads/10**6) if last_ping else 0.0\n total = self.filtering_statistics_counter['Total']\n if total > 0:\n ping_format_data = {k: float(self.filtering_statistics_counter[k])/total for k in ['Valid', 'W1_in_R2', 'empty_read', 'No_W1', 'No_polyT', 'BC1', 'BC2', 'Umi_error']}\n print_to_stderr(ping_template.format(total=total, rate=sec_per_mil, **ping_format_data))\n\n\n with self.trimmomatic_and_low_complexity_filter_process() as trim_process:\n #Iterate over the weaved reads\n for r_name, r1_seq, r1_qual, r2_seq, r2_qual in self._weave_fastqs(r1_filename, r2_filename):\n \n # Check if they should be kept\n keep, result = self._process_reads(r1_seq, r2_seq, valid_bc1s=bc1s, valid_bc2s=bc2s)\n\n # Write the the reads worth keeping\n if keep:\n bc, umi = result\n trim_process.write(to_fastq_lines(bc, umi, r2_seq, r2_qual, r_name))\n self.filtering_statistics_counter['Valid'] += 1\n else:\n self.filtering_statistics_counter[result] += 1\n\n # Track speed per M reads\n self.filtering_statistics_counter['Total'] += 1\n if self.filtering_statistics_counter['Total']%(10*ping_every_n_reads) == 1:\n print_to_stderr(ping_header)\n\n if self.filtering_statistics_counter['Total']%ping_every_n_reads == 0:\n 
print_ping_to_log(last_ping)\n last_ping = time.time()\n\n print_ping_to_log(False)\n\n print_to_stderr(self.filtering_statistics_counter)\n\n def _weave_fastqs(self, r1_fastq, r2_fastq):\n \"\"\"\n Merge 2 FastQ files by returning paired reads for each.\n Returns only R1_seq, R2_seq and R2_qual.\n \"\"\"\n\n is_gz_compressed = False\n is_bz_compressed = False\n if r1_fastq.split('.')[-1] == 'gz' and r2_fastq.split('.')[-1] == 'gz':\n is_gz_compressed = True\n \n #Added bz2 support VS\n if r1_fastq.split('.')[-1] == 'bz2' and r2_fastq.split('.')[-1] == 'bz2':\n is_bz_compressed = True\n\n # Decompress Gzips using subprocesses because python gzip is incredibly slow.\n if is_gz_compressed: \n r1_gunzip = subprocess.Popen(\"gzip --stdout -d %s\" % (r1_fastq), shell=True, stdout=subprocess.PIPE)\n r1_stream = r1_gunzip.stdout\n r2_gunzip = subprocess.Popen(\"gzip --stdout -d %s\" % (r2_fastq), shell=True, stdout=subprocess.PIPE)\n r2_stream = r2_gunzip.stdout\n elif is_bz_compressed:\n r1_bunzip = subprocess.Popen(\"bzcat %s\" % (r1_fastq), shell=True, stdout=subprocess.PIPE)\n r1_stream = r1_bunzip.stdout\n r2_bunzip = subprocess.Popen(\"bzcat %s\" % (r2_fastq), shell=True, stdout=subprocess.PIPE)\n r2_stream = r2_bunzip.stdout\n else:\n r1_stream = open(r1_fastq, 'r')\n r2_stream = open(r2_fastq, 'r')\n\n while True:\n #Read 4 lines from each FastQ\n name = next(r1_stream).rstrip()[1:].split()[0] #Read name\n r1_seq = next(r1_stream).rstrip() #Read seq\n next(r1_stream) #+ line\n r1_qual = next(r1_stream).rstrip() #Read qual\n \n next(r2_stream) #Read name\n r2_seq = next(r2_stream).rstrip() #Read seq\n next(r2_stream) #+ line\n r2_qual = next(r2_stream).rstrip() #Read qual\n \n # changed to allow for empty reads (caused by adapter trimming)\n if name:\n yield name, r1_seq, r1_qual, r2_seq, r2_qual\n else:\n # if not r1_seq or not r2_seq:\n break\n\n r1_stream.close()\n r2_stream.close()\n\n def _process_reads(self, name, read, valid_bc1s={}, valid_bc2s={}):\n 
\"\"\"\n Returns either:\n True, (barcode, umi)\n (if read passes filter)\n False, name of filter that failed\n (for stats collection)\n \n R1 anatomy: BBBBBBBB[BBB]WWWWWWWWWWWWWWWWWWWWWWCCCCCCCCUUUUUUTTTTTTTTTT______________\n B = Barcode1, can be 8, 9, 10 or 11 bases long.\n W = 'W1' sequence, specified below\n C = Barcode2, always 8 bases\n U = UMI, always 6 bases\n T = Beginning of polyT tail.\n _ = Either sequencing survives across the polyT tail, or signal starts dropping off\n (and start being anything, likely with poor quality)\n \"\"\"\n\n minimal_polyT_len_on_R1 = 7\n hamming_threshold_for_W1_matching = 3\n\n w1 = \"GAGTGATTGCTTGTGACGCCTT\"\n rev_w1 = \"AAGGCGTCACAAGCAATCACTC\" #Hard-code so we don't recompute on every one of millions of calls\n # If R2 contains rev_W1, this is almost certainly empty library\n if rev_w1 in read:\n return False, 'W1_in_R2'\n\n # # With reads sufficiently long, we will often see a PolyA sequence in R2. \n # if polyA in read:\n # return False, 'PolyA_in_R2'\n\n # Check for polyT signal at 3' end.\n # 44 is the length of BC1+W1+BC2+UMI, given the longest PolyT\n #BC1: 8-11 bases\n #W1 : 22 bases\n #BC2: 8 bases\n #UMI: 6 bases\n\n # check for empty reads (due to adapter trimming)\n if not read:\n return False, 'empty_read'\n \n #Check for W1 adapter\n #Allow for up to hamming_threshold errors\n if w1 in name:\n w1_pos = name.find(w1)\n if not 7 < w1_pos < 12:\n return False, 'No_W1'\n else:\n #Try to find W1 adapter at start positions 8-11\n #by checking hamming distance to W1.\n for w1_pos in range(8, 12):\n if string_hamming_distance(w1, name[w1_pos:w1_pos+22]) <= hamming_threshold_for_W1_matching:\n break\n else:\n return False, 'No_W1'\n \n bc2_pos=w1_pos+22\n umi_pos=bc2_pos+8\n polyTpos=umi_pos+6\n expected_poly_t = name[polyTpos:polyTpos+minimal_polyT_len_on_R1]\n if string_hamming_distance(expected_poly_t, 'T'*minimal_polyT_len_on_R1) > 3:\n return False, 'No_polyT'\n \n bc1 = str(name[:w1_pos])\n bc2 = 
str(name[bc2_pos:umi_pos])\n umi = str(name[umi_pos:umi_pos+6])\n \n #Validate barcode (and try to correct when there is no ambiguity)\n if valid_bc1s and valid_bc2s:\n # Check if BC1 and BC2 can be mapped to expected barcodes\n if bc1 in valid_bc1s:\n # BC1 might be a neighboring BC, rather than a valid BC itself. \n bc1 = valid_bc1s[bc1]\n else:\n return False, 'BC1'\n if bc2 in valid_bc2s:\n bc2 = valid_bc2s[bc2]\n else:\n return False, 'BC2'\n if 'N' in umi:\n return False, 'UMI_error'\n bc = '%s-%s'%(bc1, bc2)\n return True, (bc, umi)\n\nclass V3Demultiplexer():\n\n def __init__(self, library_indices, project=None, part_filename=\"\", input_filename=\"\", run_name=\"\", part_name=\"\"):\n\n self.input_filename = input_filename\n self.project = project\n self.run_name = run_name\n self.part_name = part_name\n self.libraries = {}\n for lib in library_indices:\n lib_index = lib['library_index']\n lib_name = lib['library_name']\n library_part_filename = part_filename.format(library_name=lib_name, library_index=lib_index)\n self.libraries[lib_index] = LibrarySequencingPart(filtered_fastq_filename=library_part_filename, project=project, run_name=run_name, library_name=lib_name, part_name=part_name)\n\n def _weave_fastqs(self, fastqs):\n last_extension = [fn.split('.')[-1] for fn in fastqs]\n if all(ext == 'gz' for ext in last_extension):\n processes = [subprocess.Popen(\"gzip --stdout -d %s\" % (fn), shell=True, stdout=subprocess.PIPE) for fn in fastqs]\n streams = [r.stdout for r in processes]\n elif all(ext == 'bz2' for ext in last_extension):\n processes = [subprocess.Popen(\"bzcat %s\" % (fn), shell=True, stdout=subprocess.PIPE) for fn in fastqs]\n streams = [r.stdout for r in processes]\n elif all(ext == 'fastq' for ext in last_extension):\n streams = [open(fn, 'r') for fn in fastqs]\n else:\n raise(\"ERROR: Different files are compressed differently. 
Check input.\")\n\n while True:\n names = [next(s)[:-1].split()[0] for s in streams]\n seqs = [next(s)[:-1] for s in streams]\n blanks = [next(s)[:-1] for s in streams]\n quals = [next(s)[:-1] for s in streams]\n assert all(name==names[0] for name in names)\n yield names[0], seqs, quals\n\n for s in streams:\n s.close()\n\n\n def _process_reads(self, name, seqs, quals, valid_bc1s={}, valid_bc2s={}, valid_libs={}):\n \"\"\"\n Returns either:\n True, (barcode, umi)\n (if read passes filter)\n False, name of filter that failed\n (for stats collection)\n \"\"\"\n\n r1, r2, r3, r4 = seqs\n\n if r3 in valid_libs:\n lib_index = valid_libs[r3]\n else:\n return False, r3, 'Invalid_library_index'\n\n if r2 in valid_bc1s:\n bc1 = valid_bc1s[r2]\n else:\n return False, lib_index, 'Invalid_BC1'\n\n orig_bc2 = r4[:8]\n umi = r4[8:8+6]\n polyA = r4[8+6:]\n\n if orig_bc2 in valid_bc2s:\n bc2 = valid_bc2s[orig_bc2]\n else:\n return False, lib_index, 'Invalid_BC2'\n\n if 'N' in umi:\n return False, lib_index, 'UMI_contains_N'\n\n final_bc = '%s-%s' % (bc1, bc2)\n return True, lib_index, (final_bc, umi)\n\n\n def filter_and_count_reads(self):\n # Prepare error corrected index sets\n self.sequence_to_index_mapping = {}\n libs = self.libraries.keys()\n self.sequence_to_index_mapping = dict(zip(libs, libs))\n index_neighborhoods = [set(seq_neighborhood(lib, 1)) for lib in libs]\n for lib, clibs in zip(libs, index_neighborhoods):\n # Quick check that error-correction maps to a single index\n for clib in clibs:\n if sum(clib in hood for hood in index_neighborhoods)==1:\n self.sequence_to_index_mapping[clib] = lib\n\n # Prepare error corrected barcode sets\n error_corrected_barcodes = self.project.gel_barcode2_list_neighborhood\n error_corrected_rev_compl_barcodes = self.project.gel_barcode2_revcomp_list_neighborhood\n\n # Open up our context managers\n manager_order = [] #It's imperative to exit managers the opposite order than we open them!\n trim_processes = {}\n trim_processes_managers 
= {}\n\n for lib in self.libraries.keys():\n manager_order.append(lib)\n trim_processes_managers[lib] = self.libraries[lib].trimmomatic_and_low_complexity_filter_process()\n trim_processes[lib] = trim_processes_managers[lib].__enter__()\n\n overall_filtering_statistics = defaultdict(int)\n\n # Paths for the 4 expected FastQs\n input_fastqs = []\n for r in ['R1', 'R2', 'R3', 'R4']:\n input_fastqs.append(self.input_filename.format(read=r))\n\n last_ping = time.time()\n ping_every_n_reads = 1000000\n ping_header = \"{0:>12}{1:>16}{2:>12}{3:>10}{4:>10}{5:>10}{6:>10} |\" + ''.join(\"{%d:>12.10}\"%i for i in range(7,7+len(manager_order)))\n ping_header = ping_header.format(\"Total Reads\", \"\", \"Valid Reads\", \"No index\", \"No BC1\", \"No BC2\", \"No UMI\", *[self.libraries[k].library_name for k in manager_order])\n ping_template = \"{total:12d} {rate:5.1f} sec/M {Valid:12.1%}{Invalid_library_index:10.1%}{Invalid_BC1:10.1%}{Invalid_BC2:10.1%}{UMI_contains_N:10.1%} |{\"+\":>12.1%}{\".join(manager_order)+\":>12.1%}\"\n \n def print_ping_to_log(last_ping):\n sec_per_mil = (time.time() - last_ping)/(float(ping_every_n_reads)/10**6) if last_ping else 0\n total = overall_filtering_statistics['Total']\n ping_format_data = {k: float(overall_filtering_statistics[k])/total for k in ['Valid', 'Invalid_library_index', 'Invalid_BC1', 'Invalid_BC2', 'UMI_contains_N']}\n if overall_filtering_statistics['Valid'] > 0:\n ping_format_data.update({k: float(self.libraries[k].filtering_statistics_counter['Valid'])/overall_filtering_statistics['Valid'] for k in manager_order})\n print_to_stderr(ping_template.format(total=total, rate=sec_per_mil, **ping_format_data))\n\n common__ = defaultdict(int)\n print_to_stderr('Filtering %s, file %s' % (self.run_name, self.input_filename))\n for r_name, seqs, quals in self._weave_fastqs(input_fastqs):\n\n # Python 3 compatibility in mind!\n seqs = [s.decode('utf-8') for s in seqs]\n\n keep, lib_index, result = self._process_reads(r_name, seqs, 
quals,\n error_corrected_barcodes, error_corrected_rev_compl_barcodes, \n self.sequence_to_index_mapping)\n common__[seqs[1]] += 1\n if keep:\n bc, umi = result\n bio_read = seqs[0]\n bio_qual = quals[0]\n trim_processes[lib_index].write(to_fastq_lines(bc, umi, bio_read, bio_qual, r_name[1:]))\n self.libraries[lib_index].filtering_statistics_counter['Valid'] += 1\n self.libraries[lib_index].filtering_statistics_counter['Total'] += 1\n overall_filtering_statistics['Valid'] += 1\n\n else:\n if result != 'Invalid_library_index':\n self.libraries[lib_index].filtering_statistics_counter[result] += 1\n self.libraries[lib_index].filtering_statistics_counter['Total'] += 1\n overall_filtering_statistics[result] += 1\n\n # Track speed per M reads\n overall_filtering_statistics['Total'] += 1\n\n if overall_filtering_statistics['Total']%(ping_every_n_reads*10)==1:\n print_to_stderr(ping_header)\n \n if overall_filtering_statistics['Total']%ping_every_n_reads == 0:\n print_ping_to_log(last_ping)\n last_ping = time.time()\n \n print_ping_to_log(False)\n # Close up the context managers\n for lib in manager_order[::-1]:\n trim_processes_managers[lib].__exit__(None, None, None)\n\n def contains_library_in_query(self, query_libraries):\n for lib in self.libraries.values():\n if lib.contains_library_in_query(query_libraries):\n return True\n return False\n\n\n\nif __name__==\"__main__\":\n\n import sys, argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument('project', type=argparse.FileType('r'), help='Project YAML File.')\n parser.add_argument('-l', '--libraries', type=str, help='[all] Library name(s) to work on. If blank, will iterate over all libraries in project.', nargs='?', default='')\n parser.add_argument('-r', '--runs', type=str, help='[all] Run name(s) to work on. 
If blank, will iterate over all runs in project.', nargs='?', default='')\n parser.add_argument('command', type=str, choices=['info', 'filter', 'identify_abundant_barcodes', 'sort', 'quantify', 'aggregate', 'build_index', 'get_reads'])\n parser.add_argument('--total-workers', type=int, help='[all] Total workers that are working together. This takes precedence over barcodes-per-worker.', default=1)\n parser.add_argument('--worker-index', type=int, help='[all] Index of current worker (the first worker should have index 0).', default=0)\n parser.add_argument('--min-reads', type=int, help='[quantify] Minimun number of reads for barcode to be processed', nargs='?', default=750)\n parser.add_argument('--min-counts', type=int, help='[aggregate] Minimun number of UMIFM counts for barcode to be aggregated', nargs='?', default=0)\n parser.add_argument('--analysis-prefix', type=str, help='[quantify/aggregate/convert_bam/merge_bam] Prefix for analysis files.', nargs='?', default='')\n parser.add_argument('--no-bam', help='[quantify] Do not output alignments to bam file.', action='store_true')\n parser.add_argument('--genome-fasta-gz', help='[build_index] Path to gzipped soft-masked genomic FASTA file.')\n parser.add_argument('--ensembl-gtf-gz', help='[build_index] Path to gzipped ENSEMBL GTF file. ')\n parser.add_argument('--override-yaml', help=\"[all] Dictionnary to update project YAML with.. 
[You don't need this.]\", nargs='?', default='')\n\n args = parser.parse_args()\n project = IndropsProject(args.project)\n if args.override_yaml:\n override = eval(args.override_yaml)\n if 'paths' in override:\n project.yaml['paths'].update(override['paths'])\n if 'parameters' in override:\n for k,v in override['parameters'].items():\n project.yaml['parameters'][k].update(v)\n if hasattr(project, '_paths'):\n del project._paths\n if hasattr(project, '_parameters'):\n del project._parameters\n\n target_libraries = []\n if args.libraries:\n for lib in args.libraries.split(','):\n assert lib in project.libraries\n if lib not in target_libraries:\n target_libraries.append(lib)\n else:\n target_libraries = project.libraries.keys()\n lib_query = set(target_libraries)\n\n target_runs = []\n if args.runs:\n for run in args.runs.split(','):\n assert run in project.runs\n target_runs.append(run)\n else:\n target_runs = project.runs.keys()\n\n target_library_parts = []\n for lib in target_libraries:\n for pi, part in enumerate(project.libraries[lib].parts):\n if part.run_name in target_runs:\n target_library_parts.append((lib, pi))\n\n if args.command == 'info':\n print_to_stderr('Project Name: ' + project.name)\n target_run_parts = []\n for run in target_runs:\n target_run_parts += [part for part in project.runs[run] if part.contains_library_in_query(lib_query)]\n print_to_stderr('Total library parts in search query: ' + str(len(target_run_parts)))\n\n elif args.command == 'filter':\n target_run_parts = []\n for run in target_runs:\n target_run_parts += [part for part in project.runs[run] if part.contains_library_in_query(lib_query)]\n\n for part in worker_filter(target_run_parts, args.worker_index, args.total_workers):\n print_to_stderr('Filtering run \"%s\", part \"%s\"' % (part.run_name, part.part_name))\n part.filter_and_count_reads()\n\n elif args.command == 'identify_abundant_barcodes':\n for library in worker_filter(target_libraries, args.worker_index, 
args.total_workers):\n project.libraries[library].identify_abundant_barcodes()\n\n elif args.command == 'sort':\n for library, part_index in worker_filter(target_library_parts, args.worker_index, args.total_workers):\n print_to_stderr('Sorting %s, part \"%s\"' % (library, project.libraries[library].parts[part_index].filtered_fastq_filename))\n project.libraries[library].sort_reads_by_barcode(index=part_index)\n\n elif args.command == 'quantify':\n for library in target_libraries:\n project.libraries[library].quantify_expression(worker_index=args.worker_index, total_workers=args.total_workers,\n min_reads=args.min_reads, min_counts=args.min_counts,\n analysis_prefix=args.analysis_prefix,\n no_bam=args.no_bam, run_filter=target_runs)\n\n for part in project.libraries[library].parts:\n if hasattr(part, '_sorted_index'):\n del part._sorted_index\n\n elif args.command == 'aggregate':\n for library in worker_filter(target_libraries, args.worker_index, args.total_workers):\n project.libraries[library].aggregate_counts(analysis_prefix=args.analysis_prefix)\n\n elif args.command == 'build_index':\n project.build_transcriptome(args.genome_fasta_gz, args.ensembl_gtf_gz)\n\n elif args.command == 'get_reads':\n for library in target_libraries:\n sorted_barcode_names = project.libraries[library].sorted_barcode_names(min_reads=args.min_reads)\n for bc in sorted_barcode_names:\n for line in project.libraries[library].get_reads_for_barcode(bc, run_filter=target_runs):\n sys.stdout.write(line)\n\n for part in project.libraries[library].parts:\n if hasattr(part, '_sorted_index'):\n del part._sorted_index\n" }, { "alpha_fraction": 0.6932941675186157, "alphanum_fraction": 0.7532447576522827, "avg_line_length": 55.78070068359375, "blob_id": "1f2309235e1802cb0cb769886dd5403c8e227cf1", "content_id": "ec8eb0664e22f4557b4faeb39826adb334b6ea10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6472, "license_type": "no_license", 
"max_line_length": 764, "num_lines": 114, "path": "/Leigh_et_al_2018_Supplementary_R_code/Wound_healing.R", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "#Wound healing samples\n#This section analyses three samples collected during wound healing (3 days post amputation) \n\n#First, load the required packages. \nlibrary(Seurat)\nlibrary(dplyr)\n\n#load in data\ndata = read.table('wound_healing_and_medium_bud.repGene', header=T, row.names=1, sep='\\t')\n#pull out samples N4, N5, and N6 which are the wound healing stage limb samples\nN4_N5_N6 = data[,grep('^N[456]', colnames(data))]\n\n#Remove data matrix with extra samples\nrm(data)\n#Create Seurat object and make sparse\nseurat_3dpa = CreateSeuratObject(N4_N5_N6, project = '3dpa', min.cells = 8, min.genes = 200)\nseurat_3dpa = MakeSparse(seurat_3dpa)\n\n#While we did some filtering above, we need to perform further quality control to ensure that the cells we are working with aren't apoptotic #or have a dearth of genes. First, we need to identify the mitochondrial genes present in this matrix. The axolotl mitochondrial genome can #be found here: https://www.ncbi.nlm.nih.gov/nuccore/AJ584639. Remember that the genes are written as protein names when greping for #mitochondrial genes. \n\n#find mitochonrial genes in matrix. 
The protein name should be used and changed for each gene within the mitochondrial genome.\ngrep(pattern = \"*CYB_*\", x = rownames(x = seurat_3dpa.intact@data), value = TRUE)\n#list of all mitochondrial genes in this wound healing matrix\nmito.genes.3dpa <- c(\"c786641_g1_i1^sp|Q9B205|CYB_CAICR\", \"c1084180_g1_i1^sp|Q8LWP6|CYB_RANSI\", \"c1043008_g2_i1^sp|Q9B205|CYB_CAICR\", \"c1060846_g1_i1^sp|Q8WA47|CYB_MUSMA\", \"c1084180_g3_i1^sp|Q8LWP6|CYB_RANSI\", \"c1057599_g1_i2^sp|P00018|CYC_DRONO^Cytochrome_CBB3\", \"c1088733_g1_i1^sp|Q9ZZM6|COX1_SALSA\", \"c1053715_g3_i1^sp|O03539|COX1_NOTPE\", \"c220469_g1_i1^sp|P00397|COX1_MOUSE\", \"c1451851_g1_i1^sp|Q9ZXY2|COX1_PAPHA\", \"c289614_g1_i1^sp|P05503|COX1_RAT\", \"c959712_g1_i1^sp|P00416|COX3_MOUSE\", \"c1049442_g1_i1^sp|Q96133|COX3_CARAU\", \"c1083417_g1_i2^sp|P00419|COX3_XENLA\", \"c934922_g1_i1^sp|Q9ZXX8|COX3_PAPHA\", \"c1027109_g1_i1^sp|Q35920|ATP6_SALSA\", \"c1083535_g6_i1^sp|Q4JQI7|NU1M_TETNG\", \"c1025234_g1_i1^sp|O63796|NU2M_ANACA\", \"c1068681_g4_i4^sp|Q9ZZM3|NU5M_SALSA\")\n\n#calculate the percentage mitochondrial RNA for each cell\npercent.mito.3dpa <- Matrix::colSums([email protected][mito.genes.3dpa, ])/Matrix::colSums([email protected])\n#add the percent mitochondrial content of each cell to the Seurat object\nseurat_3dpa <- AddMetaData(object = seurat_3dpa, metadata = percent.mito.3dpa, col.name = \"percent.mito\")\n\n#Now perform quality control on matrix by filtering out cells with high percent mitochondrial RNA and low and high number of genes. We filter #out cells that by visualize inspection appear to have relatively high mitochondrial RNA content or high or low number of genes. These #numbers can be modified to be more or less inclusive. 
\n\n#visualize number of genes, unique molecular identifiers (UMI), and percent mitochondrial RNA\nVlnPlot(object = seurat_3dpa, features.plot = c(\"nGene\", \"nUMI\", \"percent.mito\"), nCol = 3)\n\n#filter out cells\nseurat_3dpa <- FilterCells(object = seurat_3dpa, subset.names = c(\"nGene\", \"percent.mito\"), low.thresholds = c(850, -Inf), high.thresholds = c(5000, 0.15))\n\n#normalize data\nseurat_3dpa <- NormalizeData(object = seurat_3dpa, normalization.method = \"LogNormalize\", scale.factor = 10000)\n\n#find variable genes\nseurat_3dpa <- FindVariableGenes(object = seurat_3dpa, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5)\n\n#scale data and regress out nUMI and percent.mito\nseurat_3dpa <- ScaleData(object = seurat_3dpa, vars.to.regress = c(\"nUMI\", \"percent.mito\"))\n\n\n#Next, we perform linear dimensional reduction and visualize the results in a few different ways. \nseurat_3dpa <- RunPCA(object = seurat_3dpa, pc.genes = [email protected], do.print = TRUE, pcs.print = 1:5, genes.print = 5)\n\n#visualize results\nVizPCA(object = seurat_3dpa, pcs.use = 1:2)\nPCAPlot(object = seurat_3dpa, dim.1 = 1, dim.2 = 2)\nseurat_3dpa <- ProjectPCA(object = seurat_3dpa, do.print = FALSE)\nPCHeatmap(object = seurat_3dpa, pc.use = 1, cells.use = 500, do.balanced = TRUE, label.columns = FALSE)\nPCHeatmap(object = seurat_3dpa, pc.use = 1:12, cells.use = 500, do.balanced = TRUE, label.columns = FALSE, use.full = FALSE)\nPCElbowPlot(object = seurat_3dpa)\n\n#plot standard deviations to chose PCs to use in downstream analysis, here we chose 19\nseurat_3dpa <- FindClusters(object = seurat_3dpa, reduction.type = \"pca\", dims.use = 1:19, resolution = 1.5, print.output = 0, save.SNN = TRUE)\n\n#Now we can identify cell populations within the homeostatic limb, vizualize the resulting populations using tSNE, and subsequently find markers that define these different populations \n\n#find clusters using first 18 
PCs\nseurat_inDrops3.intact <- FindClusters(object = seurat_inDrops3.intact, reduction.type = \"pca\", dims.use = 1:18, resolution = 1.5, print.output = 0, save.SNN = TRUE)\n\n#run non-linear dimensional reduction\nseurat_3dpa <- RunTSNE(object = seurat_3dpa, dims.use = 1:19, do.fast = TRUE)\n\n# Build a phylogenetic tree to see how cells are related while simultaneously renaming and reordering cluster names according to their #position on the tree. This will be important to determine when deciding whether similar populations should be merged. \nseurat_3dpa <- BuildClusterTree(seurat_3dpa, do.reorder=TRUE, reorder.numeric=TRUE)\n\n#visualize tSNE \nset.seed(5)\nTSNEPlot(object = seurat_3dpa)\n#visulize tSNE based on sample to determine how similar the two samples are to one another\nTSNEPlot(object = seurat_3dpa, group.by = \"orig.ident\")\n\n#assess nodes\nnode.scores <- AssessNodes(seurat_3dpa)\nnode.scores[order(node.scores$oobe, decreasing = TRUE), ] -> node.scores\nnode.scores\n\n\n#merge first 5 nodes\n#select nodes to merge\nnodes.merge=node.scores[1:5,]\nnodes.to.merge <- sort(x = nodes.merge$node)\n\n#create a new Seurat object in which we will merge our selected nodes\nmerged <- seurat_3dpa\n#merge nodes\nfor (n in nodes.to.merge) {merged <- MergeNode(object = merged, node.use = n)}\n\n\n#re-visualize the tSNE after we have merged the non-distinct nodes\nset.seed(5)\nTSNEPlot(merged, do.label = TRUE)\n\n\n#determine differentially expressed genes for each population\n\n#find markers for each population\nall.markers <- FindAllMarkers(merged, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)\n\n#write DE results to table for inspection\nwrite.table(all.markers, 'wound.healing.markers.txt', sep = '\\t')\n\n#save Rdata\nsave.image('wound_healing.Rdata')\n\n#end session\nq()" }, { "alpha_fraction": 0.613725483417511, "alphanum_fraction": 0.6245098114013672, "avg_line_length": 69.37931060791016, "blob_id": "5f644713b668b85e997542edbeba01cbf0e41169", 
"content_id": "51b7a3294b3ef265b5f8603c75e377bd7b0b9595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2040, "license_type": "no_license", "max_line_length": 198, "num_lines": 29, "path": "/annotate_mouse_transcriptome.py", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "import re\nin_genes=\"Mus_musculus.GRCm38.84.with_tid.gtf\"\nout_genes=\"Mus_musculus.GRCm38.84.annotated.gtf\"\naccepted_gene_biotypes_for_NA_transcripts = set([\"IG_V_gene\",\"IG_J_gene\",\"protein_coding\",\"TR_J_gene\",\"TR_D_gene\",\"TR_V_gene\",\"IG_C_gene\",\"IG_D_gene\",\"TR_C_gene\"])\nwith open(in_genes, 'r') as in_f, open(out_genes, 'w') as out_f:\n for line in in_f:\n chr_name = line.rstrip().split('\\t')[0]\n # Check the transcript_support level\n # This should be faster than a regex\n # We need to support the case where see these two types of annotations:\n # transcript_support_level \"1\"\n # transcript_support_level \"1 (assigned to previous version X)\"\n # transcript_support_level \"2\" <- Clear example of a gene like this is NKX6.1\n # transcript_support_level \"2 (assigned to previous version X)\"\n line_valid_for_output = False\n if 'transcript_support_level \"1\"' in line or 'transcript_support_level \"1 ' in line or 'transcript_support_level \"2\"' in line or 'transcript_support_level \"2 ' in line:\n line_valid_for_output = True\n elif 'transcript_support_level \"NA' in line:\n # Transcript Support Level Not Analysed. 
Pseudogenes, single exon transcripts, HLA, T-cell receptor and Ig transcripts are not analysed and therefore not given any of the TSL categories.\n # Keep only a few ones annotated as \"IG_V_gene\",\"IG_J_gene\",\"protein_coding\",\"TR_J_gene\",\"TR_D_gene\",\"TR_V_gene\",\"IG_C_gene\",\"IG_D_gene\",\"TR_C_gene\"\n gene_biotype = re.search(r'gene_biotype \\\"(.*?)\\\";', line)\n if gene_biotype and gene_biotype.group(1) in accepted_gene_biotypes_for_NA_transcripts:\n line_valid_for_output = True\n if line_valid_for_output:\n gene_name = re.search(r'gene_name \\\"(.*?)\\\";', line)\n if gene_name:\n gene_name = gene_name.group(1)\n out_line = re.sub(r'(?<=transcript_id \")(.*?)(?=\";)', r'\\1|'+gene_name, line)\n out_f.write(out_line)" }, { "alpha_fraction": 0.613967776298523, "alphanum_fraction": 0.6208748817443848, "avg_line_length": 35.7042236328125, "blob_id": "2b19a93457a79bc373f7f3e221b75e47cd699a06", "content_id": "0e78abf749d922242404add7fc482adb90fcf47a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2606, "license_type": "no_license", "max_line_length": 213, "num_lines": 71, "path": "/trim_polyA_and_filter_low_complexity_reads.py", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "import re\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nfrom indrops import from_fastq, to_fastq\n\ndef low_complexity_filter(args):\n total_reads = 0\n kept_reads = 0\n rejected_because_complexity_too_low = 0\n rejected_because_too_short = 0\n\n keep_polyA_length = 4\n single_base_runs_regex = '|'.join(['%s{%d,}'%(b, keep_polyA_length+1) for b in 'ATCG'])\n\n for name, seq, qual in from_fastq(args.input):\n\n total_reads += 1\n keep_read = True\n\n #Identify length of polyA tail.\n polyA_length = 0\n for s in seq[::-1]:\n if s!='A':\n break\n polyA_length += 1\n\n read_length = len(seq)\n trim_at_position = read_length - min(polyA_length + keep_polyA_length, 0)\n\n if 
trim_at_position < args.min_post_trim_length:\n keep_read = False\n rejected_because_too_short += 1\n else:\n new_seq = seq[:trim_at_position]\n new_qual = qual[:trim_at_position]\n\n\n low_complexity_bases = sum([m.end()-m.start() for m in re.finditer(single_base_runs_regex, new_seq)])\n low_complexity_fraction = float(low_complexity_bases)/len(new_seq)\n\n if low_complexity_fraction > args.max_low_complexity_fraction:\n keep_read = False\n rejected_because_complexity_too_low += 1\n\n if keep_read:\n output_lines = to_fastq(name, new_seq, new_qual)\n args.output.write(output_lines)\n kept_reads += 1\n\n elif args.rejected:\n args.rejected.write(to_fastq(name, seq, qual))\n\n if args.metrics:\n pickle.dump({'input': total_reads, 'output': kept_reads, 'rejected_because_complexity_too_low': rejected_because_complexity_too_low, 'rejected_because_too_short': rejected_because_too_short}, args.metrics)\n\n sys.stderr.write('Kept %d out of %d.\\n' % (kept_reads, total_reads))\n\nif __name__==\"__main__\":\n import sys, argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-input', type=argparse.FileType('r'), nargs='?', default=sys.stdin)\n parser.add_argument('-output', type=argparse.FileType('w'), nargs='?', default=sys.stdout)\n parser.add_argument('-rejected', type=argparse.FileType('w'), nargs='?', default=False)\n parser.add_argument('-metrics', type=argparse.FileType('w'), nargs='?', default=sys.stderr)\n parser.add_argument('--max-low-complexity-fraction', type=float, nargs='?', default=1.0)\n parser.add_argument('--min-post-trim-length', type=int, nargs='?', default=20)\n args = parser.parse_args()\n low_complexity_filter(args)\n" }, { "alpha_fraction": 0.624761700630188, "alphanum_fraction": 0.648540198802948, "avg_line_length": 32.89115524291992, "blob_id": "6a87ca0bcef6eff54ca875a31c726e625b6d7975", "content_id": "f261851084b2700c9b350590d76bf84173c3ce19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", 
"length_bytes": 9967, "license_type": "no_license", "max_line_length": 168, "num_lines": 294, "path": "/Leigh_et_al_2018_Supplementary_R_code/epidermis_Monocle_analysis.R", "repo_name": "brianjohnhaas/indrops", "src_encoding": "UTF-8", "text": "##Pseudotime of epidermis with Monocle\n\n#homeostatic epidermis\nlibrary(monocle)\nlibrary(Seurat)\n\nload(\"intact.Rdata\")\n\n#add IDs to metadata in preparation of importCDS\[email protected]$stashed.id <- merged@ident\n\npdf('intact_timecourse_tsne.pdf')\ntsne.plot(merged, do.label = T)\ndev.off()\n\nWE_filtered <- SubsetData(merged, ident.remove = c(1,2,4,5,6,8,11,12))\n\npdf('intact_WE_timecourse_tsne.pdf')\ntsne.plot(WE_filtered, do.label = T)\ndev.off()\n\nWE_filtered <- importCDS(WE_filtered)\n\nWE_filtered <- detectGenes(WE_filtered, min_expr = .1)\nexpressed_genes <- row.names(subset(fData(WE_filtered),\n num_cells_expressed >= 3))\n\nWE_filtered <- WE_filtered[expressed_genes,]\n\nhead(pData(WE_filtered))\n\nWE_filtered <- estimateSizeFactors(WE_filtered)\nWE_filtered <- detectGenes(WE_filtered, min_expr = .1)\n\n\nhead(fData(WE_filtered))\nhead(pData(WE_filtered))\n\npdf('intact_WE_variance.pdf')\nplot_pc_variance_explained(WE_filtered, return_all = FALSE)\ndev.off()\n\nWE_filtered <- reduceDimension(WE_filtered,\n max_components = 2,\n norm_method = 'log',\n num_dim = 8,\n cores = 4,\n residualModelFormulaStr = \"~num_genes_expressed\",\n reduction_method = 'tSNE',\n verbose = TRUE)\n\nWE_filtered <- clusterCells(WE_filtered, verbose = TRUE)\n\nWE_filtered$tree.ident <- as.character(WE_filtered$tree.ident)\n\npdf('intact_WE_clusters.pdf')\nplot_cell_clusters(WE_filtered)\ndev.off()\n\npdf('intact_WE_numGenes.pdf')\nplot_cell_clusters(WE_filtered, color_by = \"num_genes_expressed\")\ndev.off()\n\npdf('intact_WE_byseurat.pdf')\nplot_cell_clusters(WE_filtered, color_by = \"stashed.id\")\ndev.off()\n\n\npdf('intact_WE_rho_delta.pdf')\nplot_rho_delta(WE_filtered, rho_threshold = 2, delta_threshold = 
10)\ndev.off()\n\nclustering_DEG_genes <- differentialGeneTest(WE_filtered,\n fullModelFormulaStr = '~stashed.id',\n cores = 4)\n\nmy_ordering_genes <- row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)][1:200]\nWE_filtered <- setOrderingFilter(WE_filtered, ordering_genes = my_ordering_genes)\nWE_filtered <- reduceDimension(WE_filtered,\n method = 'DDRTree',\n fullModelFormulaStr = '~stashed.id', verbose = F, scaling = T, maxIter = 100, norm_method = 'log', max_components = 6, param.gamma = 20)\n\nWE_filtered <- orderCells(WE_filtered)\n\npdf('intact_WE_pseudotime.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"Pseudotime\", show_branch_points = FALSE)\ndev.off()\n\npdf('intact_WE_bySeuratCluster.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"stashed.id\", show_branch_points = FALSE)\ndev.off()\n\npdf('intact_WE_byCluster.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"Cluster\", show_branch_points = FALSE)\ndev.off()\n\npdf('intact_WE_bySeuratCluster_facet.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"stashed.id\") +\n facet_wrap(~stashed.id, nrow = 1)\ndev.off()\n\nsave.image(\"082018_intact_WE_postDEG.Rdata\")\n\n###\n\nload(\"082018_intact_WE_postDEG.Rdata\")\n\n# create vector of no's\nmy_vector <- rep('no', nrow(pData(WE_filtered)))\n\n# change status to yes if the cell was in cluster 10\nmy_vector[pData(WE_filtered)$stashed.id == 10] <- rep('yes', sum(pData(WE_filtered)$stashed.id == 10))\n\n# add vector to phenoData\npData(WE_filtered)$test <- my_vector\nhead(pData(WE_filtered))\n\nclustering_DEG_genes <- differentialGeneTest(WE_filtered,\n fullModelFormulaStr = '~test',\n cores = 4)\n\nmy_ordering_genes <- row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)][1:150]\nWE_filtered <- setOrderingFilter(WE_filtered, ordering_genes = my_ordering_genes)\nWE_filtered <- reduceDimension(WE_filtered,\n method = 'DDRTree',\n fullModelFormulaStr = '~stashed.id', verbose = F, scaling = T, maxIter = 100, norm_method 
= 'log', max_components = 15, param.gamma = 20)\n\nWE_filtered <- orderCells(WE_filtered, reverse = TRUE)\n\npdf('intact_WE_pseudotime.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"Pseudotime\", show_branch_points = FALSE)\ndev.off()\n\npdf('intact_WE_bySeuratCluster.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"stashed.id\", show_branch_points = FALSE)\ndev.off()\n\npdf('intact_WE_byCluster.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"Cluster\", show_branch_points = FALSE)\ndev.off()\n\npdf('intact_WE_bySeuratCluster_facet.pdf')\nplot_cell_trajectory(WE_filtered, color_by = \"stashed.id\", show_branch_points = FALSE) +\n facet_wrap(~stashed.id, nrow = 1)\ndev.off()\n\n###\n\nsave.image(\"intact_WE_postDEG.Rdata\")\n\nrm(list=setdiff(ls(), \"WE_filtered\")) \n\nsave.image(\"intact_WE_slim.Rdata\")\n\npng('intact_WE_facet.png', width = (7/4)*3, height = 2, units = 'in', res = 300)\nplot_cell_trajectory(WE_filtered, color_by = \"stashed.id\", show_branch_points = FALSE) +\n facet_wrap(~stashed.id, nrow = 1)+\n theme(legend.position=\"none\")\ndev.off()\n\npng('intact_WE_pseudotime.png', width = 2.1, height = 1.77, units = 'in', res = 300)\nplot_cell_trajectory(WE_filtered, color_by = \"Pseudotime\", show_branch_points = FALSE)+\n theme(legend.position=\"none\")\ndev.off()\n\n###Regenerating epidermis\n\n\nlibrary(monocle)\nlibrary(Seurat)\n\n#load medium-bud blastema data (medium-bud and 23dpa are equivalent)\nload(\"medium_bud_blastema.RData\")\n\npdf('timecourse_tsne.pdf')\ntsne.plot(combined, do.label = T)\ndev.off()\n\ncombined <- importCDS(combined)\n\npData(combined)$\"day1\" <- pData(combined)$protocol == \"d1\" \n\ncombined <- detectGenes(combined, min_expr = .1)\nexpressed_genes <- row.names(subset(fData(combined),\n num_cells_expressed >= 3))\n\n#only want expressed genes and the day 1 subset of cells\ncombined_day1 <- combined[expressed_genes, pData(combined)$\"day1\"]\n\n#only want WE populations\npData(combined_day1)$\"WE\" <- 
pData(combined_day1)$tree.ident == \"1\" |\n pData(combined_day1)$tree.ident == \"2\" |\n pData(combined_day1)$tree.ident == \"3\" |\n pData(combined_day1)$tree.ident == \"4\"\n\ncombined_day1_WE <- combined_day1[expressed_genes, pData(combined_day1)$\"WE\"]\n\nhead(pData(combined_day1_WE))\n\ncombined_day1_WE <- estimateSizeFactors(combined_day1_WE)\ncombined_day1_WE <- detectGenes(combined_day1_WE, min_expr = .1)\n\n\nhead(fData(combined_day1_WE))\nhead(pData(combined_day1_WE))\n\npdf('23dpa_1_WE_variance.pdf')\nplot_pc_variance_explained(combined_day1_WE, return_all = FALSE)\ndev.off()\n\ncombined_day1_WE <- reduceDimension(combined_day1_WE,\n max_components = 2,\n norm_method = 'log',\n num_dim = 8,\n cores = 4,\n residualModelFormulaStr = \"~num_genes_expressed\",\n reduction_method = 'tSNE',\n verbose = TRUE)\n\ncombined_day1_WE <- clusterCells(combined_day1_WE, verbose = TRUE)\n\n#plot clusters\npdf('23dpa_day1_WE_clusters.pdf')\nplot_cell_clusters(combined_day1_WE)\ndev.off()\n\npdf('23dpa_day1_WE_numGenes.pdf')\nplot_cell_clusters(combined_day1_WE, color_by = \"num_genes_expressed\")\ndev.off()\n\npdf('23dpa_day1_WE_byseurat.pdf')\nplot_cell_clusters(combined_day1_WE, color_by = \"tree.ident\")\ndev.off()\n\n\npdf('WE_rho_delta.pdf')\nplot_rho_delta(combined_day1_WE, rho_threshold = 2, delta_threshold = 10)\ndev.off()\n\nclustering_DEG_genes <- differentialGeneTest(combined_day1_WE,\n fullModelFormulaStr = '~tree.ident',\n cores = 4)\n\nmy_ordering_genes <- row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)][1:150]\ncombined_day1_WE <- setOrderingFilter(combined_day1_WE, ordering_genes = my_ordering_genes)\ncombined_day1_WE <- reduceDimension(combined_day1_WE,\n method = 'DDRTree',\n fullModelFormulaStr = '~tree.ident', verbose = F, scaling = T, norm_method = 'log', max_components =8, param.gamma = 20)\n\ncombined_day1_WE$tree.ident <- as.character(combined_day1_WE$tree.ident)\n\ncombined_day1_WE <- orderCells(combined_day1_WE)\n\n#plot 
pseudotime graphs\npdf('23dpa_day1_WE_pseudotime.pdf')\nplot_cell_trajectory(combined_day1_WE, color_by = \"Pseudotime\", show_branch_points = FALSE)\ndev.off()\n\npdf('23dpa_day1_WE_bySeuratCluster.pdf')\nplot_cell_trajectory(combined_day1_WE, color_by = \"tree.ident\", show_branch_points = FALSE)\ndev.off()\n\npdf('23dpa_day1_WE_byState.pdf')\nplot_cell_trajectory(combined_day1_WE, color_by = \"Cluster\", show_branch_points = FALSE)\ndev.off()\n\npdf('23dpa_day1_WE_bySeuratCluster_facet.pdf')\nplot_cell_trajectory(combined_day1_WE, color_by = \"tree.ident\", show_branch_points = FALSE) +\n facet_wrap(~tree.ident, nrow = 1)\ndev.off()\n\nsave.image(\"23dpa_day1_WE_postDEG.Rdata\")\n\nrm(list=setdiff(ls(), \"combined_day1_WE\")) \n\nsave.image(\"23dpa_day1_WE_slim.Rdata\")\n\npng('23dpa_day1_WE_facet.png', width = (7/5)*3, height = 2, units = 'in', res = 300)\nplot_cell_trajectory(combined_day1_WE, color_by = \"tree.ident\", show_branch_points = FALSE) +\n facet_wrap(~tree.ident, nrow = 1)+\n theme(legend.position=\"none\")+\n scale_x_continuous(breaks=c(-6,-3,0,2))\ndev.off()\n\npng('23dpa_day1_WE_pseudotime.png', width = (7/5)*1.15, height = 1.70, units = 'in', res = 300)\nplot_cell_trajectory(combined_day1_WE, color_by = \"Pseudotime\", show_branch_points = FALSE) +\n theme(legend.position=\"none\")+\n scale_x_continuous(breaks=c(-6,-3,0,2))\ndev.off()\n\npng('23dpa_day1_WE_pseudotime_K17.png', width = 7, height = 2, units = 'in', res = 300)\ngenes_to_plot <- c('c1083200_g2_i3^sp|A1L595|K1C17_BOVIN^Filament')\nplot_genes_in_pseudotime(combined_day1_WE[genes_to_plot,], color_by = \"tree.ident\")\ndev.off()\n\n\n\n" } ]
12
sayori11/contactsapi
https://github.com/sayori11/contactsapi
a717036aa5177952ce4982c00d11b1b312d22313
33dd9bf04c1e6c4d8b9301170547283b38f64838
9635cfb5cde19e0a50c3aea57a22c6c18263ee8e
refs/heads/master
2023-07-30T08:24:07.410834
2021-09-24T15:13:44
2021-09-24T15:13:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7412109375, "alphanum_fraction": 0.7412109375, "avg_line_length": 36.85185241699219, "blob_id": "8284e86993c7b25c5eac54e5d5f2f75724afcf10", "content_id": "0d24cf3aeac862ad7bce8b4ba500a6b75168ee3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 102, "num_lines": 27, "path": "/contacts/contact/views.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom .serializers import ContactSerializer, EmailSerializer, PhoneSerializer\nfrom .models import Contact, Email, Phone_no\nimport django_filters\n\nclass ContactFilter(django_filters.FilterSet):\n hasemail = django_filters.BooleanFilter(field_name='emails',lookup_expr='isnull', exclude=True)\n hasphone = django_filters.BooleanFilter(field_name='phone_nos',lookup_expr='isnull', exclude=True)\n \n class Meta:\n model = Contact\n fields = ['first_name', 'last_name', 'available']\n\nclass ContactViewSet(viewsets.ModelViewSet):\n queryset = Contact.objects.all()\n serializer_class = ContactSerializer\n permission_classes = []\n search_fields = ('first_name', 'nick_name', 'last_name')\n filterset_class = ContactFilter\n\nclass EmailViewSet(viewsets.ModelViewSet):\n queryset = Email.objects.all()\n serializer_class = EmailSerializer\n\nclass PhoneViewSet(viewsets.ModelViewSet):\n queryset = Phone_no.objects.all()\n serializer_class = PhoneSerializer\n\n\n" }, { "alpha_fraction": 0.642201840877533, "alphanum_fraction": 0.6544342637062073, "avg_line_length": 30.926828384399414, "blob_id": "0b3199f83ee168001fe7990c7f8afeb0a847a1a7", "content_id": "4e667f2518cbc525e77250a4a7a3518ef3580a26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 92, "num_lines": 41, "path": "/contacts/contact/models.py", "repo_name": 
"sayori11/contactsapi", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Contact(models.Model):\n \n\n first_name = models.CharField(max_length = 50)\n last_name = models.CharField(max_length = 50)\n nick_name = models.CharField(max_length = 20)\n created_on = models.DateTimeField(auto_now_add=True, null=True)\n updated_on = models.DateTimeField(auto_now=True, null=True)\n available = models.BooleanField(default=True)\n\n def __str__(self):\n return f'{self.first_name} {self.last_name}'\n\n class Meta:\n ordering = ('first_name',)\n\nclass Email(models.Model):\n email = models.EmailField(max_length=100)\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='emails')\n\n def __str__(self):\n return self.email\n\nclass Phone_no(models.Model):\n label_choices = (\n ('Home', 'Home'),\n ('Office', 'Office'),\n ('Work', 'Work'),\n ('Mobile', 'Mobile')\n )\n\n country_code = models.CharField(max_length=10, default='+1')\n phone_number = models.CharField(max_length=20)\n label = models.CharField(max_length=20, choices=label_choices, default='Home')\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='phone_nos')\n \n def __str__(self):\n return self.phone_number" }, { "alpha_fraction": 0.5767441987991333, "alphanum_fraction": 0.739534854888916, "avg_line_length": 20.5, "blob_id": "b47ca5ddca4b136e9ffba83e94f0c98cc04eeabc", "content_id": "548cdbd0398f90a23a1c066215b0222145cf0479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 215, "license_type": "no_license", "max_line_length": 31, "num_lines": 10, "path": "/requirements.txt", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": 
"asgiref==3.4.1\nDjango==3.2.7\ndjango-cors-headers==3.8.0\ndjango-filter==2.4.0\ndjango-phonenumber-field==5.2.0\ndjangorestframework==3.12.4\ndrf-writable-nested==0.6.3\nphonenumbers==8.12.31\npytz==2021.1\nsqlparse==0.4.2\n" }, { "alpha_fraction": 0.5050761699676514, "alphanum_fraction": 0.5888324975967407, "avg_line_length": 20.88888931274414, "blob_id": "370d188d34b3b5db851f9b2f8623c30e5776c3c2", "content_id": "fe9704e7ed002032b82d7c4198656951f0813254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/contacts/contact/migrations/0003_alter_phone_no_phone_number.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-09-20 16:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contact', '0002_auto_20210920_2037'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='phone_no',\n name='phone_number',\n field=models.CharField(max_length=20),\n ),\n ]\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5871794819831848, "avg_line_length": 20.66666603088379, "blob_id": "fcbfc5190cedb58e40faa7a12eb011d0047d04fb", "content_id": "8322e1077a1d357ef9875b60591ee6b36f50390b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/contacts/contact/migrations/0008_alter_phone_no_phone_number.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-09-23 10:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contact', '0007_phone_no_country_code'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='phone_no',\n 
name='phone_number',\n field=models.BigIntegerField(),\n ),\n ]\n" }, { "alpha_fraction": 0.571152925491333, "alphanum_fraction": 0.5851423144340515, "avg_line_length": 41.306121826171875, "blob_id": "16fa5bb01138a530decdfce59fc2284090d684b1", "content_id": "07d7f3562b396fc263bf6efc3034e656e183b7a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2073, "license_type": "no_license", "max_line_length": 140, "num_lines": 49, "path": "/contacts/contact/migrations/0001_initial.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-09-20 14:04\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_name', models.CharField(max_length=50)),\n ('last_name', models.CharField(max_length=50)),\n ('nick_name', models.CharField(max_length=20)),\n ('label', models.CharField(choices=[('Home', 'Home'), ('Office', 'Office')], max_length=20)),\n ('created_on', models.DateField(auto_now_add=True)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['first_name'],\n },\n ),\n migrations.CreateModel(\n name='Phone_no',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\n ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phone_nos', to='contact.contact')),\n ],\n ),\n 
migrations.CreateModel(\n name='Email',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('email', models.EmailField(max_length=100)),\n ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to='contact.contact')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.520729660987854, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 26.409090042114258, "blob_id": "fa238b292d62de1d6a245259f38b77b1f6b86394", "content_id": "3906cc13bb1f0d88e2b5512595e4f0125fc8f69f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 156, "num_lines": 22, "path": "/contacts/contact/migrations/0010_auto_20210924_1254.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-09-24 07:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contact', '0009_alter_phone_no_phone_number'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='contact',\n name='label',\n ),\n migrations.AddField(\n model_name='phone_no',\n name='label',\n field=models.CharField(choices=[('Home', 'Home'), ('Office', 'Office'), ('Work', 'Work'), ('Mobile', 'Mobile')], default='Home', max_length=20),\n ),\n ]\n" }, { "alpha_fraction": 0.6809238791465759, "alphanum_fraction": 0.6809238791465759, "avg_line_length": 36.64516067504883, "blob_id": "d245e016529939f02b5418a68e6f5fe79e8d914c", "content_id": "9be1b5efe470fd8ad9fa551aa7d2b36c813b4273", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 99, "num_lines": 31, "path": "/contacts/contact/serializers.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "from rest_framework import 
serializers\nfrom .models import Contact, Email, Phone_no\nfrom drf_writable_nested import WritableNestedModelSerializer\n\nclass EmailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Email\n fields = ['email']\n\nclass PhoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Phone_no\n fields = ['country_code','phone_number', 'label']\n\nclass ContactSerializer(WritableNestedModelSerializer, serializers.ModelSerializer):\n emails = EmailSerializer(many=True)\n phone_nos = PhoneSerializer(many=True)\n\n class Meta:\n model = Contact\n fields = ['id','first_name', 'last_name', 'nick_name', 'available', 'emails', 'phone_nos']\n\n def create(self, validated_data):\n phone_nos_data = validated_data.pop('phone_nos')\n emails_data = validated_data.pop('emails')\n contact = Contact.objects.create(**validated_data)\n for email_data in emails_data:\n Email.objects.create(contact=contact, **email_data)\n for phone_data in phone_nos_data:\n Phone_no.objects.create(contact=contact, **phone_data)\n return contact \n\n" }, { "alpha_fraction": 0.7795275449752808, "alphanum_fraction": 0.7795275449752808, "avg_line_length": 30.75, "blob_id": "8c253a247412c41779c45520f66ef4a1562f6f7d", "content_id": "bc2866e5607e44d17ff90230850b8bcba60677c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 47, "num_lines": 4, "path": "/contacts/contact/admin.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Contact, Email, Phone_no\n\nadmin.site.register([Contact, Email, Phone_no])\n" }, { "alpha_fraction": 0.5214723944664001, "alphanum_fraction": 0.5797545909881592, "avg_line_length": 18.176469802856445, "blob_id": "214a909ee90083d3675fc0de444b28b974e39d72", "content_id": "4399f1829597e249ba84677d00493a8d276d9527", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/contacts/contact/migrations/0006_remove_contact_user.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-09-22 07:39\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contact', '0005_contact_available'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='contact',\n name='user',\n ),\n ]\n" }, { "alpha_fraction": 0.785515308380127, "alphanum_fraction": 0.785515308380127, "avg_line_length": 29, "blob_id": "3977fa8bf44a740588f3e508a504819757f5e914", "content_id": "8612321e52b21b3295124a84c6ea7d3a4432c0c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/contacts/contact/urls.py", "repo_name": "sayori11/contactsapi", "src_encoding": "UTF-8", "text": "from rest_framework.routers import DefaultRouter\nfrom .views import ContactViewSet, EmailViewSet, PhoneViewSet\nfrom django.urls import path, include\n\nrouter = DefaultRouter()\nrouter.register('contact', ContactViewSet)\nrouter.register('emails', EmailViewSet)\nrouter.register('phone_nos', PhoneViewSet)\n\nurlpatterns = [\n path('api/', include(router.urls)),\n]" } ]
11
imbrbing/Ohni.us
https://github.com/imbrbing/Ohni.us
b44e232a6ab83b91375ea3567d4706f6b517b3f4
418425c002d18b117aebd7d7a0c28b5b1d276af1
99601ea7c989ffbed45acebc42c688c5534e9747
refs/heads/master
2020-03-23T18:38:03.811578
2018-07-22T19:34:16
2018-07-22T19:34:16
141,921,420
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5449770092964172, "alphanum_fraction": 0.5499014854431152, "avg_line_length": 29.767677307128906, "blob_id": "26bd7ae4c3e16ff70ed8244bb050681b730d4a36", "content_id": "e20ceb96811af7f82803ad99094e001c131e2a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3046, "license_type": "no_license", "max_line_length": 74, "num_lines": 99, "path": "/ValidateJson.py", "repo_name": "imbrbing/Ohni.us", "src_encoding": "UTF-8", "text": "import json\nfrom configparser import ConfigParser\nimport ssl\nimport urllib.request\nimport sys\nfrom utils import *\nfrom web3 import Web3, HTTPProvider\nimport http.client\n\ndef is_json(myjson):\n with open(myjson) as f:\n print (\"VALIDATING JSON %s\" % myjson)\n try:\n json_object = json.load(f)\n except ValueError as e:\n print('INVALID JSON: %s' % e)\n return False\n return json_object\n\ndef validate_eth_address(eth_addr):\n E_address = eth_addr\n E_address.strip()\n print(\"VALIDATING ETHEREUM ADDRESS: %s\" % E_address)\n w3 = Web3(HTTPProvider('https://api.myetherapi.com/eth'))\n if w3.isChecksumAddress(E_address):\n print(\"Valid checksum\")\n else:\n print (\"Invalid checksum, converting to valid checksum\") \n E_address = Web3.toChecksumAddress(E_address)\n print (E_address) \n try:\n eth_balance = Web3.fromWei(w3.eth.getBalance(E_address), 'ether')\n print(\"Balance = : %s\" % eth_balance)\n except ValueError as e:\n print('\\t INVALID ADDRESS: %s' % e) \n\ndef validate_definition(description):\n print (\"VALIDATING DEFINITION: %s \" % description)\n length = len(description)\n if (length > 1000):\n print(\"error, string length is %s\" % len(description)) \n return False\n\ndef validate_links(links):\n ssl._create_default_https_context = ssl._create_unverified_context\n print (\"VALIDATING LINKS:\")\n for k, v in links.items():\n req = urllib.request.Request(v)\n try: \n if urllib.request.urlopen(req):\n print (v, \": IS OK\")\n except 
urllib.error.URLError as e:\n print(v, e.reason) \n \n #print(v)\n\ndef main():\n if len(sys.argv) == 2:\n result = is_json(sys.argv[1])\n\n if (result):\n #try:\n #parser = ConfigParser()\n #parser.read('./Coin.cfg')\n #except parser.ParsingError as err:\n #print('Could not parse:', err)\n\n #for section_name in parser.sections():\n #print('Section:', section_name)\n #print(' Options:', parser.options(section_name))\n #for name, value in parser.items(section_name):\n #print(' {} = {}'.format(name, value))\n #print()\n\n #for k, v in result.items():\n #print (v)\n\n if 'address' in result:\n validate_eth_address(result['address'])\n else:\n print (\"No Address found\")\n if 'description' in result:\n validate_definition(result['description'])\n else:\n print (\"\\t No Description found\")\n if 'links' in result:\n validate_links(result['links'])\n else:\n print(\"No links found\")\n else:\n print(\"##############################\")\n print(\"# Json check FAILED\")\n print(\"##############################\") \n else:\n print(\"Usage: ValidateJson.py filename.json\")\n\nif __name__ == '__main__':\n main()\nexit(1)\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 14, "blob_id": "85f6f21f0df99be2b23ffbc1e1e2575867921b0e", "content_id": "c1d3ec2579d0e9596a28638b3fedea33edc0d9e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 15, "license_type": "no_license", "max_line_length": 14, "num_lines": 1, "path": "/README.md", "repo_name": "imbrbing/Ohni.us", "src_encoding": "UTF-8", "text": "# Ohni Scripts\n" } ]
2
mathways/afishab
https://github.com/mathways/afishab
a2daf4b843f98d9922f039c0315795cb1cfcbb90
37718a90cd026a839c5f4923b36b91fc317f001d
76f46821a298f99b33a2198fea2b8240ef3c3825
refs/heads/master
2023-08-12T15:43:19.274102
2020-10-14T08:52:42
2020-10-14T08:52:42
303,955,413
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5357833504676819, "alphanum_fraction": 0.5502901077270508, "avg_line_length": 28.514286041259766, "blob_id": "33cb405fa6ac8eae6bb8da0ff5114547c19ebc11", "content_id": "0a41678c86aa4d09b77f5213f05d6c3b10a0e4c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 128, "num_lines": 35, "path": "/telegram-afisha/qiwi.py", "repo_name": "mathways/afishab", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\nfrom settings import qiwi_token, qiwi_account\n\nclass qiwi:\n def __init__(self):\n pass\n\n def get_payments(self, count=50):\n session = requests.Session()\n session.headers['authorization'] = 'Bearer ' + qiwi_token\n parameters = {'rows': str(count)}\n response = session.get(f\"https://edge.qiwi.com/payment-history/v1/persons/{qiwi_account}/payments\", params = parameters)\n req = json.loads(response.text)\n return req['data']\n\n def check_payment(self, code, sum):\n \"\"\"\n Status:\n 0 - not found\n 1 - found, but error\n 2 - found and all right\n \"\"\"\n status = 0\n for payment in self.get_payments():\n if(payment['comment'] == str(code)):\n status = 1\n if(payment['status'] == 'SUCCESS' and payment['sum']['amount'] >= sum):\n return 2\n return status\n\nif __name__ == \"__main__\":\n pay = qiwi()\n print(pay.check_payment('96171', 3))\n\n" }, { "alpha_fraction": 0.51241135597229, "alphanum_fraction": 0.522384762763977, "avg_line_length": 38.57017517089844, "blob_id": "f21df08fe50a06254794df5fb1ddb9e9f982a26d", "content_id": "90f9292cb218f56d25abd3b92a62d9bf7cf0aa73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4512, "license_type": "no_license", "max_line_length": 159, "num_lines": 114, "path": "/telegram-afisha/database.py", "repo_name": "mathways/afishab", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sqlite3\n\nfrom random 
import randint\nfrom time import time\n\nclass database:\n\n def __init__(self):\n with sqlite3.connect('database.db') as conn:\n conn.execute(\"CREATE TABLE IF NOT EXISTS keys\"\n \"(`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, `product` INTEGER, `key` TEXT)\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS products\"\\\n \"(`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, `name` TEXT, `description` TEXT, `cost` INTEGER )\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS purchases\"\n \"(`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, `user_id` INTEGER, `product` INTEGER, `code` INTEGER, `datetime` INTEGER )\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS users_keys\"\n \"(`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, `key` TEXT, `user_id` INTEGER, `datetime` INTEGER, `product` INTEGER )\")\n conn.commit()\n\n def has_key_on_product(self, product, conn):\n with sqlite3.connect('database.db') as conn:\n for key in conn.execute(f\"SELECT * FROM keys \"\\\n f\"WHERE product == {product}\"):\n return True\n return False\n\n def get_catalog(self, offset=0, count=20):\n with sqlite3.connect('database.db') as conn:\n out = []\n for item in conn.execute(f\"SELECT * FROM products \"\\\n f\"ORDER BY id \"\\\n f\"LIMIT {count} OFFSET {offset}\"):\n if(self.has_key_on_product(item[0], conn)):\n out.append(item)\n return out\n\n\n def get_product_by_id(self, id):\n with sqlite3.connect('database.db') as conn:\n for product in conn.execute(f\"SELECT * FROM products \"\\\n f\"WHERE id == {id}\"):\n return product \n return None\n\n\n def get_purchase_by_code(self, code):\n with sqlite3.connect('database.db') as conn:\n for purchase in conn.execute(f\"SELECT * FROM purchases \"\\\n f\"WHERE code == {code}\"):\n return purchase \n return None\n\n \n def has_purchase(self, user_id, product):\n with sqlite3.connect('database.db') as conn:\n for purchase in conn.execute(f\"SELECT * FROM purchases \"\\\n f\"WHERE user_id == {user_id} AND product == 
{product}\"):\n return purchase[3]\n return None\n\n\n def add_purchase(self, user_id, product):\n with sqlite3.connect('database.db') as conn:\n code = randint(10000, 99999)\n while(self.get_purchase_by_code(code)):\n code = randint(10000, 99999)\n conn.execute(f\"INSERT INTO purchases (user_id, product, code, datetime) \"\\\n f\"VALUES ({user_id}, {product}, {code}, {time()})\")\n conn.commit()\n return code\n\n\n def get_key_by_product_id(self, product_id):\n with sqlite3.connect('database.db') as conn:\n for key in conn.execute(f\"SELECT * FROM keys \"\\\n f\"WHERE product == {product_id}\"):\n return key\n return None\n\n\n def remove_purcases_by_code(self, code):\n with sqlite3.connect('database.db') as conn:\n conn.execute(f\"DELETE FROM purchases \"\\\n f\"WHERE code == {code}\")\n conn.commit()\n\n\n def remove_key(self, key):\n with sqlite3.connect('database.db') as conn:\n conn.execute(f\"DELETE FROM keys \"\\\n f\"WHERE key == {key}\")\n conn.commit()\n\n\n def add_key_to_user(self, key, user_id):\n with sqlite3.connect('database.db') as conn:\n conn.execute(f\"INSERT INTO users_keys (key, user_id, datetime) \"\\\n f\"VALUES ({key}, {user_id}, {time()})\")\n conn.commit()\n\n\n def get_users_keys(self, user_id):\n with sqlite3.connect('database.db') as conn:\n users_keys = []\n for user_key in conn.execute(f\"SELECT * FROM users_keys \"\\\n f\"WHERE user_id == {user_id}\"):\n users_keys.append(user_key)\n return users_keys\n\n\nif __name__ == \"__main__\":\n db = database()\n # db.remove_key(\"123123\")\n\n" }, { "alpha_fraction": 0.5844786763191223, "alphanum_fraction": 0.5896455645561218, "avg_line_length": 31.914966583251953, "blob_id": "484832124fd6a6227a2a981c1e28289859d25ded", "content_id": "f3ddb8cf0bc532e52959cfca36fc7914cc9b96b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10478, "license_type": "no_license", "max_line_length": 100, "num_lines": 294, "path": 
"/telegram-afisha/bot.py", "repo_name": "mathways/afishab", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom settings import token, qiwi_account\nfrom database import database\nfrom qiwi import qiwi\n\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import (Updater, CommandHandler,\n CallbackQueryHandler, ConversationHandler,\n PicklePersistence)\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nMENU, CHOOSING, LOOKING, BUY, KEY, CHECK, LOOKING_KEYS = range(7)\n\ndb = database()\npayments = qiwi()\n\nmenu_keyboard = [\n [InlineKeyboardButton(\"На что можно сходить?\", callback_data='catalog')],\n [InlineKeyboardButton(\"Мои покупки\", callback_data='purchases')],\n [InlineKeyboardButton(\"Отзывы\", url=\"скоро добавим\")],\n [InlineKeyboardButton(\"Поддержка\", callback_data='he')]\n]\nmenu_markup = InlineKeyboardMarkup(menu_keyboard, one_time_keyboard=True)\n\n\ndef start(update, context):\n update.message.reply_text(\n \"Главное меню\",\n reply_markup=menu_markup\n )\n\n return MENU\n\n\ndef start_over(update, context):\n querry = update.callback_query\n\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=\"Главное меню\",\n reply_markup=menu_markup\n )\n\n return MENU\n\n\ndef catalog(update, context):\n querry = update.callback_query\n if (context.user_data.get('offset') == None):\n context.user_data['offset'] = 0\n items = db.get_catalog(offset=context.user_data['offset'])\n if (len(items)):\n reply_text = 'Список:\\n'\n point = 1\n for item in items:\n reply_text += f\"{point}. 
{item[1]} - {item[3]} p.\\n\"\n point += 1\n keyboard = list()\n for i in range(1, point, point // 2):\n keyboard.append([])\n for j in range(i, min(point, i + point // 2)):\n callback_data = str(items[j - 1][0])\n keyboard[-1].append(InlineKeyboardButton(str(j), callback_data=callback_data))\n keyboard.append([\n InlineKeyboardButton('Назад', callback_data='back')\n ])\n reply_markup = InlineKeyboardMarkup(keyboard)\n else:\n text = \"Каталог пока пуст, но скоро в нем появятся новые товары. Обязательно возвращайтесь.\"\n keyboard = [\n [InlineKeyboardButton('Назад', callback_data='back')]\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=reply_text,\n reply_markup=reply_markup\n )\n\n return CHOOSING\n\n\ndef product(update, context):\n querry = update.callback_query\n if (querry.data == 'back'):\n id = context.user_data['last_id']\n else:\n id = int(querry.data)\n context.user_data['last_id'] = id\n item = db.get_product_by_id(id)\n reply_text = f\"{item[1]}\\n{item[2]}\\n{item[3]} p.\"\n keyboard = [\n [InlineKeyboardButton(\"Перейти к оплате\", callback_data='buy')],\n [InlineKeyboardButton(\"Назад\", callback_data='back')]\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=reply_text,\n reply_markup=reply_markup\n )\n\n return LOOKING\n\n\ndef buy(update, context):\n querry = update.callback_query\n item = db.get_product_by_id(context.user_data['last_id'])\n code = db.has_purchase(querry.message.chat_id, item[0])\n if (code == None):\n code = db.add_purchase(querry.message.chat_id, item[0])\n text = f\"К оплате {item[3]} рублей.\\n\" \\\n f\"Чтобы получить ключ переведите деньги на счет qiwi.com/p/{qiwi_account}.\\n\" \\\n f\"В коментариях укажите {code}.\\n\\n\"\n keyboard = [\n [InlineKeyboardButton(\"Проверить оплату\", 
callback_data=f'{code}')],\n [InlineKeyboardButton(\"Назад\", callback_data='back')]\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=text,\n reply_markup=reply_markup\n )\n return BUY\n\n\ndef check(update, context):\n querry = update.callback_query\n code = int(querry.data)\n purchase = db.get_purchase_by_code(code)\n product = db.get_product_by_id(purchase[2])\n status = payments.check_payment(code, product[3])\n if (status == 2): # sucksess\n key = db.get_key_by_product_id(product[0])\n db.remove_purcases_by_code(code)\n db.remove_key(key[2])\n db.add_key_to_user(key[2], querry.message.chat_id)\n text = f\"Покупка прошла успешно.\\n\\n\" \\\n f\"Ваш билет {key[2]}.\\n\\n\" \\\n f\"Вы так же сможете посмотреть его в разделе Мои покупки.\"\n keypad = [\n [InlineKeyboardButton(\"Назад\", callback_data='back')]\n ]\n reply_markup = InlineKeyboardMarkup(keypad)\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=\"```123123``` ***123321321123***\",\n parse_mode=\"Markdown\"\n )\n context.bot.send_message(\n chat_id=querry.message.chat_id,\n text=text,\n reply_markup=reply_markup\n )\n\n else: # unsucksess\n if (status == 1):\n text = f\"К оплате {product[3]} рублей.\\n\" \\\n f\"Чтобы получить ключ переведите деньги на счет qiwi.com/p/{qiwi_account}.\\n\" \\\n f\"В коментариях укажите {code}.\\n\\n\" \\\n f\"Оплата прошла неудачно.\" \\\n f\"Если вы оплатили, то, пожалуйста, обратитесь в поддержку.\\n\\n\"\n keypad = [\n [InlineKeyboardButton(\"Проверить оплату\", callback_data=f'{code}')],\n [InlineKeyboardButton(\"Назад\", callback_data='back')],\n [InlineKeyboardButton(\"Поддержка\", callback_data='support')]\n ]\n elif (status == 0):\n text = f\"К оплате {product[3]} рублей.\\n\" \\\n f\"Чтобы получить ключ переведите деньги на счет qiwi.com/p/{qiwi_account}.\\n\" \\\n f\"В коментариях 
укажите {code}.\\n\\n\" \\\n f\"Вашей оплаты не найдено.\" \\\n f\"Если вы оплатили, то, пожалуйста, обратитесь в поддержку.\"\n keypad = [\n [InlineKeyboardButton(\"Проверить оплату\", callback_data=f'{code}')],\n [InlineKeyboardButton(\"Назад\", callback_data='back')],\n [InlineKeyboardButton(\"Поддержка\", callback_data='support')]\n ]\n reply_markup = InlineKeyboardMarkup(keypad)\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=text,\n reply_markup=reply_markup\n )\n\n return CHECK\n\n\ndef purchases(update, context):\n querry = update.callback_query\n keys = db.get_users_keys(querry.message.chat_id)\n if (len(keys) == 0):\n text = \"Вы пока не совершали покупки\"\n else:\n text = \"Ваши купленные билеты:\\n\"\n for key in keys:\n text += f\"{key[1]}\\n\"\n keypad = [\n [InlineKeyboardButton(\"Назад\", callback_data='back')],\n ]\n reply_markup = InlineKeyboardMarkup(keypad)\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=text,\n reply_markup=reply_markup\n )\n\n return LOOKING_KEYS\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\n\ndef main():\n pp = PicklePersistence(filename='users_states')\n\n updater = Updater(token, persistence=pp, use_context=True)\n dp = updater.dispatcher\n\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n\n states={\n MENU: [\n CallbackQueryHandler(catalog, pattern='^catalog$'),\n CallbackQueryHandler(purchases, pattern='^purchases$'),\n ],\n LOOKING_KEYS: [\n CallbackQueryHandler(start_over, pattern='^back$')\n ],\n CHOOSING: [\n CallbackQueryHandler(start_over, pattern='^back$'),\n CallbackQueryHandler(product, pattern='')\n ],\n LOOKING: [\n CallbackQueryHandler(catalog, pattern='^back$'),\n CallbackQueryHandler(buy, pattern='^buy$')\n ],\n BUY: [\n 
CallbackQueryHandler(product, pattern='^back$'),\n CallbackQueryHandler(check, pattern='')\n ],\n CHECK: [\n CallbackQueryHandler(product, pattern='^back$'),\n CallbackQueryHandler(check, pattern=''),\n ]\n },\n\n fallbacks=[],\n name=\"AfishaSTD\",\n persistent=True,\n )\n\n dp.add_handler(conv_handler)\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\ndef he(update, context):\n querry = update.callback_query\n text = \"✅ Поддержка\\n\" \\\n \"Если у вас возникли какие-то проблемы или вопросы обращайтесь в телеграм Codollan\"\n\n context.bot.edit_message_text(\n chat_id=querry.message.chat_id,\n message_id=querry.message.message_id,\n text=text,\n reply_markup=menu_markup\n )\n\n return MENU\n\n\nif __name__ == '__main__':\n main()\n" } ]
3
xCryogenx/python-astar
https://github.com/xCryogenx/python-astar
0ee877284e410fda5ee325fcf5baf8dd18f1d87b
ff3e4bba221bde8b9226614636a22a015e87c069
704ce182698814d85b5df7e53fdc9151eb32febf
refs/heads/master
2020-07-28T09:42:16.475375
2016-11-10T19:37:36
2016-11-10T19:37:36
73,414,749
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3155045509338379, "alphanum_fraction": 0.4378518760204315, "avg_line_length": 34.515384674072266, "blob_id": "20a139ef31ee037fb3800c85b38534e488805039", "content_id": "93f42c12a174d68c3d39b14060448f3a52f870fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4618, "license_type": "no_license", "max_line_length": 490, "num_lines": 130, "path": "/astar.py", "repo_name": "xCryogenx/python-astar", "src_encoding": "UTF-8", "text": "import numpy\nfrom heapq import *\nimport unittest\n\n\ndef heuristic(a, b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2\n\ndef astar(array, start, goal, reverse=False):\n '''\n Return shortest path in 2D list of points \n or return False. First element in array\n is goal and last element is next after\n start point\n\n :array: list or numpy.array of points\n :start: (x,y) start point\n :goal: (x,y) end point\n '''\n\n if type(array) == numpy.ndarray:\n shape0 = array.shape[0]\n shape1 = array.shape[1]\n else:\n shape0 = len(array)\n shape1 = len(array[0])\n\n neighbors = [(0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1)]\n\n close_set = set()\n came_from = {}\n gscore = {start:0}\n fscore = {start:heuristic(start, goal)}\n oheap = []\n\n heappush(oheap, (fscore[start], start))\n \n while oheap:\n\n current = heappop(oheap)[1]\n\n if current == goal:\n data = []\n while current in came_from:\n data.append(current)\n current = came_from[current]\n return data[::-1] if reverse else data\n\n close_set.add(current)\n for i, j in neighbors:\n neighbor = current[0] + i, current[1] + j \n tentative_g_score = gscore[current] + heuristic(current, neighbor)\n if 0 <= neighbor[0] < shape0:\n if 0 <= neighbor[1] < shape1: \n if array[neighbor[0]][neighbor[1]] == 1:\n continue\n else:\n # array bound y walls\n continue\n else:\n # array bound x walls\n continue\n \n if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):\n continue\n \n if 
tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1]for i in oheap]:\n came_from[neighbor] = current\n gscore[neighbor] = tentative_g_score\n fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)\n heappush(oheap, (fscore[neighbor], neighbor))\n \n return False\n\n\nclass AstarTest(unittest.TestCase):\n def test_astar_list(self):\n nmap = list([\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,0,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,0,1,1,1,1,1,1,1,1,1,1,1,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,0,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,0,1,1,1,1,1,1,1,1,1,1,1,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,0,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0]])\n\n s = astar(nmap, (0,0), (0,5), reverse=True)\n self.assertEqual(s, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)])\n\n def test_astar_numpy(self):\n nmap = numpy.array([\n #nmap = list([\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,0,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,0,1,1,1,1,1,1,1,1,1,1,1,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,0,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,0,1,1,1,1,1,1,1,1,1,1,1,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [1,1,1,1,1,1,1,1,1,1,1,1,0,1],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0]])\n \n s1 = astar(nmap, (0,0), (10,13))\n compare1 = [(10, 13), (9, 12), (8, 11), (8, 10), (8, 9), (8, 8), (8, 7), (8, 6), (8, 5), (8, 4), (8, 3), (8, 2), (7, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6), (6, 7), (6, 8), (6, 9), (6, 10), (6, 11), (5, 12), (4, 11), (4, 10), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4), (4, 3), (4, 2), (3, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (2, 10), (2, 11), (1, 12), (0, 11), (0, 10), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3), (0, 2), (0, 1)]\n self.assertEqual(s1, compare1)\n\n s2 = astar(nmap, (0,0), (0,1))\n self.assertEqual(s2, [(0,1)])\n\n s3 = astar(nmap, (0,0), (0,5), reverse=False)\n self.assertEqual(s3, [(0, 5), (0, 4), 
(0, 3), (0, 2), (0, 1)])\n\n s4 = astar(nmap, (0,0), (0,5), reverse=True)\n self.assertEqual(s4, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)])\n\n s5 = astar(nmap, (0,0), (1,5), reverse=True)\n self.assertFalse(s5)\n\n s6 = astar(nmap, (-1,20), (100,500), reverse=True)\n self.assertFalse(s6)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.8297872543334961, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 14.666666984558105, "blob_id": "b2522e9544632d824112857bd8ff732ba1ce2aa1", "content_id": "8d5f4a209b8e92cf1f514f87481a57c785183852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 30, "num_lines": 3, "path": "/README.md", "repo_name": "xCryogenx/python-astar", "src_encoding": "UTF-8", "text": "# python-astar\n\nImplementation astar algorithm\n" } ]
2
qsq-dm/mff
https://github.com/qsq-dm/mff
5f17d6ffd1d4742dc46d1367cff35233af08a450
d7f1e6f3fba95fe0d8ebb8937dda64a17e71f048
1f79d9d02810a944c45fc962c62159035c5a2247
refs/heads/master
2020-12-29T02:19:29.037394
2016-08-01T15:40:42
2016-08-01T15:40:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6902261972427368, "alphanum_fraction": 0.6903461813926697, "avg_line_length": 55.10437774658203, "blob_id": "466026f8f311632bbd8d1ba1a3b16a271d4d3bac", "content_id": "2768cc0f411937fa319452adf2d8d8ceeb48a99a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16667, "license_type": "no_license", "max_line_length": 143, "num_lines": 297, "path": "/admin/urls.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\n\nfrom admin.views import index\nfrom admin.views import get_city_list\nfrom admin.views import login\nfrom admin.views import logout\nfrom admin.views import new_city\nfrom admin.views import get_item_list\nfrom admin.views import item_edit\nfrom admin.views import get_item\nfrom admin.views import get_cat\nfrom admin.views import get_subcat\nfrom admin.views import hospital_edit\nfrom admin.views import get_school_list\nfrom admin.views import get_hospital\nfrom admin.views import get_hospital_list\nfrom admin.views import get_cat_list\nfrom admin.views import get_subcat_list\nfrom admin.views import set_subcat_status\nfrom admin.views import get_period_choice_list\nfrom admin.views import edit_itemcat\nfrom admin.views import edit_itemsubcat\nfrom admin.views import refresh_qntoken\nfrom admin.views import get_apply_list\nfrom admin.views import get_apply_detail\nfrom admin.views import apply_approve\nfrom admin.views import apply_reject\nfrom admin.views import set_item_status\nfrom admin.views import recommend_item\nfrom admin.views import get_user_list\nfrom admin.views import get_user_detail\nfrom admin.views import get_school_city_list\nfrom admin.views import get_order_list\nfrom admin.views import upload_image\nfrom admin.views import verify_chsi\nfrom admin.views import set_chsi_captcha\nfrom admin.views import 
refresh_captcha\nfrom admin.views import get_advice_list\nfrom admin.views import get_advice_detail\nfrom admin.views import admin_refund_order\nfrom admin.views import get_activity_list\nfrom admin.views import get_activity_items\nfrom admin.views import set_activity_items\nfrom admin.views import activity_edit\nfrom admin.views import get_activity\nfrom admin.views import top_recommend_item\nfrom admin.views import recommend_subcat\nfrom admin.views import top_recommend_subcat\nfrom admin.views import get_item_recommend\nfrom admin.views import item_recommend_edit\nfrom admin.views import get_item_activity\nfrom admin.views import item_activity_edit\nfrom admin.views import get_subcat_recommend\nfrom admin.views import subcat_recommend_edit\nfrom admin.views import set_recommend_order\nfrom admin.views import set_recommend_subcat_order\nfrom admin.views import new_period_pay_choice\nfrom admin.views import get_period_pay_log_list\nfrom admin.views import del_item_activity\nfrom admin.views import get_refund_detail\nfrom admin.views import get_coupon_list\nfrom admin.views import coupon_edit\nfrom admin.views import get_coupon\nfrom admin.views import trial_edit\nfrom admin.views import get_trial_list\nfrom admin.views import get_trial\nfrom admin.views import trial_applyer_list\nfrom admin.views import send_trial\nfrom admin.views import set_trial_order\nfrom admin.views import get_promoter_list\nfrom admin.views import add_promoter\nfrom admin.views import add_hospital_admin\nfrom admin.views import get_hospital_user_list\nfrom admin.views import to_supply\nfrom admin.views import supply_apply\nfrom admin.views import set_hospital_status\nfrom admin.views import get_daily_coupon_list\nfrom admin.views import daily_coupon_edit\nfrom admin.views import get_daily_coupon\nfrom admin.views import set_recommend_hospital_order\nfrom admin.views import hospital_recommend_edit\nfrom admin.views import get_hospital_recommend\nfrom admin.views import 
get_tutorial_list\nfrom admin.views import tutorial_edit\nfrom admin.views import get_tutorial\nfrom admin.views import set_tutorial_status\nfrom admin.views import get_user_vcode\nfrom admin.views import reset_user_vcode_sent\nfrom admin.views import send_user_coupon\nfrom admin.views import get_city\nfrom admin.views import city_edit\nfrom admin.views import recommend_hospital\nfrom admin.views import daily_applyer_list\nfrom admin.views import set_cats_order\nfrom admin.views import set_city\nfrom admin.views import set_question_status\nfrom admin.views import get_question_list\nfrom admin.views import new_question\nfrom admin.views import get_user_question_list\nfrom admin.views import get_room_list\nfrom admin.views import get_room_detail\n\n\nadmin_api = Blueprint('admin_api', __name__,\n template_folder='templates')\n\n\nadmin_api.add_url_rule('/', 'index', index)\nadmin_api.add_url_rule('/login/', 'login', login, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/logout/', 'logout', logout, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_city_list/', 'get_city_list', get_city_list)\nadmin_api.add_url_rule('/new_city/', 'new_city', city_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/city_edit/<int:item_id>/', 'city_edit', city_edit, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/daily_applyer_list/', 'daily_applyer_list', daily_applyer_list, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_item_list/', 'get_item_list', get_item_list, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/new_item/', 'new_item', item_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/edit_item/<int:item_id>/', 'edit_item', item_edit, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/new_tutorial/', 'new_tutorial', tutorial_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/tutorial_edit/<int:item_id>/', 'edit_tutorial', tutorial_edit, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/new_item/', 'new_item', item_edit, 
methods=['POST', 'GET'])\nadmin_api.add_url_rule('/edit_item/<int:item_id>/', 'edit_item', item_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_item/', 'get_item', get_item, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_cat/', 'get_cat', get_cat, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_subcat/', 'get_subcat', get_subcat, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/new_activity/', 'new_activity', activity_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/new_itemcat/', 'new_itemcat', edit_itemcat, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/new_itemsubcat/', 'new_itemsubcat', edit_itemsubcat, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/edit_itemcat/<int:cat_id>/', 'edit_itemcat', edit_itemcat, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/edit_itemsubcat/<int:sub_cat_id>/', 'edit_itemsubcat', edit_itemsubcat, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/edit_activity/<int:item_id>/', 'edit_activity', activity_edit, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_school_list/', 'get_school_list', get_school_list, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_order_list/', 'get_order_list', get_order_list, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_city/', 'get_city', get_city, methods=['POST', 'GET'])\n\n\nadmin_api.add_url_rule('/get_cat_list/', 'get_cat_list', get_cat_list, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_subcat_list/', 'get_subcat_list', get_subcat_list, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/edit_hospital/<int:item_id>/', 'hospital_edit', hospital_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/new_hospital/', 'new_hospital', hospital_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_hospital/', 'get_hospital', get_hospital, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_hospital_list/', 'get_hospital_list', get_hospital_list, methods=['POST', 
'GET'])\nadmin_api.add_url_rule('/get_tutorial_list/', 'get_tutorial_list', get_tutorial_list)\n\nadmin_api.add_url_rule('/get_period_choice_list/', 'get_period_choice_list', get_period_choice_list, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/subcat/set_status/', 'set_status', set_subcat_status, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/refresh_qntoken/', 'refresh_qntoken', refresh_qntoken, methods=['POST', 'GET'])\n\n\n\nadmin_api.add_url_rule('/get_apply_list/', 'get_apply_list', get_apply_list, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_apply_detail/', 'get_apply_detail', get_apply_detail, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/apply_reject/', 'apply_reject', apply_reject, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/apply_approve/', 'apply_approve', apply_approve, methods=['POST', 'GET'])\n\n\nadmin_api.add_url_rule('/set_item_status/', 'set_item_status', set_item_status, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/recommend_item/', 'recommend_item', recommend_item, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/recommend_hospital/', 'recommend_hospital', recommend_hospital, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/recommend_subcat/', 'recommend_subcat', recommend_subcat, methods=['POST', 'GET'])\n\n\nadmin_api.add_url_rule('/get_user_list', 'get_user_list', get_user_list)\nadmin_api.add_url_rule('/get_user_detail', 'get_user_detail', get_user_detail)\n\nadmin_api.add_url_rule('/get_school_city_list/', 'get_school_city_list', get_school_city_list)\nadmin_api.add_url_rule('/get_advice_list/', 'get_advice_list', get_advice_list)\nadmin_api.add_url_rule('/get_advice_detail/', 'get_advice_detail', get_advice_detail)\n\nadmin_api.add_url_rule('/upload_image/', 'upload_image', upload_image, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/verify_chsi/', 'verify_chsi', verify_chsi, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/set_chsi_captcha/', 'set_chsi_captcha', set_chsi_captcha, 
methods=['POST', 'GET'])\nadmin_api.add_url_rule('/refresh_chsi_captcha/', 'refresh_chsi_captcha', refresh_captcha, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/refund_order/', 'refund_order', admin_refund_order, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_activity_list/', 'get_activity_list', get_activity_list, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_activity_items/', 'get_activity_items', get_activity_items, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/set_activity_items/', 'set_activity_items', set_activity_items, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_activity/', 'get_activity', get_activity, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/top_recommend_item/', 'top_recommend_item', top_recommend_item, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/top_recommend_subcat/', 'top_recommend_subcat', top_recommend_subcat, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_item_recommend/', 'get_item_recommend', get_item_recommend, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_hospital_recommend/', 'get_hospital_recommend', get_hospital_recommend, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_subcat_recommend/', 'get_subcat_recommend', get_subcat_recommend, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_item_activity/', 'get_item_activity', get_item_activity, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/item_recommend_edit/<int:item_id>/', 'item_recommend_edit', item_recommend_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/item_activity_edit/<int:item_id>/', 'item_activity_edit', item_activity_edit, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/subcat_recommend_edit/<int:item_id>/', 'subcat_recommend_edit', subcat_recommend_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/hospital_recommend_edit/<int:item_id>/', 'hospital_recommend_edit', hospital_recommend_edit, methods=['POST', 
'GET'])\nadmin_api.add_url_rule('/set_recommend_order/', 'set_recommend_order', set_recommend_order, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/set_recommend_subcat_order/', 'set_recommend_subcat_order', set_recommend_subcat_order, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/set_recommend_hospital_order/', 'set_recommend_hospital_order', set_recommend_hospital_order, methods=['POST', 'GET'])\n\n\nadmin_api.add_url_rule('/new_period_pay_choice/', 'new_period_pay_choice', new_period_pay_choice, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_period_pay_log_list/', 'get_period_pay_log_list', get_period_pay_log_list, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/del_item_activity/', 'del_item_activity', del_item_activity, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_refund_detail/', 'get_refund_detail', get_refund_detail)\n\nadmin_api.add_url_rule('/get_coupon_list/', 'get_coupon_list', get_coupon_list)\n\nadmin_api.add_url_rule('/coupon_edit/<int:item_id>/', 'coupon_edit', coupon_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/new_coupon/', 'new_coupon', coupon_edit, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_coupon/', 'get_coupon', get_coupon)\n\nadmin_api.add_url_rule('/get_trial_list/', 'get_trial_list', get_trial_list)\n\nadmin_api.add_url_rule('/new_trial/', 'new_trial', trial_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/edit_trial/<int:item_id>/', 'trial_edit', trial_edit, methods=['POST', 'GET'])\n\n\nadmin_api.add_url_rule('/get_trial/', 'get_trial', get_trial)\n\nadmin_api.add_url_rule('/trial_applyer_list/', 'trial_applyer_list', trial_applyer_list)\n\nadmin_api.add_url_rule('/send_trial/', 'send_trial', send_trial, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/set_trial_order/', 'set_trial_order', set_trial_order, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/get_promoter_list/', 'get_promoter_list', get_promoter_list)\n\nadmin_api.add_url_rule('/add_promoter/', 
'add_promoter', add_promoter, methods=['POST','GET'])\n\n\nadmin_api.add_url_rule('/get_hospital_user_list/', 'get_hospital_user_list', get_hospital_user_list)\n\nadmin_api.add_url_rule('/add_hospital_admin/', 'add_hospital_admin', add_hospital_admin, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/to_supply/', 'to_supply', to_supply, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/supply_apply/', 'supply_apply', supply_apply, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/set_hospital_status/', 'set_hospital_status', set_hospital_status, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/get_daily_coupon_list/', 'get_daily_coupon_list', get_daily_coupon_list, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/daily_coupon_edit/<int:item_id>/', 'daily_coupon_edit', daily_coupon_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/new_daily_coupon/', 'new_daily_coupon', daily_coupon_edit, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_daily_coupon/', 'get_daily_coupon', get_daily_coupon, methods=['POST', 'GET'])\nadmin_api.add_url_rule('/get_tutorial/', 'get_tutorial', get_tutorial)\n\nadmin_api.add_url_rule('/set_tutorial_status/', 'set_tutorial_status', set_tutorial_status, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_user_vcode/', 'get_user_vcode', get_user_vcode, methods=['GET', 'POST'])\n\nadmin_api.add_url_rule('/reset_user_vcode/', 'reset_user_vcode', reset_user_vcode_sent, methods=['GET', 'POST'])\n\nadmin_api.add_url_rule('/send_user_coupon/', 'send_user_coupon', send_user_coupon, methods=['POST'])\n\nadmin_api.add_url_rule('/set_cats_order/', 'set_cats_order', set_cats_order, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/change_city/', 'set_city', set_city, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/set_question_status/', 'set_question_status', set_question_status, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_question_list/', 'get_question_list', get_question_list, methods=['POST', 
'GET'])\n\nadmin_api.add_url_rule('/new_question/', 'new_question', new_question, methods=['POST', 'GET'])\n\nadmin_api.add_url_rule('/get_user_question_list/', 'get_user_question_list', get_user_question_list, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/get_room_list/', 'get_room_list', get_room_list, methods=['POST','GET'])\n\nadmin_api.add_url_rule('/get_room_detail/', 'get_room_detail', get_room_detail, methods=['POST','GET'])\n\n\nfrom admin.views import remark_order\nfrom admin.views import remark_apply\nfrom admin.views import remark_useradvice\n\nadmin_api.add_url_rule('/remark_order/', 'remark_order', remark_order, methods=['POST','GET'])\nadmin_api.add_url_rule('/remark_apply/', 'remark_apply', remark_apply, methods=['POST','GET'])\nadmin_api.add_url_rule('/remark_useradvice/', 'remark_useradvice', remark_useradvice, methods=['POST','GET'])\n\n\n\n\n" }, { "alpha_fraction": 0.6281052827835083, "alphanum_fraction": 0.6503522396087646, "avg_line_length": 38.08695602416992, "blob_id": "0ee6cc956d79abe9beca474d480b025e111c7135", "content_id": "071849769317798375fd56b2a017f7d9b0631e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2697, "license_type": "no_license", "max_line_length": 65, "num_lines": 69, "path": "/migrations/versions/4db11f101899_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4db11f101899\nRevises: 4d043b343761\nCreate Date: 2015-12-09 15:21:29.065236\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4db11f101899'\ndown_revision = '4d043b343761'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('trial',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=300), nullable=True),\n sa.Column('image', sa.String(length=300), nullable=True),\n sa.Column('cat', sa.Integer(), nullable=True),\n sa.Column('item_id', sa.Integer(), nullable=True),\n sa.Column('total', sa.Integer(), nullable=True),\n sa.Column('sent', sa.Integer(), nullable=True),\n sa.Column('apply_count', sa.Integer(), nullable=True),\n sa.Column('rules', sa.Text(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('end_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('trial_apply',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('trial_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('phone', sa.String(length=30), nullable=True),\n sa.Column('school', sa.String(length=100), nullable=True),\n sa.Column('addr', sa.String(length=100), nullable=True),\n sa.Column('content', sa.String(length=1000), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['trial_id'], ['trial.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('trial_comment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('trial_id', sa.Integer(), nullable=False),\n sa.Column('photos', sa.String(length=1000), nullable=True),\n sa.Column('content', sa.String(length=10000), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['trial_id'], ['trial.id'], ),\n sa.ForeignKeyConstraint(['user_id'], 
['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('trial_comment')\n op.drop_table('trial_apply')\n op.drop_table('trial')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5334815979003906, "alphanum_fraction": 0.5460835099220276, "avg_line_length": 29.41353416442871, "blob_id": "2331c0b1aefa44a34c4e9682f1aabcb698f98fd0", "content_id": "6374578579e04df8c6c94ed821a35ce8823fdcc8", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 8368, "license_type": "no_license", "max_line_length": 181, "num_lines": 266, "path": "/thirdparty/wechat.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport time\nimport urllib\nimport urllib2\nimport hashlib\n\nfrom flask import request\nimport weixin.client\nWechatAuthAPI = weixin.client.WeixinAPI\n\nfrom wechat_sdk import WechatBasic\nfrom settings import WECHAT_APP_ID\nfrom settings import WECHAT_APP_SECRET\nfrom settings import WECHAT_APP_TOKEN\nfrom settings import SERVER_NAME\nfrom celery.contrib.methods import task_method\nfrom celery import current_app\n\nclass WechatInfo(WechatBasic):\n\n def set_cache(self, cache):\n self.cache = cache\n\n @property\n def access_token(self):\n token = self.cache.get()\n if not token:\n self.refresh_wechat_token()\n token = self.cache.get()\n return token\n\n @current_app.task(filter=task_method)\n def refresh_wechat_token(self):\n ''' 比较特殊 tasks的参数第一个bound了self '''\n data = self.grant_token()\n access_token = data[\"access_token\"]\n expires_in = data[\"expires_in\"]\n self.cache.set(access_token, expires_in-60)\n\n @property\n def jsapi_ticket(self):\n self._check_appid_appsecret()\n \n if getattr(self, '__jsapi_ticket', None):\n now = time.time()\n if self.__jsapi_ticket_expires_at - now > 60:\n return self.__jsapi_ticket\n else:\n 
self.grant_jsapi_ticket()\n data = self.grant_jsapi_ticket()\n return self.__jsapi_ticket\n\n def grant_jsapi_ticket(self, override=True):\n \"\"\"\n 获取 Jsapi Ticket\n 详情请参考 http://mp.weixin.qq.com/wiki/7/aaa137b55fb2e0456bf8dd9148dd613f.html#.E9.99.84.E5.BD.951-JS-SDK.E4.BD.BF.E7.94.A8.E6.9D.83.E9.99.90.E7.AD.BE.E5.90.8D.E7.AE.97.E6.B3.95\n :param override: 是否在获取的同时覆盖已有 jsapi_ticket (默认为True)\n :return: 返回的 JSON 数据包\n :raise HTTPError: 微信api http 请求失败\n \"\"\"\n self._check_appid_appsecret()\n # force to grant new access_token to avoid invalid credential issue\n response_json = self._get(\n url=\"https://api.weixin.qq.com/cgi-bin/ticket/getticket\",\n params={\n \"access_token\": self.access_token,\n \"type\": \"jsapi\",\n }\n )\n if override:\n self.__jsapi_ticket = response_json['ticket']\n self.__jsapi_ticket_expires_at = int(time.time()) + response_json['expires_in']\n return response_json\n\nwechat = WechatInfo(token=WECHAT_APP_TOKEN, appid=WECHAT_APP_ID, appsecret=WECHAT_APP_SECRET)\n\n\nmenu_data = {\n \"button\":[\n {\n \"type\": \"view\",\n \"name\": u\"分期整形\",\n \"url\": \"http://{}/user/index\".format(SERVER_NAME),\n },\n {\n \"type\": \"view\",\n \"name\": u\"寝室大赛\",\n \"url\": \"http://{}/static/user/Activities/home.html\".format(SERVER_NAME),\n },\n {\n \"name\": u\"更多\",\n \"sub_button\":[\n {\n \"type\":\"view\",\n \"name\":u\"我\",\n \"url\": \"http://{}/static/user/my-not-reg.html\".format(SERVER_NAME),\n },\n {\n \"type\":\"view\",\n \"name\":u\"下载APP\",\n \"url\": \"http://{}/static/user/downLoad.html\".format(SERVER_NAME),\n },\n {\n \"type\":\"view\",\n \"name\":u\"帮助中心\",\n \"url\": \"http://{}/user/help.html\".format(SERVER_NAME),\n },\n {\n \"type\": \"click\",\n \"name\": u\"联系客服\",\n \"key\": \"contact_us\",\n },\n {\n \"type\":\"view\",\n \"name\":u\"医院入口\",\n \"url\": \"http://{}/hospital/\".format(SERVER_NAME),\n },\n ]\n }\n ]}\n\n\ndef create_menu():\n ''' 创建公众号菜单 '''\n return wechat.create_menu(menu_data)\n\n\n\n\nREDIRECT_URI = 
'http://{}/user/'.format(SERVER_NAME)\n\n\nAuthApi = WechatAuthAPI(appid=WECHAT_APP_ID,\n app_secret=WECHAT_APP_SECRET,\n redirect_uri=REDIRECT_URI)\n\n\ndef get_user_snsapi_base_url(redirecturi='http://{}/user/auth'.format(SERVER_NAME), state='STATE'):\n '''返回snsapi_base静默登录url '''\n link = ('''\nhttps://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s&response_type=code&scope=snsapi_base&state=%s#wechat_redirect\n'''%(WECHAT_APP_ID, urllib.quote_plus(redirecturi), state)).strip()\n return link\n\ndef exchange_code_for_token(code):\n ''' 通过微信oauth静默登录认证回调的code参数 获取 access_token openid\n 返回值是一个字典 包含access_token, openid\n '''\n return AuthApi.exchange_code_for_access_token(code=code)\n\n\ndef get_user_info(access_token, openid):\n ''' 通过code获取的access_token及open_id获取oauth授权登录用户信息 '''\n auth_api = WechatAuthAPI(access_token=access_token)\n user = auth_api.user(openid=openid)\n return user\n \n \n\ndef create_link_str(params):\n result = ''\n for i in sorted(params.keys()):\n result += i+'='+params[i]+'&'\n return result.rstrip('&')\nfrom util.utils import random_str\ndef gen_noncestr():\n return random_str(10)\n\ndef get_sign(path, timestamp, noncestr):\n data = dict(\n jsapi_ticket = wechat.jsapi_ticket,\n noncestr= noncestr,\n timestamp= timestamp,\n url = path,\n )\n m = hashlib.sha1()\n m.update(create_link_str(data))\n print create_link_str(data)\n return m.hexdigest()\n\n\ndef get_jssdk_context(link=None):\n ''' js sdk 参数 '''\n try:\n noncestr = gen_noncestr()\n timestamp= str(time.time())\n sign = get_sign(link or request.url, timestamp, noncestr)\n \n context = {\n 'domain': SERVER_NAME,\n 'appid': WECHAT_APP_ID,\n 'noncestr': noncestr,\n 'timestamp': timestamp,\n 'sign': sign,\n }\n \n return context\n except:\n import traceback\n traceback.print_exc()\n print 'jssdk error'\n return {}\n\n\n\ndef create_qrcode(scene_id):\n ''' 创建二维码ticket'''\n data = {\n \"action_name\": \"QR_LIMIT_SCENE\",\n \"action_info\": {\"scene\": {\"scene_id\": 
scene_id}}\n }\n a = wechat.create_qrcode(data)\n ticket = a['ticket']\n print ticket, 'ticket'\n return ticket, wechat.show_qrcode(ticket)\n\nimport json\nimport requests\ndef send_group_mnews(open_ids, media_id):\n ''' 群发消息 '''\n link = 'https://api.weixin.qq.com/cgi-bin/message/mass/send?access_token={}'.format(wechat.access_token)\n data = {\n \"touser\": open_ids,\n \"mpnews\":{\n \"media_id\":\"123dsdajkasd231jhksad\"\n },\n \"msgtype\":\"mpnews\"\n }\n response= requests.post(link, data=json.dumps(data))\n return response\n\n\ndef create_article():\n link = 'https://api.weixin.qq.com/cgi-bin/media/uploadnews?access_token={}'.format(wechat.access_token)\n article = {\n \"articles\": [\n {\n \"thumb_media_id\":'vtZ1MJiazhv0FicHsbhOicw7fRKPbKDQtH85oERG82aia2Eicn5BlEoyYZff6KXTgN8X3gYWVeRVx1ZR7bMmhIU7JgQ',\n \"author\":\"xxx\",\n \"title\":\"Happy Day\",\n \"content_source_url\":\"www.qq.com\",\n \"content\":\"content\",\n \"digest\":\"digest\",\n \"show_cover_pic\":\"1\"\n },\n {\n \"thumb_media_id\":'vtZ1MJiazhv0FicHsbhOicw7fRKPbKDQtH85oERG82aia2Eicn5BlEoyYZff6KXTgN8X3gYWVeRVx1ZR7bMmhIU7JgQ',\n \"author\":\"xxx\",\n \"title\":\"Happy Day\",\n \"content_source_url\":\"www.qq.com\",\n \"content\":\"content\",\n \"digest\":\"digest\",\n \"show_cover_pic\":\"0\"\n }\n ]\n }\n response= requests.post(link, data=json.dumps(article))\n print response.text\n return response\n\n\ndef upload_image():\n ''' '''\n link = 'https://api.weixin.qq.com/cgi-bin/media/uploadimg?access_token={}'.format(wechat.access_token)\n files = {'media': open('/tmp/meifenfen/static/user/img/logo.png', 'rb')}\n return requests.post(link, files=files)\n\n\n\n\n" }, { "alpha_fraction": 0.4985664188861847, "alphanum_fraction": 0.5195922255516052, "avg_line_length": 25.44725799560547, "blob_id": "e29d86b60fc90443aa0265d1c9b3eca0104d6bce", "content_id": "6b2580b140b6646d55d2d19f3aef36df50ee7ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 6320, "license_type": "no_license", "max_line_length": 100, "num_lines": 237, "path": "/util/validators.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport re\nimport json\nimport urllib2\n\nfrom util.utils import union_dict\n\n\n\nphoneprefix = [\n '130','131','132','133','134','135','136','137','138','139',\n '150','151','152','153','154','155','156','157','158','159',\n '170','171','172','173','174','175','176','177','178','179',\n '180','181','182','183','184','185','186','187','188','189'\n ]\nphone_prefix_pattern = re.compile('^(%s)\\d{8}$' % '|'.join(phoneprefix))\nfake_phone = re.compile(r'1000000(\\d){4}')\ndef valid_phone(phone):\n return (bool(phone_prefix_pattern.search(phone)) or bool(fake_phone.search(phone))) and phone\n\n\n\ndef Optional(field):\n field.optional = True\n return field\n\n\nclass Field(object):\n ''' '''\n def __init__(self, msg=None, **kw):\n self.optional = False\n self.msg = msg\n\n def validate(self, data):\n is_valid = False\n result = '请实现此方法'\n return is_valid, result\n\n\nclass TextField(Field):\n\n def __init__(self, min_length=None, max_length=None, **kw):\n super(TextField, self).__init__(**kw)\n self.min_length = min_length;\n self.max_length = max_length\n\n def validate(self, data):\n is_valid = isinstance(data, (str, unicode)) and \\\n (self.min_length<=len(data) if self.min_length is not None else True) and \\\n (self.max_length>=len(data) if self.max_length is not None else True)\n result = data\n return is_valid, result\n\n\nclass IntChoiceField(Field):\n\n def __init__(self, choices=None, **kw):\n super(IntChoiceField, self).__init__(**kw)\n self.choices = choices or set()\n\n def validate(self, data):\n is_valid = str(str(data) or '').isdigit() and \\\n int(data) in self.choices\n result = int(data) if is_valid else None\n return is_valid, result\n\n\nclass BoolChoiceField(Field):\n\n def __init__(self, choices=None, **kw):\n super(BoolChoiceField, 
self).__init__(**kw)\n self.choices = choices or set()\n\n def validate(self, data):\n is_valid = True\n result = bool(data)\n return is_valid, result\n\n\nclass BoolIntChoiceField(Field):\n\n def __init__(self, **kw):\n super(BoolIntChoiceField, self).__init__(**kw)\n\n def validate(self, data):\n is_valid = True\n try:\n data = bool(int(data))\n except:\n is_valid = False\n return is_valid, data\n\n\n\nclass ChoiceField(Field):\n\n def __init__(self, choices=None, **kw):\n super(ChoiceField, self).__init__(**kw)\n self.choices = choices or set()\n\n def validate(self, data):\n result = None\n is_valid = False\n try:\n if data in self.choices:\n is_valid = True\n result = data\n except Exception as e:\n print str(e)\n\n return is_valid, result\n\n\nclass IntChoicesField(Field):\n\n def __init__(self, choices=None, all=False, **kw):\n super(IntChoicesField, self).__init__(**kw)\n self.choices = choices or set()\n\n def validate(self, data):\n result = None\n is_valid = False\n try:\n result = map(int, data)\n if not all:\n is_valid = all(int(i) in self.choices for i in result) if self.choices else False\n else:\n is_valid = True\n except Exception as e:\n print str(e)\n\n return is_valid, result\n\n\nclass MobileField(Field):\n\n def validate(self, data):\n is_valid = valid_phone(data)\n result = data\n return is_valid, result\n\n\nclass FloatField(Field):\n\n def validate(self, data):\n is_valid = False\n result = None\n try:\n result = float(data)\n is_valid = True\n except Exception as e:\n pass\n return is_valid, result\n\n\nclass IntField(Field):\n\n def validate(self, data):\n is_valid = False\n result = None\n try:\n result = int(data)\n is_valid = True\n except Exception as e:\n pass\n return is_valid, result\n\n\nclass JsonField(Field):\n\n def validate(self, data):\n is_valid = False\n result = None\n try:\n result = json.loads(urllib2.unquote(data)) if data else []\n is_valid = True\n except Exception as e:\n pass\n return is_valid, result\n\n\n\nclass 
IdField(Field):\n ''' 数据库ID字段 '''\n def validate(self, data):\n is_valid = str(str(data) or '').isdigit()\n result = int(data) if is_valid else None\n return is_valid, result\n\n\nclass REGField(Field):\n def __init__(self, pattern=None, **kw):\n self.pattern = pattern\n super(REGField, self).__init__(**kw)\n\n def validate(self, value):\n try:\n valid = False\n p = re.compile(self.pattern)\n self.p = p\n valid = bool(p.search(str(value)))\n except:\n import traceback\n traceback.print_exc()\n return valid, value\n else:\n return valid, value\n\n\nNOT_EXIST = object()\nclass Inputs(object):\n\n def __init__(self, schema):\n self._schema = schema\n\n def validate(self, raw_data):\n self._raw_data = raw_data\n\n result = {}\n self._fields_err = {}\n for name, validator in self._schema.items():\n print name; print validator\n val = self._raw_data.get(name, NOT_EXIST)\n data = None\n err = ''\n print val is NOT_EXIST, val, validator.optional, 'optional'\n if val is NOT_EXIST:\n if not validator.optional:\n err = '缺少字段{}'.format(name)\n else:\n is_valid, data = validator.validate(val)\n if not is_valid: err = validator.msg or '{}字段格式错误'.format(name)\n if err: self._fields_err[name] = err\n result[name] = data\n\n err_str = '\\n'.join(self._fields_err.values()) \n return err_str, result\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6272878646850586, "alphanum_fraction": 0.6722129583358765, "avg_line_length": 22.115385055541992, "blob_id": "fd094683143fb99a4a2e0d0b5d59d99f2799634a", "content_id": "1bcbfb43252a13da36ec9311c50ecb20e1ab3b4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 81, "num_lines": 26, "path": "/migrations/versions/1c198b1a91cb_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 1c198b1a91cb\nRevises: 15e92c9ccee8\nCreate Date: 2015-12-14 14:13:25.240489\n\n\"\"\"\n\n# revision 
identifiers, used by Alembic.\nrevision = '1c198b1a91cb'\ndown_revision = '15e92c9ccee8'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('trial', sa.Column('start_time', sa.DateTime(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('trial', 'start_time')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6284658312797546, "alphanum_fraction": 0.6802217960357666, "avg_line_length": 29.91428565979004, "blob_id": "38aa42d1bf115a3f613723ecb44e57e6319a211c", "content_id": "57cfb0e5d9eb14a80a707a5f604a468335aa8c5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1082, "license_type": "no_license", "max_line_length": 101, "num_lines": 35, "path": "/migrations/versions/3bed4aa05b5a_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3bed4aa05b5a\nRevises: 4593874013ba\nCreate Date: 2015-12-05 17:49:05.566697\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3bed4aa05b5a'\ndown_revision = '4593874013ba'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('wechat_location',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('open_id', sa.String(length=50), nullable=True),\n sa.Column('lng', sa.String(length=50), nullable=True),\n sa.Column('lat', sa.String(length=50), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_wechat_location_open_id'), 'wechat_location', ['open_id'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_wechat_location_open_id'), table_name='wechat_location')\n op.drop_table('wechat_location')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.525486409664154, "alphanum_fraction": 0.5321011543273926, "avg_line_length": 35.390071868896484, "blob_id": "15c6b4f44eb9eb5c849e5e1b5495a8c2cad42ed6", "content_id": "63953da61b7d67d2f6f00571b04a526823a250ad", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 5368, "license_type": "no_license", "max_line_length": 99, "num_lines": 141, "path": "/thirdparty/views.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\nfrom thirdparty.wechat import wechat\nfrom settings import CONTACT\nfrom ops.promote import PromoteService\n\n\nserver_verify = Blueprint('server_verify', __name__,\n template_folder='templates')\n\nmeifenfen_open_ids = set([\n\"o56qvw-ThtwfthGGlZ-XbH-3fjRc\",\n\"o56qvw-OyM2NRJ6jHQDxsOXdF0Pc\",\n\"o56qvw3FG1tt39dw4i8L0SrpBFCQ\",\n\"o56qvw2lAC5eXqIa87o35EYvtxJw\",\n\"o56qvw0fwSQlUMl4ChyafE4ajKjM\",\n\"o56qvw63JsGh6Lz2BU5cUEYlZNAw\",\n\"o56qvw-cLgV-CLxVyKU3P-zJ0aJk\",\n\"o56qvw-hxHtmnZ8bNGYSxFihTjRk\",\n\"o56qvw-hFbnnIMzQA3ArpsYNRylE\",\n\"o56qvwy77LP82ZWZ8q5Gy-ebCOeU\",\n\"o56qvw0PnwLCK7Okesvhkc7d6UGA\",\n\"o56qvwy2plGL6LeBY-gzFtn6_Yis\",\n\"o56qvwzrgKsuO28J7PKymLChJrSY\",\n\"o56qvwwzTB80JXYJYnqKsEO-vXqE\",\n\"o56qvw4oaWtk600BtO1Tsa6BbAQY\",\n\"o56qvw2buPRaEWJ1TdKLn-HxqyBo\",\n\"o56qvw2u0B7NcHfKseEDy-oDK1bI\",\n\"o56qvw3ppto7QGZq96W5zd4p58YQ\",\n\"o56qvwxvcD7ddq1GoEr0XNyVAyYs\",\n\"o56qvw9XQZ2-JATmeVcdMNveGJzk\"\n])\n\ndef check_signature():\n try:\n signature = request.args['signature']\n timestamp = request.args['timestamp']\n nonce = request.args['nonce']\n echostr = request.args.get('echostr') or 'success'\n if wechat.check_signature(signature=signature, 
timestamp=timestamp, nonce=nonce):\n return echostr\n except:\n import traceback\n traceback.print_exc()\n return ''\n\nindex_link= '''http://www.meifenfen.com/user/index/'''\n\nq_link= '''http://www.meifenfen.com/static/user/Activities/home.html'''\nWELCOME_MSG = '''这位小主,欢迎关注美分分\n\n美分分,为你加分!\n“微整形,分期付,美丽加分0负担 ” \n<a href='{}'>→点此查看详情</a>\n\n上海大学生寝室设计大赛火热进行中,\n2000元现金大奖,坐等你来抢\n<a href='{}'>→点此报名领百元大礼包</a>\n'''.format(index_link, q_link)\n\nWELCOME_MSG = '''<a href='http://www.meifenfen.com/static/user/login.html'>美分分,为你加分!</a>\n'''\n\n'''\nhttp://omgitsmgp.com/2012/10/13/notes-on-flask/\nIf a URL has a trailing slash, accessing it without one will automatically redirect;\nif a URL doesn’t have a trailing slash, accessing it with one will generate a 404.\n'''\n@server_verify.route('', methods=['POST','GET'])\ndef wechat_verify():\n try:\n if request.method == 'GET':\n echostr = check_signature()\n if echostr: return echostr\n return 'error'\n elif request.method == 'POST':\n if check_signature():\n print request.form, 'form'\n print(request.data), 'data'\n wechat.parse_data(request.data)\n message = wechat.get_message()\n response = None\n print message.type;\n print message.__dict__\n if message.type=='click' and message.key=='contact_us':\n return wechat.response_text(u'客服电话:{}'.format(CONTACT))\n if message.type=='subscribe':\n #if message.key: #扫码关注\n key = None\n if message.key:\n key = message.key.split('_')[-1]\n log_id = PromoteService.log_qr_user(key, message.source)\n if not log_id:\n PromoteService.set_wechat_status(message.source, 1)\n print log_id, key,'-------qrcode sub'\n if key:\n qrcode = PromoteService.get_qrcode(key)\n if qrcode:\n if log_id: PromoteService.incr_promote_follow_count(qrcode.promoter_id)\n else:\n print 'qrcode not exist'\n PromoteService.set_user_sex.delay(message.source)\n if message.source in meifenfen_open_ids:\n return wechat.response_text(WELCOME_MSG) \n return wechat.response_text('')\n if message.type=='unsubscribe':\n 
PromoteService.set_wechat_status(message.source, 0)\n qrcode_user = PromoteService.get_qrcodeuser_by_open_id(message.source)\n if qrcode_user:\n qrcode = PromoteService.get_qrcode(qrcode_user.qrcode_id)\n if qrcode:\n PromoteService.incr_promote_unfollow_count(qrcode.promoter_id)\n return wechat.response_text('')\n if message.type == 'text':\n if message.content == 'wechat':\n response = wechat.response_text(u'哈哈')\n else:\n response = wechat.response_text(u'?')\n elif message.type == 'image':\n response = wechat.response_text(u'图片')\n elif message.type == 'location':\n print message.longitude, message.latitude #经纬度\n open_id = message.source\n lng = message.longitude\n lat = message.latitude\n is_first = not PromoteService.get_first_location(open_id)\n PromoteService.log_wechat_location(open_id, lng, lat)\n if is_first: PromoteService.set_first_location(open_id, lng, lat)\n response = wechat.response_text(u'地理位置')\n else:\n response = wechat.response_text(u'未知')\n return ''\n else:\n return ''\n except Exception as e:\n import traceback\n traceback.print_exc()\n return ''\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.3377750813961029, "alphanum_fraction": 0.4059736132621765, "avg_line_length": 27.08705711364746, "blob_id": "d606c1e3e47858a293b10498562af4043537b19b", "content_id": "1d165078472d1a455259e88c1b53de6bbc75cb81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 125805, "license_type": "no_license", "max_line_length": 2125, "num_lines": 3917, "path": "/static/doc.html", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "<html>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n\n<style>\n.api-index {\n font-size: 30px;\n}\n.api-wrap > a {\n color: blue;\n text-decoration: none;\n display: block;\n margin: 10px;\n}\n\n.api-wrap{\n overflow-y: scroll;\n position: fixed;\n width: 20%;\n height: 100%;\n float: left;\n padding-bottom: 30px;\n}\n.api-content{\n width: 
77%;\n float: right;\n}\n.api-content > a > pre {\n padding: 10px;\n}\n.doc-api-entry.fold {\n max-height: 100px;\n overflow: hidden;\n padding-bottom: 10px;\n border-bottom: 1px dotted gray;\n cursor: pointer;\n}\n.doc-api-entry.unfold {\n}\n\n.doc-api-entry:hover {\n background: #E4F7F1 !important;\n}\n</style>\n\n\n<div class='api-wrap'>\n\n <div class='api-index'>\n </div>\n <a href='#link1'>1. 美分分首页</a>\n <a href='#link2'>2. 项目分类</a>\n <a href='#link3'>3. 项目列表</a>\n <a href='#link4'>4. 项目详情</a>\n <a href='#link5'>5. 分类筛选</a>\n <a href='#link6'>6. 获取注册验证码</a>\n <a href='#link7'>7. 获取重置密码验证码</a>\n <a href='#link8'>8. 注册</a>\n <a href='#link9'>9. 登录</a>\n <a href='#link10'>10. 退出</a>\n <a href='#link11'>11. 重置密码</a>\n <a href='#link12'>12. 个人页面</a>\n <a href='#link13'>13. 修改名字</a>\n <a href='#link14'>14. 上传图片</a>\n <a href='#link15'>15. 城市选择列表</a>\n <a href='#link16'>16. 项目评论列表</a>\n <a href='#link17'>17. 医院详情</a>\n <a href='#link18'>18. 订单预览</a>\n <a href='#link19'>19. 确认下单</a>\n <a href='#link20'>20. 支付选择接口</a>\n <a href='#link21'>21. 微信支付参数</a>\n <a href='#link22'>22. 支付宝支付参数</a>\n <a href='#link23'>23. 提交评价</a>\n <a href='#link25'>25. 我的心愿单</a>\n <a href='#link26'>26. 加入心愿单</a>\n <a href='#link27'>27. 医院项目列表</a>\n <a href='#link28'>28. 额度申请结果</a>\n <a href='#link29'>29. 提交额度申请</a>\n <a href='#link30'>30. 订单列表</a>\n <a href='#link31'>31. 订单详情</a>\n <a href='#link32'>32. 额度申请文本</a>\n <a href='#link33'>33. 我的优惠券列表</a>\n <a href='#link34'>34. 取消订单</a>\n <a href='#link35'>35. 账单列表</a>\n <a href='#link36'>36. 还款历史</a>\n <a href='#link37'>37. 完成订单</a>\n <a href='#link38'>38. 订单还款列表</a>\n <a href='#link39'>39. 选择还款</a>\n <a href='#link40'>40. 通知列表</a>\n <a href='#link41'>41. 标记通知已读</a>\n <a href='#link44'>44. 支付宝还款</a>\n <a href='#link45'>45. 微信还款</a>\n <a href='#link46'>46. 还款方式选择</a>\n <a href='#link47'>47. 推荐商品</a>\n <a href='#link48'>48. 意见反馈</a>\n <a href='#link49'>49. 检查更新</a>\n <a href='#link50'>50. 上传设备信息</a>\n <a href='#link51'>51. 
新版首页</a>\n <a href='#link52'>52. 美攻略列表</a>\n <a href='#link53'>53. 每日优惠券列表</a>\n <a href='#link54'>54. 医院列表</a>\n <a href='#link55'>55. 领取优惠券</a>\n <a href='#link56'>56. 美攻略详情</a>\n <a href='#link57'>57. 医院筛选</a>\n <a href='#link58'>58. 赠送优惠券</a>\n\n</div>\n\n<div class='api-content'>\n\n<a name='link1'>\n<pre class='doc-api-entry fold'>\n1. 首页\n /api/index/ GET\n city_id 存在全局的定位成功的已开通城市的id\n返回:\n{\n \"city\": {\n \"name\": \"上海\",\n \"id\": 1\n },\n \"code\": 0,\n \"recommend_items\": [\n {\n \"width\": 710,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448967430.36\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"进口水光注射深层补水 一次注射等于1千次面膜\",\n \"hospital_id\": 1,\n \"period_money\": 252,\n \"id\": 19,\n \"orig_price\": 6800,\n \"period_count\": 24,\n \"price\": 5000\n },\n \"item_id\": 19,\n \"id\": 7,\n \"desc\": \"该项目通过国际CFDA认证,安全放心\"\n },\n {\n \"width\": 710,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868936.11\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"伊婉玻尿酸打造饱满柔和年轻面部 做更美的自己\",\n \"hospital_id\": 1,\n \"period_money\": 242,\n \"id\": 2,\n \"orig_price\": 5600,\n \"period_count\": 24,\n \"price\": 4800\n },\n \"item_id\": 2,\n \"id\": 2,\n \"desc\": \"该项目纯进口,安全,且效果立显,不影响工作学习\"\n },\n {\n \"width\": 710,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941657.7\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"非手术瘦脸 肉毒素塑造上镜玲珑小脸\",\n \"hospital_id\": 1,\n \"period_money\": 242,\n \"id\": 5,\n \"orig_price\": 5500,\n \"period_count\": 24,\n \"price\": 4800\n },\n \"item_id\": 5,\n \"id\": 3,\n \"desc\": \"该项目为进口botox瘦脸针,安全见效快\"\n },\n {\n \"width\": 710,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448865835.83\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"埋线双眼皮打造灵动双眸 魅力无限\",\n \"hospital_id\": 1,\n \"period_money\": 115,\n \"id\": 1,\n \"orig_price\": 2200,\n \"period_count\": 18,\n 
\"price\": 1800\n },\n \"item_id\": 1,\n \"id\": 1,\n \"desc\": \"该项目个性化设计,且损伤小、恢复快、易修复\"\n },\n {\n \"width\": 710,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941598.53\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"假体隆出惊艳鼻型 告别窝瓜脸塌塌鼻\",\n \"hospital_id\": 2,\n \"period_money\": 235,\n \"id\": 10,\n \"orig_price\": 4500,\n \"period_count\": 18,\n \"price\": 3680\n },\n \"item_id\": 10,\n \"id\": 5,\n \"desc\": \"该项目采用的硅胶假体与鼻腔相容性好,较少排异\"\n }\n ],\n \"activity_items\": [\n {\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941231.99\",\n \"id\": 2,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"白瓷娃娃改善肤质 \",\n \"hospital_id\": 2,\n \"period_money\": 0,\n \"id\": 7,\n \"orig_price\": 1200,\n \"period_count\": 12,\n \"price\": 0.03\n },\n \"item_id\": 7,\n }\n },\n {\n \"item_id\": 6,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"超声波洗牙\",\n \"hospital_id\": 3,\n \"period_money\": 27,\n \"id\": 6,\n \"orig_price\": 500,\n \"period_count\": 12,\n \"price\": 300\n },\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941251.18\",\n \"id\": 3\n },\n {\n \"item_id\": 16,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"月光真空脱腋毛 \",\n \"hospital_id\": 2,\n \"period_money\": 91,\n \"id\": 16,\n \"orig_price\": 1400,\n \"period_count\": 12,\n \"price\": 1000\n },\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941213.95\",\n \"id\": 1\n }\n ],\n \"recommend_sub_cats\": [\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn1.png\",\n \"id\": 5\n },\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn2.png\",\n \"id\": 8\n },\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn3.png\",\n \"id\": 3\n }\n ],\n \"msg\": \"\",\n \"activity\": {\n \"title\": \"限时美折\",\n \"end_time\": \"2016-02-01 09:00:00\",\n \"desc\": \"上线打折\"\n },\n \"city_name\": null,\n \"banners\": [\n {\n \"image\": 
\"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg\",\n \"link\": \"http://139.196.6.231/static/user/banner1.html\",\n 'cat': 2 #1网页 2申请额度\n\n },\n {\n \"image\": \"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg\",\n \"link\": \"http://139.196.6.231/static/user/banner1.html\",\n 'cat': 2 #1网页 2申请额度\n },\n {\n \"image\": \"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg\",\n \"link\": \"http://139.196.6.231/static/user/banner1.html\",\n 'cat': 2 #1网页 2申请额度\n }\n ],\n \"city_code\": null\n}\n</pre>\n</a>\n\n\n<a name='link2'>\n<pre class='doc-api-entry fold'>\n2. 项目分类\n /api/item_cats/ GET\n\n返回:\n{\n \"data\": [\n {\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"双眼皮\",\n \"cat_id\": 2,\n \"desc\": \"\",\n \"id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868653.71\"\n },\n {\n \"status\": 1,\n \"name\": \"隆鼻\",\n \"cat_id\": 3,\n \"desc\": \"\",\n \"id\": 8,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868616.87\"\n },\n {\n \"status\": 1,\n \"name\": \"祛痘\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868685.91\"\n },\n {\n \"status\": 1,\n \"name\": \"祛斑\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868808.48\"\n },\n {\n \"status\": 1,\n \"name\": \"点痣\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868716.87\"\n },\n {\n \"status\": 1,\n \"name\": \"肉毒素\",\n \"cat_id\": 5,\n \"desc\": \"\",\n \"id\": 11,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868544.83\"\n },\n {\n \"status\": 1,\n \"name\": \"玻尿酸\",\n \"cat_id\": 5,\n \"desc\": \"\",\n \"id\": 10,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868528.36\"\n },\n {\n \"status\": 1,\n \"name\": 
\"脱毛\",\n \"cat_id\": 4,\n \"desc\": \"\",\n \"id\": 9,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868599.35\"\n },\n {\n \"status\": 1,\n \"name\": \"牙齿美白\",\n \"cat_id\": 6,\n \"desc\": \"\",\n \"id\": 13,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448890154.66\"\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/tuijian_hong.png\",\n \"id\": 0,\n \"name\": \"推荐\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/tuijian_hui.png\"\n },\n {\n \"id\": 1,\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"祛斑\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868808.48\"\n },\n {\n \"status\": 1,\n \"name\": \"点痣\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868716.87\"\n },\n {\n \"status\": 1,\n \"name\": \"祛痘\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868685.91\"\n },\n {\n \"status\": 1,\n \"name\": \"嫩肤\",\n \"cat_id\": 1,\n \"desc\": \"\",\n \"id\": 4,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868670.32\"\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/pifu_hong.png\",\n \"name\": \"皮肤\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/pifu_hui.png\"\n },\n {\n \"id\": 2,\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"双眼皮\",\n \"cat_id\": 2,\n \"desc\": \"\",\n \"id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868653.71\"\n },\n {\n \"status\": 1,\n \"name\": \"开眼角\",\n \"cat_id\": 2,\n \"desc\": \"\",\n \"id\": 6,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868641.63\"\n },\n {\n \"status\": 1,\n \"name\": \"去眼袋\",\n \"cat_id\": 2,\n \"desc\": \"\",\n \"id\": 7,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868629.15\"\n }\n ],\n \"icon_active\": 
\"http://www.meifenfen.com/static/user/img/yanbu_hong.png\",\n \"name\": \"眼部\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/yanbu_hui.png\"\n },\n {\n \"id\": 3,\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"隆鼻\",\n \"cat_id\": 3,\n \"desc\": \"\",\n \"id\": 8,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868616.87\"\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/bibu_hong.png\",\n \"name\": \"鼻部\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/bibu_hui.png\"\n },\n {\n \"id\": 4,\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"脱毛\",\n \"cat_id\": 4,\n \"desc\": \"\",\n \"id\": 9,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868599.35\"\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/maofa_hong.png\",\n \"name\": \"毛发\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/maofa_hui.png\"\n },\n {\n \"id\": 5,\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"玻尿酸\",\n \"cat_id\": 5,\n \"desc\": \"\",\n \"id\": 10,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868528.36\"\n },\n {\n \"status\": 1,\n \"name\": \"肉毒素\",\n \"cat_id\": 5,\n \"desc\": \"\",\n \"id\": 11,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868544.83\"\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/weizhengxing_hong.png\",\n \"name\": \"微整形\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/weizhengxing_hui.png\"\n },\n {\n \"id\": 6,\n \"sub_cats\": [\n {\n \"status\": 1,\n \"name\": \"洗牙\",\n \"cat_id\": 6,\n \"desc\": \"\",\n \"id\": 12,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448883145.29\"\n },\n {\n \"status\": 1,\n \"name\": \"牙齿美白\",\n \"cat_id\": 6,\n \"desc\": \"\",\n \"id\": 13,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448890154.66\"\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/yachi_hong.png\",\n \"name\": 
\"口腔\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/yachi_hui.png\"\n },\n {\n \"id\": 7,\n \"sub_cats\": [],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/xingti_hong.png\",\n \"name\": \"形体\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/xingti_hui.png\"\n }\n ]\n}\n</pre>\n</a>\n\n\n<a name='link3'>\n<pre class='doc-api-entry fold'>\n3. 项目列表\n /api/item_list/ GET\n 参数:\n sub_cat_id: 子分类id\n sort_type: 排序类型 1综合 2销量 3低价 4高价优先\n city_id: 城市id\n offset 上次接口返回值, 首次传空字符串或者不传\n\n返回:\n{\n \"msg\": \"\",\n \"has_more\": true,\n \"infos\": [\n {\n \"has_fee\": true,\n \"title\": \"超声波洗牙\",\n \"period_money\": 27,\n \"price\": 300,\n \"orig_price\": 500,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448890069.99\",\n \"period_count\": 12,\n \"sold_count\": 0,\n \"hospital\": {\n \"name\": \"上海天大医疗美容医院\",\n \"id\": 3\n },\n \"id\": 6\n },\n {\n \"has_fee\": true,\n \"title\": \"韩式三点双眼皮 告别眯眯眼 双眼带电做女神\",\n \"period_money\": 243,\n \"price\": 3800,\n \"orig_price\": 4500,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448884720.47\",\n \"period_count\": 18,\n \"sold_count\": 0,\n \"hospital\": {\n \"name\": \"上海真爱医疗美容医院\",\n \"id\": 2\n },\n \"id\": 9\n }\n ],\n \"code\": 0,\n \"offset\": \"9_\"\n}\n</pre>\n</a>\n\n\n<a name='link4'>\n<pre class='doc-api-entry fold'>\n4. 
项目详情\n /api/item_detail GET\n 参数:\n item_id 项目id\n\n返回:\n{\n \"code\": 0,\n \"verified\": true,\n \"need_pay\": 0,\n \"pay_choices\": [\n {\n \"disabled\": false,\n \"period_total\": 1800,\n \"choice_title\": \"¥1800\",\n \"period_count\": 0,\n \"period_fee\": 0,\n \"need_pay\": 0,\n \"id\": 0,\n \"choice_fee\": \"(直购)\",\n \"period_amount\": 0\n },\n {\n \"fee\": 0.03,\n \"credit_used\": 1854,\n \"need_pay\": 0,\n \"period_money\": 618,\n \"period_total\": 618,\n \"credit\": 10000,\n \"choice_title\": \"¥618x3期\",\n \"disabled\": false,\n \"period_count\": 3,\n \"total\": 1854,\n \"period_fee\": 18,\n \"id\": 1,\n \"choice_fee\": \"(含每期服务费¥18.0)\",\n \"period_amount\": 600\n },\n {\n \"fee\": 0.05,\n \"credit_used\": 1890,\n \"need_pay\": 0,\n \"period_money\": 315,\n \"period_total\": 315,\n \"credit\": 10000,\n \"choice_title\": \"¥315x6期\",\n \"disabled\": false,\n \"period_count\": 6,\n \"total\": 1890,\n \"period_fee\": 15,\n \"id\": 2,\n \"choice_fee\": \"(含每期服务费¥15.0)\",\n \"period_amount\": 300\n },\n {\n \"fee\": 0.102,\n \"credit_used\": 1983.6,\n \"need_pay\": 0,\n \"period_money\": 165.3,\n \"period_total\": 165.3,\n \"credit\": 10000,\n \"choice_title\": \"¥165.3x12期\",\n \"disabled\": false,\n \"period_count\": 12,\n \"total\": 1983.6,\n \"period_fee\": 15.3,\n \"id\": 3,\n \"choice_fee\": \"(含每期服务费¥15.3)\",\n \"period_amount\": 150\n },\n {\n \"fee\": 0.153,\n \"credit_used\": 2075.4,\n \"need_pay\": 0,\n \"period_money\": 115.3,\n \"period_total\": 115.3,\n \"credit\": 10000,\n \"choice_title\": \"¥115.3x18期\",\n \"disabled\": false,\n \"period_count\": 18,\n \"total\": 2075.4,\n \"period_fee\": 15.3,\n \"id\": 5,\n \"choice_fee\": \"(含每期服务费¥15.3)\",\n \"period_amount\": 100\n }\n ],\n 'disable_msg' : '选择分期期数需小于现在到毕业前六个月的月数',\n \"has_fav\": false,\n \"hospital\": {\n \"addr\": \"上海市徐汇区漕宝路111号\",\n \"tag_list\": [\n \"民营美容医院\",\n \" 上市集团直投\",\n \" 国内外专家汇聚\"\n ],\n \"photo_list\": [\n 
\"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal8525515082b8f338.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal8525515085a81bdb.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal8525515093a35155.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal852551508785aaa7.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5btdcnp1phi1rnc1dpase6vv5ai红蓝光1.JPG\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5btdcnp1phi1rnc1dpase6vv5aiCO2激光.JPG\"\n ],\n \"working_time\": \"09:00——18:00\",\n \"phone\": \"4006177321\",\n \"long_lat\": \"121.424442,31.167829\",\n \"desc\": \"千年传承 续写美丽传奇\\r\\n千年前的盛世汉唐,未央宫的美人们以黛画眉,蘸丹沙点绛唇,引领古代美容潮流。千年后的美未央,传承传统汉唐美学理念,结合现代医学手段,汇聚中、韩、日、台湾等地整形美容大家,运用国际水平的专业技术与顶级医疗美容服务为高标准的求美者提供专业化、国际化、个性化的系列专属整形美容与健康综合解决方案,续写千年的美丽传奇。\\r\\n上市公司直接投资 明星达人首选医院\\r\\n上海美未央医疗美容医院坐落于上海市徐汇区漕宝路111号,是由上市公司参与投资的全新理念整形美容医院,明星及时尚人士首选医院。美未央下辖整形美容外科、美容皮肤科、美容口腔科三个核心临床科室,为广大求美者提供涵盖整形美容、激光美肤、皮肤养护、注射美容、牙美容和抗衰老、功能医学、境外医疗等全系列医美服务。\\r\\n高品质个性化服务 专注医疗技术效果\\r\\n医院汇聚中、韩、法、日、台湾等地整形美容大家,拥有实力强大的国际级医疗专家团队,国内外最新技术及仪器设备,致力于将中国传统美学与现代医学科技完美融合。应用“尖端技术设备+诊疗手法”为广大求美者提供“时尚+安全”的微整形服务,医院结合美丽与科技,不断创新的医疗美容体系和高品质个性化服务,引领前沿技术。在行业还在批量、流水线式整形时,提倡回归医疗本质,专注医疗技术和效果,为爱美女性提供全方位、个性化的定制医疗美容服务,收到社会各界的广泛赞誉与认可!\\r\\n\",\n \"id\": 1,\n \"name\": \"上海美未央医疗美容整形医院\"\n },\n \"credit_amount\": 10000,\n \"comments\": {\n \"infos\": [\n {\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/comment/1449645823.29\"\n ],\n \"is_anonymous\": false,\n \"thumb_photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/comment/1449645823.29?imageView2/1/w/200/h/200\"\n ],\n \"content\": \"绝对不是心里作用啊!效果太明显了\",\n \"rate\": 5,\n \"create_time\": \"2015-12-09 15:23:45\",\n \"user\": {\n \"id\": 44,\n \"name\": \"为了青春美\",\n \"avatar\": \"http://7xnpdb.com1.z0.glb.clouddn.com/avatar/1449645488.09\"\n },\n \"item_id\": 7,\n \"id\": 5,\n \"is_re_comment\": false\n }\n ],\n \"total\": 0\n },\n \"item\": {\n \"direct_buy\": true,\n 
\"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e8s4bf17cgj7v1unaa18tho1操作图.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e8s4bf17cgj7v1unaa18tho1案例.jpg\"\n ],\n \"has_fee\": true,\n \"title\": \"埋线双眼皮打造灵动双眸 魅力无限\",\n \"hospital_id\": 1,\n \"price\": 1800,\n \"support_choice_list\": [\n 1,\n 2,\n 3,\n 5\n ],\n \"use_time\": \"需提前一天预约\",\n \"note\": \"费用仅包含手术费、血常规、凝血四项麻醉\",\n \"orig_price\": 2200,\n \"id\": 1\n },\n \"msg\": \"\",\n \"can_comment\": false,\n \"total_period_amount\": 1800\n}\n</pre>\n</a>\n\n\n<a name='link5'>\n<pre class='doc-api-entry fold'>\n5. 分类筛选\n /api/item_filters GET\n参数:\n city_id 当前城市id\n sub_cat_id 分类id\n返回:\n{\n \"sort_type_obj\": {\n \"id\": 1,\n \"name\": \"综合排序\"\n },\n \"city\": {\n \"name\": \"上海\",\n \"city_code\": \"289\",\n \"id\": 1\n },\n \"subcat\": {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868653.71\",\n \"name\": \"双眼皮\",\n \"id\": 5\n },\n \"order_choices\": [\n {\n \"id\": 1,\n \"name\": \"综合排序\"\n },\n {\n \"id\": 2,\n \"name\": \"销量优先\"\n },\n {\n \"id\": 3,\n \"name\": \"低价优先\"\n },\n {\n \"id\": 4,\n \"name\": \"高价优先\"\n }\n ],\n \"citys\": [\n {\n \"name\": \"上海\",\n \"city_code\": \"289\",\n \"id\": 1\n }\n ],\n \"data\": [\n {\n \"sub_cats\": [\n {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868653.71\",\n \"name\": \"双眼皮\",\n \"id\": 5\n },\n {\n \"cat_id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868544.83\",\n \"name\": \"肉毒素\",\n \"id\": 11\n },\n {\n \"cat_id\": 6,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448890154.66\",\n \"name\": \"牙齿美白\",\n \"id\": 13\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/tuijian_hong.png\",\n \"id\": 0,\n \"name\": \"推荐\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/tuijian_hui.png\"\n },\n {\n \"id\": 1,\n \"sub_cats\": [\n {\n \"cat_id\": 1,\n \"icon\": 
\"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868670.32\",\n \"name\": \"嫩肤\",\n \"id\": 4\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448868685.91\",\n \"name\": \"祛痘\",\n \"id\": 3\n },\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/pifu_hong.png\",\n \"name\": \"皮肤\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/pifu_hui.png\"\n },\n ]\n}\n\n</pre>\n</a>\n\n\n<a name='link6'>\n<pre class='doc-api-entry fold'>\n6. 获取注册验证码\n /api/get_reg_vcode/ POST\n 参数:\n phone 手机号\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0\n}\n或\n{\n \"msg\": \"手机号已存在\",\n \"code\": 10000\n}\n\n</pre>\n</a>\n\n\n<a name='link7'>\n<pre class='doc-api-entry fold'>\n7. 获取重置密码验证码\n /api/get_vcode/ POST\n 参数:\n phone 手机号\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0\n}\n或\n{\n \"msg\": \"手机号不存在\",\n \"code\": 10000\n}\n\n</pre>\n</a>\n\n\n<a name='link8'>\n<pre class='doc-api-entry fold'>\n8. 注册\n /api/signup_post/ POST\n 参数:\n phone 手机号\n vcode 验证码\n passwd 密码 (md5加密的密码 加密key为meifenfen)\n\n返回:\n{\n \"msg\": \"\",\n \"token\": \"1.4a875d4958fd45d81460b56c548b0a22\", 即为sign_user\n \"code\": 0\n}\n或\n{\n \"msg\": \"验证码错误\",\n \"code\": 10000\n}\n\n</pre>\n</a>\n\n\n<a name='link9'>\n<pre class='doc-api-entry fold'>\n9. 登录\n /api/login_post/ POST\n 参数:\n phone 手机号\n passwd 密码 (md5加密的密码 加密key为meifenfen)\n\n返回:\n{\n \"msg\": \"\",\n \"token\": \"1.4a875d4958fd45d81460b56c548b0a22\", 即为sign_user\n \"code\": 0\n}\n\n</pre>\n</a>\n\n\n<a name='link10'>\n<pre class='doc-api-entry fold'>\n10. 退出\n /api/logout/ GET\n 参数:\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0\n}\n\n</pre>\n</a>\n\n\n\n<a name='link11'>\n<pre class='doc-api-entry fold'>\n11. 重置密码\n /api/reset_passwd/ POST\n 参数:\n phone 手机号\n vcode 短信验证码\n passwd 密码 (md5加密的密码 加密key为meifenfen)\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0\n}\n或\n{\n \"msg\": \"验证码错误\",\n \"code\": 10000\n}\n</pre>\n</a>\n\n\n<a name='link12'>\n<pre class='doc-api-entry fold'>\n12. 
个人页面\n /api/home/ GET\n 参数:\n sign_user 登录签名\n\n返回:\n{\n \"can_edit_name\": false, #能否编辑名字\n \"bill_str\": \"本期应还2000元\", #应还款项\n \"remain\": 10000, #额度剩余\n \"user\": {\n \"id\": 1,\n \"phone\": '18801795295',\n \"avatar\": \"http://7xnpdb.com1.z0.glb.clouddn.com/avatar/1448878641.78\",\n \"name\": \"xianpeng\"\n },\n \"period_to_pay\": 1885.44, #本期应还\n \"has_delayed\": true, #是否逾期\n \"remain_days\": 8, #逾期天数\n \"total\": 10000, #总额度\n \"apply_status\": 1, #0未申请 1申请中 2已通过 3被拒绝\n \"coupon_count\": 17 #优惠券数量\n \"notification_unread\": 0 #未读通知数\n}\n</pre>\n</a>\n\n\n\n<a name='link13'>\n<pre class='doc-api-entry fold'>\n13. 修改名字\n /api/edit_name/ POST\n 参数:\n name 修改后的名字\n sign_user 登录签名\n返回:\n{\n \"code\" : 0,\n \"msg\" : '修改成功'\n}\n或\n{\n \"code\" : 10000,\n \"msg\" : '只能修改一次'\n}\n</pre>\n</a>\n\n\n<a name='link14'>\n<pre class='doc-api-entry fold'>\n14. 上传图片\n /api/upload_image/ POST\n 参数:\n file 图片\n image_cat 图片类型 (avatar, comment)\n sign_user 登录签名\n返回:\n{\n \"msg\": \"上传成功\",\n \"image\": \"avatar/1450922990.77.jpg\",\n \"code\": 0,\n \"fullpath\": \"http://7xnpdb.com1.z0.glb.clouddn.com/avatar/1450922990.77.jpg\"\n}\n或\n{\n \"msg\": \"上传失败\",\n \"code\": 10000,\n}\n</pre>\n</a>\n\n<a name='link15'>\n<pre class='doc-api-entry fold'>\n15. 城市选择列表\n /api/meifenfen_city/ GET\n 参数:\n amap_code 高德地图城市代码\n city_name 城市名字\n返回:\n{\n \"status\": 1, #当前城市状态 1无法定位 2未开通 3已开通\n \"citys\": [ \n {\n \"name\": \"上海\",\n \"city_code\": \"289\",\n \"id\": 1\n }\n ],\n \"current_city\": null #当前城市 定位成功前开通的返回为{'id':1, 'name':'上海'}\n}\n</pre>\n</a>\n\n<a name='link16'>\n<pre class='doc-api-entry fold'>\n16. 
项目评论列表 / 我的项目评论列表\n /api/comment_list/ GET\n 参数:\n item_id 项目id\n is_mine 0查看全部评论 1只查看我自己的评论\n sign_user 签名 is_mine为1时必传\n\n返回:\n{\n \"msg\": \"\",\n \"has_more\": false,\n \"hospital\": {\n \"id\": 1,\n \"name\": \"上海美未央医疗美容整形医院\"\n },\n \"item\": {\n \"title\": \"伊婉玻尿酸打造饱满柔和年轻面部 做更美的自己\",\n \"hospital_id\": 1,\n \"price\": 4800,\n \"id\": 2,\n \"orig_price\": 5600,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448867706.82\"\n },\n \"infos\": [\n {\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/comment/1449645823.29\"\n ],\n \"is_anonymous\": false,\n \"thumb_photo_list\": [ #缩略图\n \"http://7xnpdb.com1.z0.glb.clouddn.com/comment/1449645823.29?imageView2/1/w/200/h/200\"\n ],\n \"content\": \"绝对不是心里作用啊!效果太明显了\",\n \"rate\": 5,\n \"create_time\": \"2015-12-09 15:23:45\",\n \"user\": {\n \"id\": 44,\n \"name\": \"为了青春美\",\n \"avatar\": \"http://7xnpdb.com1.z0.glb.clouddn.com/avatar/1449645488.09\"\n },\n \"item_id\": 7,\n \"id\": 5,\n \"is_re_comment\": false\n }\n ]\n \"code\": 0,\n \"offset\": \"1\"\n}\n</pre>\n</a>\n\n\n<a name='link17'>\n<pre class='doc-api-entry fold'>\n17. 
医院详情\n /api/hospital_detail/ GET\n 参数:\n hospital_id 医院id\n\n返回:\n{\n \"msg\": \"\",\n \"hospital\": {\n \"addr\": \"上海市徐汇区漕宝路111号\",\n \"tag_list\": [\n \"民营美容医院\",\n \" 上市集团直投\",\n \" 国内外专家汇聚\"\n ],\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal8525515082b8f338.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal8525515085a81bdb.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal8525515093a35155.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dr9kht5is1hl72v81h2mal852551508785aaa7.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5btdcnp1phi1rnc1dpase6vv5ai红蓝光1.JPG\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5btdcnp1phi1rnc1dpase6vv5aiCO2激光.JPG\"\n ],\n \"working_time\": \"09:00——18:00\",\n \"phone\": \"4006177321\",\n \"long_lat\": \"121.424442,31.167829\",\n \"desc\": \"千年传承 续写美丽传奇\\r\\n千年前的盛世汉唐,未央宫的美人们以黛画眉,蘸丹沙点绛唇,引领古代美容潮流。千年后的美未央,传承传统汉唐美学理念,结合现代医学手段,汇聚中、韩、日、台湾等地整形美容大家,运用国际水平的专业技术与顶级医疗美容服务为高标准的求美者提供专业化、国际化、个性化的系列专属整形美容与健康综合解决方案,续写千年的美丽传奇。\\r\\n上市公司直接投资 明星达人首选医院\\r\\n上海美未央医疗美容医院坐落于上海市徐汇区漕宝路111号,是由上市公司参与投资的全新理念整形美容医院,明星及时尚人士首选医院。美未央下辖整形美容外科、美容皮肤科、美容口腔科三个核心临床科室,为广大求美者提供涵盖整形美容、激光美肤、皮肤养护、注射美容、牙美容和抗衰老、功能医学、境外医疗等全系列医美服务。\\r\\n高品质个性化服务 专注医疗技术效果\\r\\n医院汇聚中、韩、法、日、台湾等地整形美容大家,拥有实力强大的国际级医疗专家团队,国内外最新技术及仪器设备,致力于将中国传统美学与现代医学科技完美融合。应用“尖端技术设备+诊疗手法”为广大求美者提供“时尚+安全”的微整形服务,医院结合美丽与科技,不断创新的医疗美容体系和高品质个性化服务,引领前沿技术。在行业还在批量、流水线式整形时,提倡回归医疗本质,专注医疗技术和效果,为爱美女性提供全方位、个性化的定制医疗美容服务,收到社会各界的广泛赞誉与认可!\\r\\n\",\n \"id\": 1,\n \"name\": \"上海美未央医疗美容整形医院\"\n },\n \"code\": 0,\n \"infos\": [\n {\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5el5vs9e8gff31vta1bjvgam1f操作图.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5el5vs9e8gff31vta1bjvgam1f案例.jpg\"\n ],\n \"has_fee\": true,\n \"title\": \"瑞蓝玻尿酸注射美容 打造精致立体饱满面容\",\n \"period_money\": 342,\n \"price\": 6800,\n \"orig_price\": 8000,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448969744.72\",\n 
\"period_count\": 24,\n \"id\": 20\n },\n {\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e90gng1hjf67ehqa1p57guc1k操作图2.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e90gng1hjf67ehqa1p57guc1k操作图.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e90gng1hjf67ehqa1p57guc1k案例.jpg\"\n ],\n \"has_fee\": true,\n \"title\": \"硅胶假体隆鼻 搞定立体挺拔美鼻 娇俏鼻型不留痕迹\",\n \"period_money\": 434,\n \"price\": 6800,\n \"orig_price\": 7800,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448870958.6\",\n \"period_count\": 18,\n \"id\": 3\n }\n ]\n}\n</pre>\n</a>\n\n\n\n<a name='link18'>\n<pre class='doc-api-entry fold'>\n18. 订单预览\n /api/order_preview/ GET\n 参数:\n item_id 项目id\n period_choice_id 分期选项id 直购时为0\n coupon_id 优惠券id 选填 不使用代金券传0 \n sign_user 登录签名\n\n注意:\n 顶部《去申请框》显示条件:选择分期购买项目(period_choice_id不为0), 并且credit_status不为1,2\n\n返回:\n{\n \"code\": 0,\n \"coupon_list\": [ #可用优惠券列表\n {\n \"status\": 0,\n \"coupon_cat\": 0,\n \"remark\": \"\",\n \"cat_str\": \"全部适用\",\n \"user_id\": 1,\n \"title\": \"牙齿小类代金券\",\n \"is_trial\": 0,\n \"price\": 100,\n \"coupon_id\": 4,\n \"need\": 0,\n \"create_time\": \"2015-12-29 10:20:32\",\n \"id\": 34,\n \"end_time\": \"2016-01-28 10:20:32\",\n \"remain_str\": \"21天到期\",\n \"item_id\": null,\n \"cat_id\": null,\n \"sub_cat_id\": 13\n },\n {\n \"status\": 0,\n \"coupon_cat\": 0,\n \"remark\": \"\",\n \"cat_str\": \"全部适用\",\n \"user_id\": 1,\n \"title\": \"牙齿小类代金券\",\n \"is_trial\": 0,\n \"price\": 100,\n \"coupon_id\": 4,\n \"need\": 0,\n \"create_time\": \"2015-12-29 10:20:29\",\n \"id\": 33,\n \"end_time\": \"2016-01-28 10:20:29\",\n \"remain_str\": \"21天到期\",\n \"item_id\": null,\n \"cat_id\": null,\n \"sub_cat_id\": 13\n }\n ],\n \"need_pay\": 4800, #需首付金额或直购金额\n \"hospital\": {\n \"name\": \"上海美未央医疗美容整形医院\",\n \"id\": 1\n },\n \"credit_amount\": 0, #总分期金额 包含所有分期手续费\n \"credit_status\": 1, #0未申请 1审核中 2审核通过 3被拒\n \"coupon_id\": 0, #当前优惠券id\n \"period_total\": 0, #分期总额 包含分期手续费\n \"coupon_title\": \"\", 
#当前优惠券标题\n \"item\": {\n \"direct_buy\": true,\n \"has_fee\": true,\n \"title\": \"伊婉玻尿酸打造饱满柔和年轻面部 做更美的自己\",\n \"hospital_id\": 1,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448867706.82\",\n \"support_choice_list\": [\n 1,\n 2,\n 3,\n 5,\n 6\n ],\n \"price\": 4800,\n \"orig_price\": 5600,\n \"id\": 2,\n \"sub_cat_id\": 10\n },\n \"period_count\": 1, #分期数\n \"msg\": \"\",\n \"coupon_amout\": 0, #优惠券金额\n \"total\": 4800, #订单金额\n \"period_fee\": 0, #每期手续费\n \"credit_amount_can_use\": 0,\n}\n</pre>\n</a>\n\n\n<a name='link19'>\n<pre class='doc-api-entry fold'>\n19. 确认下单\n /api/confirm_order/ POST\n 参数:\n item_id 项目id\n period_choice_id 分期选项id 直购时为0\n coupon_id 优惠券id 选填\n sign_user 登录签名\n返回:\n{\n \"msg\": \"\",\n \"order_id\": 45,\n \"need_pay\": 0 #如果大于0 跳转到支付方式选择页面 如果等于0跳转到支付成功页面\n \"code\": 0\n}\n</pre>\n</a>\n\n<a name='link20'>\n<pre class='doc-api-entry fold'>\n20. 支付选择接口\n /api/order_pay/ GET\n 参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"pay_choices\": [\n {\n \"id\": 1,\n \"name\": \"支付宝\"\n },\n {\n \"id\": 2,\n \"name\": \"微信\"\n }\n ],\n \"code\": 0,\n \"order_info\": {\n \"refund\": 0,\n \"status\": 2,\n \"credit_amount\": 969.76,\n \"user_id\": 1,\n \"order_no\": \"201512031455236648454158\",\n \"credit_choice_id\": 3,\n \"price\": 0,\n \"user_finished\": false,\n \"coupon_id\": 0,\n \"create_time\": \"2015-12-03 14:55:48\",\n \"coupon_amount\": 0,\n \"credit_verified\": 1,\n \"total_fee\": 89.76,\n \"hospital_id\": 2,\n \"item_id\": 16,\n \"total\": 880,\n \"id\": 36,\n \"transaction_id\": null\n }\n}\n</pre>\n</a>\n\n\n<a name='link21'>\n<pre class='doc-api-entry fold'>\n21. 
微信支付参数\n /api/wx_order_pay_action/ GET\n 参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"data\": {\n \"packageValue\": \"Sign=WxPay\",\n \"timestamp\": \"1452570119\",\n \"sign\": \"EEDA9D29B4C85CBC86DC56C9271969E3\",\n \"partnerid\": \"1305025101\",\n \"appid\": \"wx1e8901446967b46b\",\n \"prepayid\": \"wx20160112114159ca265c03700864695495\",\n \"noncestr\": \"457nwi2paxtgx9q34fhexci7c4t65sfm\"\n },\n}\n或\n{\n \"msg\": \"调用微信支付失败\",\n \"code\": 10000\n}\n</pre>\n</a>\n\n\n<a name='link22'>\n<pre class='doc-api-entry fold'>\n22. 支付宝支付参数\n /api/alipay_order_pay_action/ GET\n 参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"sign_str\": \"_input_charset=\\\"utf-8\\\"&body=\\\"上海真爱医疗美容医院\\\"&notify_url=\\\"http%3A%2F%2F139.196.6.231%2Fuser%2Fapi%2Falipay_notify\\\"&out_trade_no=\\\"201512031455236648454158\\\"&partner=\\\"2088021957827236\\\"&payment_type=\\\"1\\\"&seller_id=\\\"[email protected]\\\"&service=\\\"mobile.securitypay.pay\\\"&subject=\\\"月光真空脱腋毛 \\\"&total_fee=\\\"0.00\\\"&sign=\\\"SU4FCGLu%2BCO1rRPrvuv%2BbOlfslMzvZAWhyYN5X%2F1IoLUwLkogAZFYWN8KYZVeKm6XfrD%2BYKKzmzo7oRhV2Em1fV1VaHDEhNu92OZlNZBr7EXSV2a%2F0jMUvbgu8GEpfGfawRQMxL%2F%2F6xSkg4UBySk62ONWVtAoX2MWeGCmYup1zU%3D\\\"&sign_type=\\\"RSA\\\"\"\n}\n</pre>\n</a>\n\n<a name='link23'>\n<pre class='doc-api-entry fold'>\n23. 提交评价\n /api/comment_post/ POST\n 参数:\n order_id 订单id\n content 评价内容\n rate 评星 1-5数字\n is_anonymous 0不匿名 1匿名\n photos 逗号分隔的字符串 upload_image接口返回的image字段 用逗号拼接起来的 如: \"avatar/1450922990.77.jpg,avatar/1450922990.77.jpg\"\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"评论成功\",\n \"item_id\": 1,\n \"comment_id\": 28,\n \"code\": 0\n}\n</pre>\n</a>\n\n\n<a name='link25'>\n<pre class='doc-api-entry fold'>\n25. 
我的心愿单\n /api/my_favs/ GET\n 参数:\n offset 分页参数\n sign_user 登录签名\n返回:\n{\n \"has_more\": false,\n \"infos\": [\n {\n \"item\": {\n \"title\": \"综合祛痘 彻底告别痘痘 还原平滑美肌\",\n \"period_money\": 61,\n \"id\": 18, #用于跳转的项目id\n \"orig_price\": 980,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941876.19\",\n \"period_count\": 12,\n \"hospital\": {\n \"name\": \"上海真爱医疗美容医院\",\n \"id\": 2\n },\n \"price\": 680\n },\n \"create_time\": \"2015-12-01 14:44:59\",\n \"user_id\": 1,\n \"id\": 4\n }\n ],\n \"offset\": \"4\" #拉取下页参数\n}\n</pre>\n</a>\n\n\n<a name='link26'>\n<pre class='doc-api-entry fold'>\n26. 加入心愿单\n /api/fav_item/ POST\n 参数:\n item_id 项目id\n status 状态 0移除 1加入\n sign_user 登录签名\n返回:\n{\n \"msg\": \"已从心愿单中移除\",\n \"code\": 0\n}\n</pre>\n</a>\n\n\n<a name='link27'>\n<pre class='doc-api-entry fold'>\n27. 医院项目列表\n /api/hospital_item_list/ GET\n 参数:\n hospital_id 医院id\n offset 上次接口返回值, 首次传空字符串或者不传\n\n返回:\n{\n \"msg\": \"\",\n \"has_more\": true,\n \"infos\": [\n {\n \"has_fee\": true,\n \"title\": \"超声波洗牙\",\n \"period_money\": 27,\n \"price\": 300,\n \"orig_price\": 500,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448890069.99\",\n \"period_count\": 12,\n \"sold_count\": 0,\n \"hospital\": {\n \"name\": \"上海天大医疗美容医院\",\n \"id\": 3\n },\n \"id\": 6\n },\n {\n \"has_fee\": true,\n \"title\": \"韩式三点双眼皮 告别眯眯眼 双眼带电做女神\",\n \"period_money\": 243,\n \"price\": 3800,\n \"orig_price\": 4500,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448884720.47\",\n \"period_count\": 18,\n \"sold_count\": 0,\n \"hospital\": {\n \"name\": \"上海真爱医疗美容医院\",\n \"id\": 2\n },\n \"id\": 9\n }\n ],\n \"code\": 0,\n \"offset\": \"9_\"\n}\n</pre>\n</a>\n\n\n<a name='link28'>\n<pre class='doc-api-entry fold'>\n28. 
额度申请结果\n /api/my_apply_result/ GET\n 参数:\n sign_user 用户签名\n\n返回:\n{\n \"msg\": \"\",\n \"predict_time\": \"2016-01-05\", #预计审核通过时间\n \"code\": 0,\n \"has_applied\": true, #true已申请 false未申请\n \"apply\": {\n \"status\": 1, #1审核中 2审核中 3通过 4被拒绝\n \"update_time\": \"2016-01-04 15:52:20\", #通过或被拒绝的时间\n \"reason\": \"\", #被拒绝理由\n \"create_time\": \"2016-01-04 15:52:20\", #提交申请时间\n \"id\": 51\n }\n}\n</pre>\n</a>\n\n\n\n<a name='link29'>\n<pre class='doc-api-entry fold'>\n29. 提交额度申请\n /api/apply_credit_post/ POST\n 参数:\n chsi_name 学信网用户名\n chsi_passwd 学信网密码\n graduate_time 毕业时间 2015-07-01\n parent_contact 父母联系方式\n sign_user 登录签名\n body_choice_ids 逗号分隔的字符串 1,2,3,4,5\n body_choice_text 当选项body_choice_ids包含10时必传其他内容\n返回:\n{\n \"msg\" : \"申请成功\",\n \"code\": 0\n}\n</pre>\n</a>\n\n\n<a name='link30'>\n<pre class='doc-api-entry fold'>\n30. 我的订单列表\n /api/my_orders/ GET\n 参数:\n cat 0全部 1待支付 2待服务 3待评价\n offset 分页参数\n sign_user 登录签名\n\n列表状态对应按钮\n 0 1 去支付 2 预约 3 无4无 5 追加评价 6 去评价 7 重新下单 8查看额度申请进度 9已完成 10重新购买 11额度申请被拒绝\n\nstatus字段含义\n NEW_ORDER = 0 #待支付\n TO_PAY = 1 #待支付\n PAY_SUCCESS = 2 #待服务\n PAY_ERROR = 3 #支付异常 界面不显示任何按钮\n BOOKED = 4 #已预约 \n FINISH = 5 已完成\n TO_COMMENT = 6 #待评论 \n CANCELED = 7 #已取消\n VERIFYING = 8 #审核中\n CONFIRMED = 9 #服务码已确认\n CANCEL_BEFORE_PAY = 10 #支付前取消\n REJECTED = 11 #额度申请被拒 请重新申请\n返回:\n{\n \"msg\": \"\",\n \"has_more\": true,\n \"infos\": [\n {\n \"order_no\": \"201512021731454604211251\",\n \"create_time\": \"2015-12-02 17:31:04\",\n \"period_count\": 1, #分期数\n \"total\": 880, #商品金额\n \"id\": 30,\n \"hospital\": {\n \"phone\": \"02162269000\",\n \"id\": 2,\n \"name\": \"上海真爱医疗美容医院\"\n },\n \"credit_choice_id\": 0, 为0时为直购\n \"status\": 0, #\n \"price\": 780, #首付金额或直购金额\n \"coupon_amount\": 100, #优惠券金额\n \"item_id\": 7, #商品id\n \"status_label\": \"待支付\",\n \"credit_amount\": 0, #分期总额\n \"item\": {\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448883461.5\",\n \"id\": 7,\n \"title\": \"白瓷娃娃改善肤质 \"\n },\n \"period_fee\": 0, #每期手续费\n 
\"period_amount\": 0 #每期总额含手续费\n }\n ],\n \"code\": 0,\n \"offset\": \"27\"\n}\n</pre>\n</a>\n\n\n<a name='link31'>\n<pre class='doc-api-entry fold'>\n31. 订单详情\n /api/order_detail/ GET\n 参数:\n order_id 订单ID\n sign_user 登录签名\n\n详情状态对应按钮:\n0 1 取消订单 去支付 2 取消订单和预约 3 无按钮 4 取消订单 顶部服务码已预约 5 查看评价 顶部服务码已使用 6 去评价 顶部服务码已使用 7 重新下单 8取消订单 顶部服务码待审核 9取消订单 顶部服务码已确认 10重新购买 顶部无服务码 11取消订单 顶部服务码额度被拒\n \n返回:\n{\n \"service_code\": \"\", #服务码\n \"hospital\": {\n \"name\": \"上海真爱医疗美容医院\",\n \"phone\": \"02162269000\",\n \"long_lat\": \"121.431822,31.213147\",\n \"tag_list\": [\n \"民营二级医疗美容医院\"\n ],\n \"id\": 2,\n \"addr\": \"上海市长宁区延安西路934号\"\n },\n \"order_info\": {\n \"order_no\": \"201512021731454604211251\", #订单号\n \"coupon_id\": 0,\n \"credit_choice_id\": 0, 为0时为直购\n \"create_time\": \"2015-12-02 17:31:04\", #下单时间\n \"period_count\": 0, #分期数\n \"total\": 880, #商品总价\n \"id\": 30,\n \"user_id\": 1,\n \"status\": 0, #含义见订单列表接口\n \"price\": 780, #首付金额或直购金额\n \"coupon_amount\": 100,\n \"item_id\": 7,\n \"status_labbel\": \"待支付\",\n \"credit_amount\": 0, #总分期额度\n \"period_fee\": 0, #分期手续费\n \"period_amount\": 0 #分期月供 不包含服务费\n },\n \"item\": {\n \"title\": \"白瓷娃娃改善肤质 \",\n \"hospital_id\": 2,\n \"price\": 880,\n \"id\": 7,\n \"orig_price\": 1200,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448883461.5\"\n },\n \"cancel_msg\": \"确认取消订单吗\", #点取消按钮式弹框提示文字\n \"service_status\": 0 #0未使用 1已预约 2已使用\n}\n</pre>\n</a>\n\n \n<a name='link32'>\n<pre class='doc-api-entry fold'>\n32. 额度申请菜单文本\n /api/apply_credit/ GET\n \n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"link\": \"https://account.chsi.com.cn/account/preregister.action?from=chsi-home\",\n \"title\": \"\\n学信网(www.chsi.com.cn)是教育部指定的学籍查询唯一网站,为方便核实学籍信息需您提供本人在学信网的账号密码,以加快额度申请审核通过。另,如您还未有账号密码可以前往学信网注册获得,所提供的账号密码美分分会严格保密。\\n\"\n}\n</pre>\n</a>\n\n\n \n<a name='link33'>\n<pre class='doc-api-entry fold'>\n33. 
我的优惠券列表\n /api/my_coupons/ GET\n参数:\n cat 1未使用 2已使用 3已过期\n offset 分页参数\n sign_user 登录签名\n\n返回:\n{\n \"has_more\": false,\n \"infos\": [\n {\n \"remark\": \"\",\n \"cat_str\": \"仅限口唇类项目\",\n \"title\": \"牙齿小类代金券\",\n \"price_str\": \"¥100\",\n \"coupon_id\": 4,\n \"id\": 34,\n \"remain_str\": \"18天到期\",\n },\n ],\n \"offset\": \"29\"\n}\n</pre>\n</a>\n\n \n<a name='link34'>\n<pre class='doc-api-entry fold'>\n34. 取消订单\n /api/cancel_order/ POST\n参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"code\": 0,\n \"msg\" : '取消成功'\n}\n\n</pre>\n</a>\n\n\n<a name='link35'>\n<pre class='doc-api-entry fold'>\n35. 账单列表\n /api/my_period_bill/ POST\n参数:\n cat 1本期 2下期\n sign_user 登录签名\n\n返回:\n{\n 'code': 0,\n 'msg' : ''\n \"repayed\": 1.03, #已还\n \"title\": \"2016年01月帐单(12.01-12.31)\",\n \"remain\": 0, #剩余\n \"deadline\": \"还款日截至2016年02月01日\",\n \"infos\": [\n {\n \"status\": 1, #0待还 1已还 2订单取消\n \"fee\": 0.03, #手续费\n \"user_id\": 1,\n \"punish\": 0, #逾期罚款\n \"total\": 1.03, #总共1.03分期应还钱\n \"delayed\": false,\n \"id\": 31,\n \"delayed_days\": 0, #逾期天数\n \"item\": {\n \"id\": 8,\n \"title\": \"冷光牙齿美白\"\n },\n \"amount\": 1, #金额\n \"create_time_str\": \"2015.12.03\"\n \"deadline\": \"2016-02-01 23:59:59\",\n \"period_pay_index\": 1, 当前期\n \"period_count\": 3, #总期数\n \"repayment_time\": \"2015-12-03 15:26:56\",\n \"order\": {\n \"id\": 34, #订单id 跳转到订单对应账单列表\n }\n }\n ],\n \"total\": 1.03 #总额\n}\n</pre>\n</a>\n\n\n\n<a name='link36'>\n<pre class='doc-api-entry fold'>\n36. 
历史还款\n /api/my_repayments/ POST\n参数:\n sign_user 登录签名\n offset 分页参数\n返回:\n{\n 'code': 0,\n 'msg' : ''\n \"has_more\": false,\n \"infos\": [\n {\n \"fee\": 0.03,\n \"total\": 1.03, #总共1.03分期应还钱\n \"user_id\": 1,\n \"punish\": 0,\n \"id\": 32,\n \"item\": {\n \"id\": 8,\n \"title\": \"冷光牙齿美白\"\n },\n \"amount\": 1,\n \"create_time_str\": \"2015.12.03\"\n \"period_pay_index\": 2,\n \"period_count\": 3,\n \"order\": {\n \"id\": 34,\n }\n }\n ],\n \"offset\": \"1_2015-12-12 12:00:00\"\n}\n</pre>\n</a>\n\n\n<a name='link37'>\n<pre class='doc-api-entry fold'>\n37. 完成订单\n /api/finish_order/ POST\n参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"code\": 0,\n \"msg\" : ''\n}\n</pre>\n</a>\n\n\n<a name='link38'>\n<pre class='doc-api-entry fold'>\n38. 订单还款列表\n /api/my_order_bill/ POST\n参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n 'code': 0,\n 'msg' : ''\n \"repayed\": 2.06,\n \"hospital\": {\n \"id\": 3,\n \"name\": \"上海天大医疗美容医院\"\n },\n \"item\": {\n \"price\": 3.09,\n \"orig_price\": 2500,\n \"id\": 8,\n \"image\": 'http://www.meifenfen.com/a.jpg',\n \"title\": \"冷光牙齿美白\"\n },\n \"remain\": 1.03,\n \"infos\": [\n {\n \"status\": 2, #已取消\n \"fee\": 0.03, #分期手续费\n \"user_id\": 1,\n \"punish\": 0, #滞纳金\n \"delayed\": false,\n \"id\": 33,\n \"delayed_days\": 0, #逾期天数\n \"item\": {\n \"id\": 8,\n \"title\": \"冷光牙齿美白\"\n },\n \"amount\": 1, #分期金额 不含分期手续费\n \"create_time\": \"2015-12-03 15:15:56\",\n \"period_pay_index\": 3,\n \"period_count\": 3,\n \"total\": 1.03,\n \"order\": {\n \"id\": 34,\n },\n \"repayment_time\": \"\",\n \"create_time_str\": \"2015.12.03\"\n }\n ],\n \"total\": 3.09\n}\n\n</pre>\n</a>\n\n\n<a name='link39'>\n<pre class='doc-api-entry fold'>\n39. 选择还款\n /api/repayment/ POST\n参数:\n data 还款数据 [{'id':1,'amount':1,'fee':1,'punish': 0}]\n sign_user 登录签名\n\n返回:\n{\n 'code': 0,\n 'msg' : '',\n 'repayment_id': 1 #还款id\n}\n\n</pre>\n</a>\n\n\n<a name='link40'>\n<pre class='doc-api-entry fold'>\n40. 
通知列表\n /api/repayment/ GET\n参数:\n offset 分页参数\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"has_more\": false,\n \"infos\": [\n {\n \"status\": 0,\n \"article\": {\n \"status\": 0,\n \"title\": \"通知消息1测试了\",\n \"image\": \"\",\n \"create_time\": \"2016-01-11 17:17:00\",\n \"link\": \"http://www.meifenfen.com/a.jpg\", #跳转到webview的链接\n \"id\": 1,\n \"desc\": \"\"\n },\n \"create_time\": \"2016-01-11 17:17:22\",\n \"user_id\": 1,\n \"id\": 1\n }\n ],\n \"code\": 0,\n \"offset\": \"1\"\n}\n</pre>\n</a>\n\n\n<a name='link41'>\n<pre class='doc-api-entry fold'>\n41. 标记通知消息已读\n /api/mark_read/ POST\n参数:\n notification_id 通知id\n sign_user 登录签名\n\n返回:\n{\n 'code': 0,\n 'msg' : '标记已读'\n}\n\n</pre>\n</a>\n\n\n<a name='link42'>\n<pre class='doc-api-entry fold'>\n42. 测试支付宝支付\n /api/test_alipay/ POST\n参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"sign_str\": \"_input_charset=\\\"utf-8\\\"&body=\\\"美分分支付宝支付2\\\"&notify_url=\\\"http%3A%2F%2F139.196.6.231%2Fuser%2Fapi%2Falipay_notify\\\"&out_trade_no=\\\"201601121142172938391435\\\"&partner=\\\"2088021957827236\\\"&payment_type=\\\"1\\\"&seller_id=\\\"[email protected]\\\"&service=\\\"mobile.securitypay.pay\\\"&subject=\\\"美分分支付宝支付\\\"&total_fee=\\\"0.01\\\"&sign=\\\"mKR5ikC2O0yYPSqWP2Pm4tkvW0Z5CyuEYzqHQp3KbpzxtXCN%2BJsfc2xXaNHnCZloXdi%2FmIBa%2B0B%2FSPDlPBOW9kWsqS%2FESqI%2Fu9huB9A1IHY3a%2FZXIEPchNvLyLwlTHVuWYzYpxEEEEFYqzeB0Jx7WNM0kDmR7xVYJxBq8afslJA%3D\\\"&sign_type=\\\"RSA\\\"\"\n}\n</pre>\n</a>\n\n\n<a name='link43'>\n<pre class='doc-api-entry fold'>\n43. 
测试微信app支付接口\n /api/test_wx_app_pay/ POST\n参数:\n order_id 订单id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"data\": {\n \"package\": \"Sign=WxPay\",\n \"timestamp\": \"1452570119\",\n \"sign\": \"EEDA9D29B4C85CBC86DC56C9271969E3\",\n \"partnerid\": \"1305025101\",\n \"appid\": \"wx1e8901446967b46b\",\n \"prepayid\": \"wx20160112114159ca265c03700864695495\",\n \"noncestr\": \"457nwi2paxtgx9q34fhexci7c4t65sfm\"\n },\n}\n</pre>\n</a>\n\n\n<a name='link44'>\n<pre class='doc-api-entry fold'>\n44. 支付宝还款\n /api/alipay_repayment_pay_action/ POST\n参数:\n repayment_id 还款id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"sign_str\": \"_input_charset=\\\"utf-8\\\"&body=\\\"美分分支付宝支付2\\\"&notify_url=\\\"http%3A%2F%2F139.196.6.231%2Fuser%2Fapi%2Falipay_notify\\\"&out_trade_no=\\\"201601121142172938391435\\\"&partner=\\\"2088021957827236\\\"&payment_type=\\\"1\\\"&seller_id=\\\"[email protected]\\\"&service=\\\"mobile.securitypay.pay\\\"&subject=\\\"美分分支付宝支付\\\"&total_fee=\\\"0.01\\\"&sign=\\\"mKR5ikC2O0yYPSqWP2Pm4tkvW0Z5CyuEYzqHQp3KbpzxtXCN%2BJsfc2xXaNHnCZloXdi%2FmIBa%2B0B%2FSPDlPBOW9kWsqS%2FESqI%2Fu9huB9A1IHY3a%2FZXIEPchNvLyLwlTHVuWYzYpxEEEEFYqzeB0Jx7WNM0kDmR7xVYJxBq8afslJA%3D\\\"&sign_type=\\\"RSA\\\"\"\n}\n</pre>\n</a>\n\n\n\n<a name='link45'>\n<pre class='doc-api-entry fold'>\n45. 微信app还款\n /api/wx_repayment_pay_action/ POST\n参数:\n repayment_id 还款id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"code\": 0,\n \"data\": {\n \"packageValue\": \"Sign=WxPay\",\n \"timestamp\": \"1452593704\",\n \"sign\": \"5000583F3403B5A77DACF6385563ED59\",\n \"partnerid\": \"1305025101\",\n \"appid\": \"wx1e8901446967b46b\",\n \"prepayid\": \"wx20160112181504c9e29961f00429427939\",\n \"noncestr\": \"yixg3hg5uavn5fhcwtgisz8voi3jb7ff\"\n },\n}\n</pre>\n</a>\n\n\n\n<a name='link46'>\n<pre class='doc-api-entry fold'>\n46. 
还款方式选择\n /api/repayment_pay/ POST\n参数:\n repayment_id 还款id\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"pay_choices\": [\n {\n \"id\": 1,\n \"name\": \"支付宝\"\n },\n {\n \"id\": 2,\n \"name\": \"微信\"\n }\n ],\n \"code\": 0,\n \"repayment\": {\n \"status\": 0,\n \"update_time\": \"2015-11-30 16:26:56\",\n \"order_no\": \"201511301626044655975140\",\n \"pay_method\": 0,\n \"price\": 0.01,\n \"coupon_id\": null,\n \"create_time\": \"2015-11-30 16:26:56\",\n \"data\": \"[{\\\"punish\\\": \\\"0\\\", \\\"amount\\\": \\\"1000\\\", \\\"fee\\\": \\\"50\\\", \\\"id\\\": \\\"1\\\"}]\",\n \"id\": 1,\n \"transaction_id\": null\n }\n}\n</pre>\n</a>\n\n\n<a name='link47'>\n<pre class='doc-api-entry fold'>\n47. 推荐商品\n /api/recommend_item_list/ GET\n参数:\n sign_user 登录签名\n\n返回:\n{\n \"msg\": \"\",\n \"infos\": [\n {\n \"has_fee\": true,\n \"title\": \"白瓷娃娃 让你的皮肤吹弹可破\",\n \"period_money\": 52,\n \"price\": 300,\n \"orig_price\": 800,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450170024.04\",\n \"period_count\": 6,\n \"sold_count\": 0,\n \"hospital\": {\n \"id\": 5,\n \"name\": \"上海韩镜医疗美容医院\"\n },\n \"id\": 34\n },\n {\n \"has_fee\": true,\n \"title\": \"超声波洗牙\",\n \"period_money\": 27,\n \"price\": 300,\n \"orig_price\": 500,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448890069.99\",\n \"period_count\": 12,\n \"sold_count\": 0,\n \"hospital\": {\n \"id\": 3,\n \"name\": \"上海天大医疗美容医院\"\n },\n \"id\": 6\n },\n {\n \"has_fee\": true,\n \"title\": \"综合祛痘 彻底告别痘痘 还原平滑美肌\",\n \"period_money\": 61,\n \"price\": 680,\n \"orig_price\": 980,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941876.19\",\n \"period_count\": 12,\n \"sold_count\": 0,\n \"hospital\": {\n \"id\": 2,\n \"name\": \"上海真爱医疗美容医院\"\n },\n \"id\": 18\n }\n ],\n \"code\": 0\n}\n</pre>\n</a>\n\n<a name='link48'>\n<pre class='doc-api-entry fold'>\n48. 
意见反馈\n /api/advice/ POST\n参数:\n contact 联系方式\n content 反馈内容\n sign_user 登录签名 登录时传 未登录可以不传\n\n返回:\n{\n \"msg\": \"感谢您的反馈\",\n \"code\": 0\n}\n</pre>\n</a>\n\n\n<a name='link49'>\n<pre class='doc-api-entry fold'>\n49. 检查更新\n /api/check_update/ GET\n参数:\n device_type 设备类型 ios android\n\n返回:\n{\n \"code\": 0,\n \"force\": false, # false非强制更新 true强制更新\n \"title\": \"更新标题\",\n \"content\": \"更新内容:\\n1\\n2\\n3\",\n \"version\": 1.1, #当前版本 浮点型 若本地版本小于1.1 弹框提示更新\n \"link\": \"http://file.ws.126.net/3g/client/netease_newsreader_android.apk\",\n \"msg\": \"\"\n}\n</pre>\n</a>\n\n\n<a name='link50'>\n<pre class='doc-api-entry fold'>\n50. 设备信息 (每次应用启动时调此接口)\n /api/upload_device_info/ POST\n参数:(字段类型都为字符串)\n device_type 设备类型 ios android\n device_id 设备id\n app_version 应用当前版本 '1.1'\n device_name 设备型号 'iphone6s'\n os_version 系统版本 '9.2'\n push_token 推送标识 选填 用户打开时传\n返回:\n{\n \"code\": 0,\n \"msg\": \"\"\n}\n</pre>\n</a>\n\n\n\n<a name='link51'>\n<pre class='doc-api-entry fold'>\n51. 新版首页\n /api/meifenfen_new_index/ POST\n参数:\n city_id 城市id 选填\n sign_user 登录状态时传\n返回:\n{\n \"city\": null,\n \"tutorial_tags\": [\n \"原理\",\n \"手法\",\n \"案例\",\n \"大人说\"\n ],\n \"recommend_items\": [\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450664488.69\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"韩式半永久眉毛\",\n \"hospital_id\": 4,\n \"period_money\": 181,\n \"id\": 48,\n \"orig_price\": 2540,\n \"period_count\": 12,\n \"price\": 1980\n },\n \"item_id\": 48,\n \"id\": 11,\n \"desc\": \"根根分明、自然流畅、不必为每天早起画眉而烦恼\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908541.44\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"进口水光注射深层补水 一次注射等于1千次面膜\",\n \"hospital_id\": 1,\n \"period_money\": 252,\n \"id\": 19,\n \"orig_price\": 6800,\n \"period_count\": 24,\n \"price\": 5000\n },\n \"item_id\": 19,\n \"id\": 7,\n \"desc\": \"该项目通过国际CFDA认证,安全放心\"\n },\n {\n \"width\": 750,\n 
\"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450664969.19\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"韩式半永久美瞳线\",\n \"hospital_id\": 4,\n \"period_money\": 154,\n \"id\": 49,\n \"orig_price\": 2200,\n \"period_count\": 12,\n \"price\": 1680\n },\n \"item_id\": 49,\n \"id\": 12,\n \"desc\": \"瞳孔增大术、浓密睫毛、为眼睛增添浓郁神色\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908560.23\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"伊婉玻尿酸打造饱满柔和年轻面部 做更美的自己\",\n \"hospital_id\": 1,\n \"period_money\": 242,\n \"id\": 2,\n \"orig_price\": 5600,\n \"period_count\": 24,\n \"price\": 4800\n },\n \"item_id\": 2,\n \"id\": 2,\n \"desc\": \"该项目纯进口,安全,且效果立显,不影响工作学习\"\n },\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450663479.71\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"激光祛斑 彻底告别斑女郎\",\n \"hospital_id\": 5,\n \"period_money\": 139,\n \"id\": 35,\n \"orig_price\": 1800,\n \"period_count\": 6,\n \"price\": 800\n },\n \"item_id\": 35,\n \"id\": 8,\n \"desc\": \"该项目是激光透入皮肤作用于色素颗粒,对皮肤无伤害\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908584.6\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"非手术瘦脸 肉毒素塑造上镜玲珑小脸\",\n \"hospital_id\": 1,\n \"period_money\": 242,\n \"id\": 5,\n \"orig_price\": 5500,\n \"period_count\": 24,\n \"price\": 4800\n },\n \"item_id\": 5,\n \"id\": 3,\n \"desc\": \"该项目为进口botox瘦脸针,安全见效快\"\n },\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450665060.6\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"韩式半永久韩式花瓣唇\",\n \"hospital_id\": 4,\n \"period_money\": 204,\n \"id\": 50,\n \"orig_price\": 5000,\n \"period_count\": 18,\n \"price\": 3200\n },\n \"item_id\": 50,\n \"id\": 13,\n \"desc\": \"不花妆、不用卸妆、永远保持好气色\"\n },\n {\n \"width\": 750,\n \"image\": 
\"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450663883.51\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"自体脂肪填充 抚平面部凹陷细纹 拥有年轻美颜\",\n \"hospital_id\": 4,\n \"period_money\": 109,\n \"id\": 43,\n \"orig_price\": 2400,\n \"period_count\": 12,\n \"price\": 1199\n },\n \"item_id\": 43,\n \"id\": 10,\n \"desc\": \"改变脸型、改变五官、童颜不是梦\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908633.03\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"埋线双眼皮打造灵动双眸 魅力无限\",\n \"hospital_id\": 1,\n \"period_money\": 115,\n \"id\": 1,\n \"orig_price\": 2200,\n \"period_count\": 18,\n \"price\": 1800\n },\n \"item_id\": 1,\n \"id\": 1,\n \"desc\": \"该项目个性化设计,且损伤小、恢复快、易修复\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908649.5\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"假体隆出惊艳鼻型 告别窝瓜脸塌塌鼻\",\n \"hospital_id\": 2,\n \"period_money\": 235,\n \"id\": 10,\n \"orig_price\": 4500,\n \"period_count\": 18,\n \"price\": 3680\n },\n \"item_id\": 10,\n \"id\": 5,\n \"desc\": \"该项目采用的硅胶假体与鼻腔相容性好,较少排异\"\n }\n ],\n \"activity_items\": [\n {\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941231.99\",\n \"id\": 2,\n \"price\": 880,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"白瓷娃娃改善肤质 \",\n \"hospital_id\": 2,\n \"period_money\": 80,\n \"id\": 7,\n \"orig_price\": 1200,\n \"period_count\": 12,\n \"price\": 880\n },\n \"item_id\": 7,\n \"hospital\": {\n \"status\": 1,\n \"tag_list\": [\n \"民营二级医疗美容医院\"\n ],\n \"lat\": \"31.213147\",\n \"addr\": \"上海市长宁区延安西路934号\",\n \"tags\": \"民营二级医疗美容医院\",\n \"city_id\": 1,\n \"image\": \"\",\n \"working_time\": \"9:00——17:00\",\n \"phone\": \"02162269000\",\n \"name\": \"上海真爱医疗美容医院\",\n \"photos\": 
\"o_1a5dogr6mm7c1vd5ue91lkk1j8p282-150302103049516.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210312aZ.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210315aZ.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p282-1503021031202R.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p28zl-04.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p28sss-01.jpg\",\n \"rate\": 5,\n \"sold_count\": 0,\n \"long_lat\": \"121.431822,31.213147\",\n \"lng\": \"121.431822\",\n \"desc\": \"出身名门,尊贵之享\\r\\n上海真爱医疗美容医院,前身是境内首家专科医院——上海妇孺医院,源自1935,是专业为女性服务的医院典范。2004年,上海真爱医疗美容医院成立,隶属于上海中医药大学附属曙光医院(三甲),不仅始终传承着1935年的经典,更对自身的服务品质不懈追求,始终站在中国女性需求的最前端。\\r\\n涅槃重生,突破升级\\r\\n2014年,上海真爱医疗美容医院华丽转身、专注整形。真爱整形科全线升级,引进世界级医疗设备和器材,多位顶级专家加盟真爱,开创了亚洲品质医美时代、引领高端医美行业的全新突破。\\r\\n贵宾礼遇,皇室尊享\\r\\n真爱整形有着曾经对待妇科患者的服务意识基础的存在,“用心服务,用爱塑美”的服务理念本质上区别于其他整形医院。视顾客为亲人,真情相待,是我们给您的全城关爱;一心一意,我们将倾尽所能,为您缔造美丽传奇。\",\n \"id\": 2,\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-150302103049516.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210312aZ.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210315aZ.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-1503021031202R.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p28zl-04.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p28sss-01.jpg\"\n ]\n }\n },\n {\n \"item_id\": 6,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"超声波洗牙\",\n \"hospital_id\": 3,\n \"period_money\": 27,\n \"id\": 6,\n \"orig_price\": 500,\n \"period_count\": 12,\n \"price\": 300\n },\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941251.18\",\n \"price\": 300,\n \"id\": 3\n },\n {\n \"item_id\": 16,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"月光真空脱腋毛 \",\n \"hospital_id\": 2,\n \"period_money\": 91,\n \"id\": 16,\n \"orig_price\": 1400,\n \"period_count\": 12,\n \"price\": 1000\n },\n \"image\": 
\"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941213.95\",\n \"price\": 1000,\n \"id\": 1\n }\n ],\n \"recommend_sub_cats\": [\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn1.png\",\n \"id\": 5\n },\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn2.png\",\n \"id\": 8\n },\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn3.png\",\n \"id\": 3\n }\n ],\n \"recommend_hospitals\": [],\n \"tutorials\": [\n {\n \"status\": 0,\n \"view_count\": 0,\n \"title\": \"shenme \",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830940.5\",\n \"create_time\": \"1.15\",\n \"id\": 14,\n },\n {\n \"status\": 0,\n \"view_count\": 0,\n \"title\": \"神呢\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830741.64\",\n \"create_time\": \"1.15\",\n \"id\": 13,\n }\n ],\n \"code\": 0,\n \"activity\": {\n \"title\": \"限时美折\",\n \"city_id\": 1,\n \"start_time\": \"2015-11-30 21:00:00\",\n \"create_time\": \"2015-11-30 21:56:31\",\n \"end_time\": \"2016-03-25 09:00:00\",\n \"id\": 1,\n \"desc\": \"上线打折\"\n },\n \"msg\": \"\",\n {\n \"city\": null,\n \"tutorial_tags\": [\n \"原理\",\n \"手法\",\n \"案例\",\n \"大人说\"\n ],\n \"recommend_items\": [\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450664488.69\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"韩式半永久眉毛\",\n \"hospital_id\": 4,\n \"period_money\": 181,\n \"id\": 48,\n \"orig_price\": 2540,\n \"period_count\": 12,\n \"price\": 1980\n },\n \"item_id\": 48,\n \"id\": 11,\n \"desc\": \"根根分明、自然流畅、不必为每天早起画眉而烦恼\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908541.44\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"进口水光注射深层补水 一次注射等于1千次面膜\",\n \"hospital_id\": 1,\n \"period_money\": 252,\n \"id\": 19,\n \"orig_price\": 6800,\n \"period_count\": 24,\n \"price\": 5000\n },\n \"item_id\": 19,\n \"id\": 7,\n 
\"desc\": \"该项目通过国际CFDA认证,安全放心\"\n },\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450664969.19\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"韩式半永久美瞳线\",\n \"hospital_id\": 4,\n \"period_money\": 154,\n \"id\": 49,\n \"orig_price\": 2200,\n \"period_count\": 12,\n \"price\": 1680\n },\n \"item_id\": 49,\n \"id\": 12,\n \"desc\": \"瞳孔增大术、浓密睫毛、为眼睛增添浓郁神色\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908560.23\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"伊婉玻尿酸打造饱满柔和年轻面部 做更美的自己\",\n \"hospital_id\": 1,\n \"period_money\": 242,\n \"id\": 2,\n \"orig_price\": 5600,\n \"period_count\": 24,\n \"price\": 4800\n },\n \"item_id\": 2,\n \"id\": 2,\n \"desc\": \"该项目纯进口,安全,且效果立显,不影响工作学习\"\n },\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450663479.71\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"激光祛斑 彻底告别斑女郎\",\n \"hospital_id\": 5,\n \"period_money\": 139,\n \"id\": 35,\n \"orig_price\": 1800,\n \"period_count\": 6,\n \"price\": 800\n },\n \"item_id\": 35,\n \"id\": 8,\n \"desc\": \"该项目是激光透入皮肤作用于色素颗粒,对皮肤无伤害\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908584.6\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"非手术瘦脸 肉毒素塑造上镜玲珑小脸\",\n \"hospital_id\": 1,\n \"period_money\": 242,\n \"id\": 5,\n \"orig_price\": 5500,\n \"period_count\": 24,\n \"price\": 4800\n },\n \"item_id\": 5,\n \"id\": 3,\n \"desc\": \"该项目为进口botox瘦脸针,安全见效快\"\n },\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450665060.6\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"韩式半永久韩式花瓣唇\",\n \"hospital_id\": 4,\n \"period_money\": 204,\n \"id\": 50,\n \"orig_price\": 5000,\n \"period_count\": 18,\n \"price\": 3200\n },\n \"item_id\": 50,\n \"id\": 13,\n \"desc\": 
\"不花妆、不用卸妆、永远保持好气色\"\n },\n {\n \"width\": 750,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450663883.51\",\n \"height\": 333,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"自体脂肪填充 抚平面部凹陷细纹 拥有年轻美颜\",\n \"hospital_id\": 4,\n \"period_money\": 109,\n \"id\": 43,\n \"orig_price\": 2400,\n \"period_count\": 12,\n \"price\": 1199\n },\n \"item_id\": 43,\n \"id\": 10,\n \"desc\": \"改变脸型、改变五官、童颜不是梦\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908633.03\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"埋线双眼皮打造灵动双眸 魅力无限\",\n \"hospital_id\": 1,\n \"period_money\": 115,\n \"id\": 1,\n \"orig_price\": 2200,\n \"period_count\": 18,\n \"price\": 1800\n },\n \"item_id\": 1,\n \"id\": 1,\n \"desc\": \"该项目个性化设计,且损伤小、恢复快、易修复\"\n },\n {\n \"width\": 711,\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908649.5\",\n \"height\": 269,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"假体隆出惊艳鼻型 告别窝瓜脸塌塌鼻\",\n \"hospital_id\": 2,\n \"period_money\": 235,\n \"id\": 10,\n \"orig_price\": 4500,\n \"period_count\": 18,\n \"price\": 3680\n },\n \"item_id\": 10,\n \"id\": 5,\n \"desc\": \"该项目采用的硅胶假体与鼻腔相容性好,较少排异\"\n }\n ],\n \"activity_items\": [\n {\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941231.99\",\n \"id\": 2,\n \"price\": 880,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"白瓷娃娃改善肤质 \",\n \"hospital_id\": 2,\n \"period_money\": 80,\n \"id\": 7,\n \"orig_price\": 1200,\n \"period_count\": 12,\n \"price\": 880\n },\n \"item_id\": 7,\n \"hospital\": {\n \"status\": 1,\n \"tag_list\": [\n \"民营二级医疗美容医院\"\n ],\n \"lat\": \"31.213147\",\n \"addr\": \"上海市长宁区延安西路934号\",\n \"tags\": \"民营二级医疗美容医院\",\n \"city_id\": 1,\n \"image\": \"\",\n \"working_time\": \"9:00——17:00\",\n \"phone\": \"02162269000\",\n \"name\": \"上海真爱医疗美容医院\",\n \"photos\": 
\"o_1a5dogr6mm7c1vd5ue91lkk1j8p282-150302103049516.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210312aZ.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210315aZ.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p282-1503021031202R.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p28zl-04.jpg,o_1a5dogr6mm7c1vd5ue91lkk1j8p28sss-01.jpg\",\n \"rate\": 5,\n \"sold_count\": 0,\n \"long_lat\": \"121.431822,31.213147\",\n \"lng\": \"121.431822\",\n \"desc\": \"出身名门,尊贵之享\\r\\n上海真爱医疗美容医院,前身是境内首家专科医院——上海妇孺医院,源自1935,是专业为女性服务的医院典范。2004年,上海真爱医疗美容医院成立,隶属于上海中医药大学附属曙光医院(三甲),不仅始终传承着1935年的经典,更对自身的服务品质不懈追求,始终站在中国女性需求的最前端。\\r\\n涅槃重生,突破升级\\r\\n2014年,上海真爱医疗美容医院华丽转身、专注整形。真爱整形科全线升级,引进世界级医疗设备和器材,多位顶级专家加盟真爱,开创了亚洲品质医美时代、引领高端医美行业的全新突破。\\r\\n贵宾礼遇,皇室尊享\\r\\n真爱整形有着曾经对待妇科患者的服务意识基础的存在,“用心服务,用爱塑美”的服务理念本质上区别于其他整形医院。视顾客为亲人,真情相待,是我们给您的全城关爱;一心一意,我们将倾尽所能,为您缔造美丽传奇。\",\n \"id\": 2,\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-150302103049516.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210312aZ.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-15030210315aZ.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p282-1503021031202R.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p28zl-04.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5dogr6mm7c1vd5ue91lkk1j8p28sss-01.jpg\"\n ]\n }\n },\n {\n \"item_id\": 6,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"超声波洗牙\",\n \"hospital_id\": 3,\n \"period_money\": 27,\n \"id\": 6,\n \"orig_price\": 500,\n \"period_count\": 12,\n \"price\": 300\n },\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941251.18\",\n \"price\": 300,\n \"id\": 3\n },\n {\n \"item_id\": 16,\n \"item\": {\n \"has_fee\": true,\n \"title\": \"月光真空脱腋毛 \",\n \"hospital_id\": 2,\n \"period_money\": 91,\n \"id\": 16,\n \"orig_price\": 1400,\n \"period_count\": 12,\n \"price\": 1000\n },\n \"image\": 
\"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448941213.95\",\n \"price\": 1000,\n \"id\": 1\n }\n ],\n \"recommend_sub_cats\": [\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn1.png\",\n \"id\": 5\n },\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn2.png\",\n \"id\": 8\n },\n {\n \"image\": \"http://www.meifenfen.com/static/user/img/home-btn3.png\",\n \"id\": 3\n }\n ],\n \"recommend_hospitals\": [],\n \"tutorials\": [\n {\n \"status\": 0,\n \"view_count\": 0,\n \"title\": \"shenme \",\n \"items\": \"1\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830940.5\",\n \"create_time\": \"1.15\",\n \"photo\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830945.98\",\n \"item_id_list\": [\n 1\n ],\n \"id\": 14,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830943.48\"\n },\n {\n \"status\": 0,\n \"view_count\": 0,\n \"title\": \"神呢\",\n \"items\": \"1\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830741.64\",\n \"create_time\": \"1.15\",\n \"photo\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830749.41\",\n \"item_id_list\": [\n 1\n ],\n \"id\": 13,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452830744.99\"\n }\n ],\n \"code\": 0,\n \"activity\": {\n \"title\": \"限时美折\",\n \"city_id\": 1,\n \"start_time\": \"2015-11-30 21:00:00\",\n \"create_time\": \"2015-11-30 21:56:31\",\n \"end_time\": \"2016-03-25 09:00:00\",\n \"id\": 1,\n \"desc\": \"上线打折\"\n },\n \"msg\": \"\",\n \"banners\": [\n {\n \"image\": \"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg\",\n \"link\": \"http://139.196.6.231/static/user/banner1.html\",\n 'cat': 2 #1网页 2申请额度\n\n },\n {\n \"image\": \"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg\",\n \"link\": \"http://139.196.6.231/static/user/banner1.html\",\n 'cat': 2 #1网页 2申请额度\n },\n {\n \"image\": 
\"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg\",\n \"link\": \"http://139.196.6.231/static/user/banner1.html\",\n 'cat': 2 #1网页 2申请额度\n }\n ],\n}}\n\n</pre>\n</a>\n\n\n\n<a name='link52'>\n<pre class='doc-api-entry fold'>\n52. 美攻略列表\n /api/mei_tutorials/ POST\n 参数:\n cat 1最新 2最热 3最热\n \n offset 选填\n返回:\n{\n \"code\": 0,\n \"has_more\": false,\n \"cat\": 1,\n \"offset\": \"1\",\n \"msg\": \"\",\n \"infos\": [\n {\n \"status\": 1,\n \"view_count\": 6,\n \"title\": \"0012\",\n \"items\": \"1,2\",\n \"create_time\": \"1-6\",\n \"item_id_list\": [\n 1,\n 2\n ],\n \"id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452149708.12\", #列表icon\n },\n {\n \"status\": 1,\n \"view_count\": 4,\n \"title\": \"1000\",\n \"items\": \"1\",\n \"create_time\": \"12-31\",\n \"item_id_list\": [\n 1\n ],\n \"id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452149708.12\",\n }\n ]\n}\n\n</pre>\n</a>\n\n\n<a name='link53'>\n<pre class='doc-api-entry fold'>\n53. 
每日优惠券列表\n /api/daily_coupons/ POST\n\n返回:\n{\n \"msg\": \"\",\n \"has_more\": false,\n \"infos\": [\n {\n \"note\": \"ahahh\",\n \"infos\": [\n {\n \"use_condition\": \"😄满300可用\",\n \"title\": \"ahahh\",\n \"coupon\": {\n \"coupon_cat\": 3,\n \"remark\": \"\",\n \"effective\": 5184000,\n \"title\": \"综合祛痘代金券\",\n \"effective_days\": 60,\n \"cat_id\": null,\n \"price\": 0,\n \"need\": 0,\n \"need_cat\": 2,\n \"item_id\": 18,\n \"is_trial\": 1,\n \"id\": 9,\n \"sub_cat_id\": null\n },\n \"start_time\": \"2015-12-30 10:36:00\",\n \"use_time\": \"2.22-4.22\",\n \"remain\": 0,\n \"create_time\": \"2016-01-08 10:36:14\",\n \"use_time_start\": \"2016-02-22 16:08:00\",\n \"end_time\": \"2016-02-26 00:00:00\",\n \"id\": 4,\n \"total\": 0,\n \"use_time_end\": \"2016-04-22 16:08:00\",\n \"has_received\": false,\n \"sent\": 0,\n \"create_time_str\": \"12.30\"\n }\n ],\n \"title\": \"12.30\"\n }\n ],\n \"code\": 0,\n \"offset\": \"2015-12-30 10:36:00\"\n}\n</pre>\n</a>\n\n\n<a name='link54'>\n<pre class='doc-api-entry fold'>\n54. 
医院列表\n /api/hospital_list/ POST\n参数:\n sub_cat_id: 子分类id\n sort_type: 排序类型 1综合 2销量 3好评优先\n city_id: 城市id\n offset 上次接口返回值, 首次传空字符串或者不传\n返回:\n{\n \"msg\": \"\",\n \"has_more\": false,\n \"infos\": [\n {\n \"addr\": \"上海市徐汇区虹桥路616号\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452150622.44\",\n \"item_count\": 10,\n \"rate\": \"5.0\",\n \"cats\": [\n {\n \"status\": 0,\n \"sort_order\": 3,\n \"name\": \"形体\",\n \"id\": 7\n },\n {\n \"status\": 0,\n \"sort_order\": 1,\n \"name\": \"鼻部\",\n \"id\": 3\n },\n {\n \"status\": 0,\n \"sort_order\": 0,\n \"name\": \"眼部\",\n \"id\": 2\n },\n {\n \"status\": 0,\n \"sort_order\": 7,\n \"name\": \"面部\",\n \"id\": 1\n }\n ],\n \"sold_count\": 0,\n \"tag_list\": [\n \"高水准技术\",\n \"一流设备\",\n \"完善的安全体系\"\n ],\n \"id\": 6,\n \"name\": \"上海百达丽医疗美容门诊部\"\n },\n {\n \"addr\": \"上海市伊利南路111号3-4楼\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452150653.12\",\n \"item_count\": 8,\n \"rate\": \"5.0\",\n \"cats\": [\n {\n \"status\": 0,\n \"sort_order\": 3,\n \"name\": \"形体\",\n \"id\": 7\n },\n {\n \"status\": 0,\n \"sort_order\": 1,\n \"name\": \"鼻部\",\n \"id\": 3\n },\n {\n \"status\": 0,\n \"sort_order\": 0,\n \"name\": \"眼部\",\n \"id\": 2\n },\n {\n \"status\": 0,\n \"sort_order\": 7,\n \"name\": \"面部\",\n \"id\": 1\n }\n ],\n \"sold_count\": 0,\n \"tag_list\": [\n \"服务一对一\",\n \"私密建档\",\n \"高端体验\"\n ],\n \"id\": 5,\n \"name\": \"上海韩镜医疗美容医院\"\n },\n {\n \"addr\": \"上海市金沙江路2890弄52号\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452510992.77\",\n \"item_count\": 14,\n \"rate\": \"5.0\",\n \"cats\": [\n {\n \"status\": 0,\n \"sort_order\": 3,\n \"name\": \"形体\",\n \"id\": 7\n },\n {\n \"status\": 0,\n \"sort_order\": 4,\n \"name\": \"口唇\",\n \"id\": 6\n },\n {\n \"status\": 0,\n \"sort_order\": 5,\n \"name\": \"微整形\",\n \"id\": 5\n },\n {\n \"status\": 0,\n \"sort_order\": 1,\n \"name\": \"鼻部\",\n \"id\": 3\n },\n {\n \"status\": 0,\n \"sort_order\": 0,\n \"name\": \"眼部\",\n 
\"id\": 2\n },\n {\n \"status\": 0,\n \"sort_order\": 7,\n \"name\": \"面部\",\n \"id\": 1\n }\n ],\n \"sold_count\": 0,\n \"tag_list\": [\n \"中美韩连锁\"\n ],\n \"id\": 4,\n \"name\": \"上海美尔雅医疗美容医院\"\n },\n {\n \"addr\": \"上海市长宁区中山西路339号\",\n \"image\": \"\",\n \"item_count\": 10,\n \"rate\": \"5.0\",\n \"cats\": [\n {\n \"status\": 0,\n \"sort_order\": 4,\n \"name\": \"口唇\",\n \"id\": 6\n },\n {\n \"status\": 0,\n \"sort_order\": 5,\n \"name\": \"微整形\",\n \"id\": 5\n },\n {\n \"status\": 0,\n \"sort_order\": 1,\n \"name\": \"鼻部\",\n \"id\": 3\n },\n {\n \"status\": 0,\n \"sort_order\": 0,\n \"name\": \"眼部\",\n \"id\": 2\n }\n ],\n \"sold_count\": 0,\n \"tag_list\": [\n \"业界标杆\",\n \"VIP五星级管家服务\"\n ],\n \"id\": 3,\n \"name\": \"上海天大医疗美容医院\"\n },\n {\n \"addr\": \"上海市长宁区延安西路934号\",\n \"image\": \"\",\n \"item_count\": 9,\n \"rate\": \"5.0\",\n \"cats\": [\n {\n \"status\": 0,\n \"sort_order\": 5,\n \"name\": \"微整形\",\n \"id\": 5\n },\n {\n \"status\": 0,\n \"sort_order\": 6,\n \"name\": \"毛发\",\n \"id\": 4\n },\n {\n \"status\": 0,\n \"sort_order\": 1,\n \"name\": \"鼻部\",\n \"id\": 3\n },\n {\n \"status\": 0,\n \"sort_order\": 0,\n \"name\": \"眼部\",\n \"id\": 2\n },\n {\n \"status\": 0,\n \"sort_order\": 7,\n \"name\": \"面部\",\n \"id\": 1\n }\n ],\n \"sold_count\": 0,\n \"tag_list\": [\n \"民营二级医疗美容医院\"\n ],\n \"id\": 2,\n \"name\": \"上海真爱医疗美容医院\"\n },\n {\n \"addr\": \"上海市徐汇区漕宝路111号\",\n \"image\": \"\",\n \"item_count\": 9,\n \"rate\": \"5.0\",\n \"cats\": [\n {\n \"status\": 0,\n \"sort_order\": 5,\n \"name\": \"微整形\",\n \"id\": 5\n },\n {\n \"status\": 0,\n \"sort_order\": 1,\n \"name\": \"鼻部\",\n \"id\": 3\n },\n {\n \"status\": 0,\n \"sort_order\": 0,\n \"name\": \"眼部\",\n \"id\": 2\n },\n {\n \"status\": 0,\n \"sort_order\": 7,\n \"name\": \"面部\",\n \"id\": 1\n }\n ],\n \"sold_count\": 0,\n \"tag_list\": [\n \"民营美容医院\",\n \" 上市集团直投\",\n \" 国内外专家汇聚\"\n ],\n \"id\": 1,\n \"name\": \"上海美未央医疗美容整形医院\"\n }\n ],\n \"code\": 0,\n \"offset\": \"1_\"\n}\n</pre>\n</a>\n\n\n<a 
name='link55'>\n<pre class='doc-api-entry fold'>\n55. 领取每日优惠券\n /api/receive_coupon/ POST\n参数:\n daily_id   每日优惠券id\n返回:\n{\n \"msg\": \"领取成功\",\n \"code\": 0\n}\n</pre>\n</a>\n\n\n\n<a name='link56'>\n<pre class='doc-api-entry fold'>\n56. 美攻略详情\n /api/tutorial_detail/ POST\n参数:\n tutorial_id   美攻略id\n{\n \"msg\": \"\",\n \"infos\": [\n {\n \"support_choices\": \"7,1,2,8,3,5\",\n \"image\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1448862874.94\",\n \"doctor_desc\": \"<p><img src=\\\"http://7xnpdb.com2.z0.glb.qiniucdn.com/xiangmu0002.png\\\"/></p><p><br/></p><p><b>姓名:</b></p><p>袁磊<br/><br/></p><p><b>资质:</b></p><p>主治医师<br/><br/></p><p><b>简介:</b><br/>上海美未央美容外科首席注射微整专家,上海九院整形外科硕士,韩国首尔Metro Plastic医院访问学者,上海市科学技术委员会课题撰写人。毕业于上海交通大学医学院附属九院硕士,从事外科整形15年,精细眼鼻整形专家,多次受邀参加韩国、欧美等高峰学术交流会。袁磊主任先后在上海交通大学附属第九人民医院整形外科、上海交通大学附属第三人民医院整形外科、上海市宝山区中心医院整形外科、上海东方医院医疗美容科等公立三甲医院任职多年,2015年被美未央特聘为美未央医院整形外科主诊医生、首席注射微整形专家。临床十余年的实践与学习,让袁主任在眼部、鼻部整形、吸脂、微创注射及内窥镜辅助双平面假体隆胸方面有着得天独厚的优势,特别是引以为豪的注射微整形技术受到广大求美者一致好评。<br/></p>\",\n \"create_time\": \"2015-11-30 14:10:22\",\n \"period_count\": 18,\n \"use_time\": \"需提前一天预约\",\n \"id\": 1,\n \"has_fee\": true,\n \"sub_cat_id_list\": [\n 5\n ],\n \"hospital\": {\n \"id\": 1,\n \"name\": \"上海美未央医疗美容整形医院\"\n },\n \"surgery_desc\": \"<p style=\\\"color: rgb(85, 85, 85);\\\"><img src=\\\"http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5e8s4bf17cgj7v1unaa18tho1%E6%93%8D%E4%BD%9C%E5%9B%BE.jpg\\\"/><!--EndFragment--><br/><br/>1、个性化设计 打造眼部协调美</p><p style=\\\"color: rgb(85, 85, 85);\\\">袁磊医生在术前会根据求美者的五官特征,按照人体的三庭五眼的黄金比例,综合眼与眉的距离,长度,厚度等来做综合性的设计,使双眼皮效果达到化,提升面部的整体协调美。<br/>2、创口微小 形态自然</p><p style=\\\"color: rgb(85, 85, 85);\\\">埋线双眼皮的手术方法是在眼皮的部位切开一个容得下缝合线出入的微小切口,然后在皮肤内部进行缝合,将皮肤与提上睑肌腱膜或睑板缝合,形成粘连,从而形成重睑的手术方式。适用于那些眼皮较薄、脂肪较少的求美者。<br/>3、损伤小 恢复快 易修复</p><p style=\\\"color: rgb(85, 85, 85);\\\">由于埋线双眼皮手术切口微小,对组织损伤较小,因此术后浮肿较轻,恢复快。若对双眼皮形状不满意,修复也相对容易。<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>适合人群:</b></p><p style=\\\"color: rgb(85, 85, 
85);\\\">内双、眼皮一单一双、眼皮薄、无明显脂肪堆积者<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>治疗方法:</b></p><p style=\\\"color: rgb(85, 85, 85);\\\">手术<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>治疗次数:</b></p><p style=\\\"color: rgb(85, 85, 85);\\\">一次<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>术前准备:</b><br/>1、术前不要化妆,保持眼部清洁,不要戴隐形眼镜。<br/>2、如有结膜炎、睑缘炎、严重砂眼者必须治愈后才能接受手术。<br/>3、双眼皮手术与近视眼手术之间要相隔至少两个月。<br/>4、双眼皮手术前两周内,请勿服用含有阿斯匹林的药物<br/>5、双眼皮手术前确定身体健康,无传染性疾病或其他身体炎症<br/>6、做双眼皮手术女性要避开月经期。<br/>7、术前准备太阳镜(遮掩伤痕)、冰袋(术后冰敷伤口用)等物品。<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>手术时长:</b></p><p style=\\\"color: rgb(85, 85, 85);\\\">30-40分钟<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>术后是否住院:</b></p><p style=\\\"color: rgb(85, 85, 85);\\\">否<br/></p><p><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>注意事项:</b><br/>1、术后按医嘱进行冰敷,冰敷可以有效减轻肿胀和疼痛。<br/>2、避免用不干净的手或毛巾等接触手术部位,否则可能导致手术部位细菌感染。<br/>3、按医嘱服用消炎药,发炎后治愈的手术痕迹尤其明显,同时炎症也可能影响双眼皮的形状,所以要注意预防发炎。&#8232;术后尽量垫高头部减轻肿胀<br/>4、术后冷敷只需2-3天左右,之后要热敷。热敷可以帮助血液循环,有助于消肿。<br/><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>常见问题:</b><br/>Q:埋线双眼皮会留疤痕么?<br/>埋线双眼皮创口微小,愈后痕迹可隐藏在形成双眼皮的褶皱里,肉眼几乎看不出来。<br/>Q:埋线双眼皮能永久保持吗?<br/>埋线可能会有一定的脱落几率,这与医生打结的松紧程度、个人的眼睛条件以及平时的保护等因素有关。一般可维持2年以上。<br/>Q:术后有哪些注意事项?<br/>术后要保持伤口清洁,防止感染,伤口愈合前不要沾水。一个月内忌烟酒,不要吃羊肉、辣椒一类辛辣刺激性食物。</p><p><br/></p><p style=\\\"color: rgb(85, 85, 85);\\\"><b>效果展示:</b><br/><img src=\\\"http://7xnpdb.com2.z0.glb.qiniucdn.com/xiangmu114.png\\\"/></p>\",\n \"note\": \"费用仅包含手术费、血常规、凝血四项麻醉\",\n \"price\": 1800,\n \"status\": 1,\n \"direct_buy\": true,\n \"period_money\": 115,\n \"photos\": \"o_1a5e8s4bf17cgj7v1unaa18tho1操作图.jpg,o_1a5e8s4bf17cgj7v1unaa18tho1案例.jpg\",\n \"item_no\": \"1\",\n \"photo_list\": [\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e8s4bf17cgj7v1unaa18tho1操作图.jpg\",\n \"http://7xnpdb.com1.z0.glb.clouddn.com/o_1a5e8s4bf17cgj7v1unaa18tho1案例.jpg\"\n ],\n \"sub_cat_ids\": \"5\",\n \"title\": \"埋线双眼皮打造灵动双眸 魅力无限\",\n \"orig_price\": 2200,\n \"sold_count\": 
0,\n \"sub_cat_id\": 5\n }\n ],\n \"code\": 0,\n \"tutorial\": {\n \"status\": 1,\n \"view_count\": 4,\n \"title\": \"1000\",\n \"items\": \"1\",\n \"create_time\": \"2015-12-31 14:02:35\",\n \"photo\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452737957.58\",\n \"id\": 1,\n }\n}\n</pre>\n</a>\n\n\n\n<a name='link57'>\n<pre class='doc-api-entry fold'>\n57. 医院筛选\n /api/hospital_filters/ POST\n参数:\n{\n \"sort_type_obj\": {\n \"id\": 1,\n \"name\": \"综合排序\"\n },\n \"city\": {\n \"amap_code\": \"1212\",\n \"name\": \"上海\",\n \"city_code\": \"289\",\n \"id\": 1\n },\n \n \"subcat\": {\n \"cat_id_list\": [\n 0\n ],\n \"id\": 0,\n \"name\": \"全部\"\n },\n \"order_choices\": [\n {\n \"id\": 1,\n \"name\": \"综合排序\"\n },\n {\n \"id\": 2,\n \"name\": \"销量优先\"\n },\n {\n \"id\": 3,\n \"name\": \"好评优先\"\n }\n ],\n \"citys\": [\n {\n \"amap_code\": \"1000\",\n \"name\": \"北京\",\n \"city_code\": \"010\",\n \"id\": 2\n },\n {\n \"amap_code\": \"1212\",\n \"name\": \"上海\",\n \"city_code\": \"289\",\n \"id\": 1\n }\n ],\n \"data\": [\n {\n \"sub_cats\": [\n {\n \"cat_id_list\": [\n 0\n ],\n \"id\": 0,\n \"name\": \"全部\"\n },\n {\n \"cat_id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908263.52\",\n \"cat_id_list\": [\n 3,\n 0\n ],\n \"name\": \"假体隆鼻\",\n \"id\": 8\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908336.99\",\n \"cat_id_list\": [\n 1,\n 0\n ],\n \"name\": \"祛痘\",\n \"id\": 3\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908370.25\",\n \"cat_id_list\": [\n 1,\n 0\n ],\n \"name\": \"祛斑\",\n \"id\": 1\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908356.13\",\n \"cat_id_list\": [\n 1,\n 0\n ],\n \"name\": \"点痣\",\n \"id\": 2\n },\n {\n \"cat_id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908146.32\",\n \"cat_id_list\": [\n 5,\n 0\n ],\n \"name\": \"肉毒素\",\n \"id\": 11\n 
},\n {\n \"cat_id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908165.44\",\n \"cat_id_list\": [\n 5,\n 0\n ],\n \"name\": \"玻尿酸\",\n \"id\": 10\n },\n {\n \"cat_id\": 4,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908178.83\",\n \"cat_id_list\": [\n 4,\n 0\n ],\n \"name\": \"脱毛\",\n \"id\": 9\n },\n {\n \"cat_id\": 6,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908078.65\",\n \"cat_id_list\": [\n 6,\n 0\n ],\n \"name\": \"牙齿美白\",\n \"id\": 13\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/tuijian_hong.png\",\n \"id\": 0,\n \"name\": \"推荐\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/tuijian_hui.png\"\n },\n {\n \"id\": 2,\n \"sub_cats\": [\n {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450332742.01\",\n \"cat_id_list\": [\n 2\n ],\n \"name\": \"美瞳线\",\n \"id\": 20\n },\n {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450332836.29\",\n \"cat_id_list\": [\n 2\n ],\n \"name\": \"纹眉\",\n \"id\": 18\n },\n {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908282.78\",\n \"cat_id_list\": [\n 2\n ],\n \"name\": \"去眼袋\",\n \"id\": 7\n },\n {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908295.05\",\n \"cat_id_list\": [\n 2\n ],\n \"name\": \"开眼角\",\n \"id\": 6\n },\n {\n \"cat_id\": 2,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908324.01\",\n \"cat_id_list\": [\n 2\n ],\n \"name\": \"双眼皮\",\n \"id\": 5\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/yanbu_hong.png\",\n \"name\": \"眼部\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/yanbu_hui.png\"\n },\n {\n \"id\": 3,\n \"sub_cats\": [\n {\n \"cat_id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450771445.11\",\n \"cat_id_list\": [\n 7,\n 3\n ],\n \"name\": \"鼻部失败修复\",\n \"id\": 23\n },\n 
{\n \"cat_id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450771462.74\",\n \"cat_id_list\": [\n 3\n ],\n \"name\": \"自体软骨隆鼻\",\n \"id\": 22\n },\n {\n \"cat_id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450771474.05\",\n \"cat_id_list\": [\n 3\n ],\n \"name\": \"综合隆鼻\",\n \"id\": 21\n },\n {\n \"cat_id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908263.52\",\n \"cat_id_list\": [\n 3,\n 0\n ],\n \"name\": \"假体隆鼻\",\n \"id\": 8\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/bibu_hong.png\",\n \"name\": \"鼻部\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/bibu_hui.png\"\n },\n {\n \"id\": 9,\n \"sub_cats\": [\n {\n \"cat_id\": 9,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1452163899.35\",\n \"cat_id_list\": [\n 9\n ],\n \"name\": \"半永久\",\n \"id\": 25\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/banyongjiu-hong.png\",\n \"name\": \"半永久\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/banyongjiu-hui.png\"\n },\n {\n \"id\": 7,\n \"sub_cats\": [\n {\n \"cat_id\": 3,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450771445.11\",\n \"cat_id_list\": [\n 7,\n 3\n ],\n \"name\": \"鼻部失败修复\",\n \"id\": 23\n },\n {\n \"cat_id\": 7,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450333073.69\",\n \"cat_id_list\": [\n 7\n ],\n \"name\": \"减肥\",\n \"id\": 17\n },\n {\n \"cat_id\": 7,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450333094.45\",\n \"cat_id_list\": [\n 7\n ],\n \"name\": \"抽脂\",\n \"id\": 16\n },\n {\n \"cat_id\": 7,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450333105.48\",\n \"cat_id_list\": [\n 7\n ],\n \"name\": \"隆胸\",\n \"id\": 15\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/xingti_hong.png\",\n \"name\": \"形体\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/xingti_hui.png\"\n },\n {\n 
\"id\": 6,\n \"sub_cats\": [\n {\n \"cat_id\": 6,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450332823.85\",\n \"cat_id_list\": [\n 6\n ],\n \"name\": \"漂唇\",\n \"id\": 19\n },\n {\n \"cat_id\": 6,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908078.65\",\n \"cat_id_list\": [\n 6,\n 0\n ],\n \"name\": \"牙齿美白\",\n \"id\": 13\n },\n {\n \"cat_id\": 6,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908100.73\",\n \"cat_id_list\": [\n 6\n ],\n \"name\": \"洗牙\",\n \"id\": 12\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/yachi_hong.png\",\n \"name\": \"口唇\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/yachi_hui.png\"\n },\n {\n \"id\": 5,\n \"sub_cats\": [\n {\n \"cat_id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908146.32\",\n \"cat_id_list\": [\n 5,\n 0\n ],\n \"name\": \"肉毒素\",\n \"id\": 11\n },\n {\n \"cat_id\": 5,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908165.44\",\n \"cat_id_list\": [\n 5,\n 0\n ],\n \"name\": \"玻尿酸\",\n \"id\": 10\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/weizhengxing_hong.png\",\n \"name\": \"微整形\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/weizhengxing_hui.png\"\n },\n {\n \"id\": 4,\n \"sub_cats\": [\n {\n \"cat_id\": 4,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908178.83\",\n \"cat_id_list\": [\n 4,\n 0\n ],\n \"name\": \"脱毛\",\n \"id\": 9\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/maofa_hong.png\",\n \"name\": \"毛发\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/maofa_hui.png\"\n },\n {\n \"id\": 1,\n \"sub_cats\": [\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1450333114.72\",\n \"cat_id_list\": [\n 1\n ],\n \"name\": \"脂肪填充\",\n \"id\": 14\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908309.01\",\n 
\"cat_id_list\": [\n 1\n ],\n \"name\": \"嫩肤\",\n \"id\": 4\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908336.99\",\n \"cat_id_list\": [\n 1,\n 0\n ],\n \"name\": \"祛痘\",\n \"id\": 3\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908356.13\",\n \"cat_id_list\": [\n 1,\n 0\n ],\n \"name\": \"点痣\",\n \"id\": 2\n },\n {\n \"cat_id\": 1,\n \"icon\": \"http://7xnpdb.com1.z0.glb.clouddn.com/subcaticon/1449908370.25\",\n \"cat_id_list\": [\n 1,\n 0\n ],\n \"name\": \"祛斑\",\n \"id\": 1\n }\n ],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/pifu_hong.png\",\n \"name\": \"面部\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/pifu_hui.png\"\n },\n {\n \"id\": 8,\n \"sub_cats\": [],\n \"icon_active\": \"http://www.meifenfen.com/static/user/img/pf-hong.png\",\n \"name\": \"皮肤\",\n \"icon\": \"http://www.meifenfen.com/static/user/img/pf-hui.png\"\n }\n ]\n}\n</pre>\n</a>\n\n\n<a name='link58'>\n<pre class='doc-api-entry fold'>\n58. 
赠送优惠券\n /api/resend_user_coupon/ POST\n 参数:\n phone 手机号\n user_coupon_ids 逗号分隔的优惠券id '1,2'\n sign_user\n返回:\n{\n \"msg\": \"\",\n \"code\": 0\n}\n或\n{\n \"msg\": \"手机号对应用户不存在\",\n \"code\": 10000\n}\n</pre>\n</a>\n\n\n\n <script src='/static/user/js/jquery.min.js'></script>\n <script>\n $('.doc-api-entry').click(function() {\n if($(this).hasClass('fold')) {\n $(this).removeClass('fold')\n } else {\n $(this).addClass('fold')\n }\n })\n $('.api-wrap > a').click(function() {\n var href = $(this).attr('href');\n var selector = '[name=\"'+href+'\"]';\n console.log(selector);\n selector = selector.replace('#', '');\n window.t=$(selector).find('pre');\n $('a > pre').css('background', 'white');\n $(selector).find('pre').css('background', '#E4F7F1');\n });\n \n </script>\n\n\n</html>\n" }, { "alpha_fraction": 0.6385372877120972, "alphanum_fraction": 0.6919831037521362, "avg_line_length": 24.39285659790039, "blob_id": "dec82c9de7e52ec49e27dee5f66ab5297b4b8332", "content_id": "bac568f31699caea94ea1325bbcefa175b5839e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "no_license", "max_line_length": 88, "num_lines": 28, "path": "/migrations/versions/174b0601e7f1_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 174b0601e7f1\nRevises: c2bb73ecf64\nCreate Date: 2016-01-05 11:10:04.014741\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '174b0601e7f1'\ndown_revision = 'c2bb73ecf64'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n #xianpeng int > float stu_years\n ### commands auto generated by Alembic - please adjust! ###\n op.execute('ALTER TABLE credit_apply modify stu_years float')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n pass\n #op.add_column('credit_apply', sa.Column('stu_years', mysql.FLOAT(), nullable=True))\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.60317462682724, "alphanum_fraction": 0.6640211343765259, "avg_line_length": 27.350000381469727, "blob_id": "0a8d8a107a048f355eda967ee492a68d62d00898", "content_id": "0ddb945be6c6c0db1c0d24966cf82bacc0f9d6bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 65, "num_lines": 40, "path": "/migrations/versions/2c01f9e048f7_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2c01f9e048f7\nRevises: 34a907573645\nCreate Date: 2015-11-06 09:49:11.763111\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2c01f9e048f7'\ndown_revision = '34a907573645'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('help_cat',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('help_entry',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=100), nullable=True),\n sa.Column('cat_id', sa.Integer(), nullable=True),\n sa.Column('content', sa.String(length=10000), nullable=True),\n sa.ForeignKeyConstraint(['cat_id'], ['help_cat.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('help_entry')\n op.drop_table('help_cat')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.632022500038147, "alphanum_fraction": 0.6601123809814453, "avg_line_length": 24.428571701049805, "blob_id": "67d15db991f5ccadb301be875ba86ebf39111c5f", "content_id": "1a35c977a31e3c4c8b7058b026093eaf4a284c5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 78, "num_lines": 28, "path": "/migrations/versions/e1bfa676445_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: e1bfa676445\nRevises: 21e38b1473b2\nCreate Date: 2015-11-07 16:08:03.127777\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'e1bfa676445'\ndown_revision = '21e38b1473b2'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('doctor_desc', sa.Text(), nullable=True))\n op.add_column('item', sa.Column('surgery_desc', sa.Text(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('item', 'surgery_desc')\n op.drop_column('item', 'doctor_desc')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6326388716697693, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 31.727272033691406, "blob_id": "3cce6e7c36c36fc1ac8b20b552241c5e983dd95e", "content_id": "cd8f8222aece8d9f1a9242999e015dc6a0418856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/migrations/versions/1b538e70897_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 1b538e70897\nRevises: 19af1cb7edf0\nCreate Date: 2016-03-04 15:38:47.633307\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1b538e70897'\ndown_revision = '19af1cb7edf0'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('rd_draw_counter',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('used', sa.Integer(), nullable=True),\n sa.Column('total', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('rd_draw_counter_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('count', sa.Integer(), nullable=True),\n sa.Column('source', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('rd_draw_counter_log')\n op.drop_table('rd_draw_counter')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6271656155586243, "alphanum_fraction": 0.6555786728858948, "avg_line_length": 31.066667556762695, "blob_id": "de5c0692a300fcfd8bd5ee6a394f1980548621d4", "content_id": "eff06ab3b12fd465584e27d8b4bbc08cbd73c837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 77, "num_lines": 45, "path": "/migrations/versions/32a72ba0ce03_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 32a72ba0ce03\nRevises: e1bfa676445\nCreate Date: 2015-11-09 11:37:08.933073\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '32a72ba0ce03'\ndown_revision = 'e1bfa676445'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('activity',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=300), nullable=True),\n sa.Column('desc', sa.String(length=1000), nullable=True),\n sa.Column('start_time', sa.DateTime(), nullable=True),\n sa.Column('end_time', sa.DateTime(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('activity_item',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('activity_id', sa.Integer(), nullable=True),\n sa.Column('item_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['activity_id'], ['activity.id'], ),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column(u'item', sa.Column('has_fee', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column(u'item', 'has_fee')\n op.drop_table('activity_item')\n op.drop_table('activity')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5706061124801636, "alphanum_fraction": 0.5749240517616272, "avg_line_length": 32.96739196777344, "blob_id": "9454be6348efd39f660614b96d8a5a6dc9d99820", "content_id": "0bdb9748a4f8e51f8602b22eb03b5ce879fa6486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6347, "license_type": "no_license", "max_line_length": 128, "num_lines": 184, "path": "/ops/room_design.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom sqlalchemy import func\nfrom util.sqlerr import SQL_DUPLICATE\nfrom util.sqlerr import SQL_DUPLICATE_PHONE\nfrom util.utils import convert_locaton\nfrom util.utils import dt_obj\n\nfrom models import db\nfrom models import School\nfrom models import RoomDesignDetail\nfrom models import RoomDesignVotePrivilege\nfrom models import RoomDesignVoteLog\nfrom ops.utils import get_items\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom ops.cache import RoomDesignVoteCounter\nfrom thirdparty.qn import upload_img\nfrom settings import celery\n\n\n\nclass RoomDesignService(object):\n\n @staticmethod\n def create_room(user_id, room_name, applyer_name, apply_no, phone, addr, school_id, pics):\n try:\n room = RoomDesignDetail(\n user_id=user_id, room_name=room_name, applyer_name=applyer_name, \n apply_no=apply_no,\n phone=phone, addr=addr,\n school_id=school_id, pics=pics or None\n )\n db.session.add(room)\n db.session.commit()\n RoomDesignVoteCounter.init(room.id)\n return room.id\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n if SQL_DUPLICATE.search(str(e)):\n assert 0, '寝室名或者手机号码或申请编号已存在'\n\n @staticmethod\n def get_room_dict_by_id(room_id):\n room = 
RoomDesignDetail.query.filter(RoomDesignDetail.id==room_id).first()\n if room: return room.as_dict()\n\n @staticmethod\n def update_room(where, **kw):\n count = RoomDesignDetail.query.filter(where).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def get_room(where):\n room = RoomDesignDetail.query.filter(where).first()\n return room\n\n @staticmethod\n def incr_room_vote(room_id, count):\n ''' 提高投票数 '''\n data = {\n 'vote_count': RoomDesignDetail.vote_count+count\n }\n count = RoomDesignDetail.query.filter(RoomDesignDetail.id==room_id).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_rooms(**kw):\n return get_page(RoomDesignDetail, {}, **kw)\n\n @staticmethod\n def get_user_vote_privilede(user_id):\n privileges = RoomDesignVotePrivilege.query.filter(RoomDesignVotePrivilege.user_id==user_id).all()\n data = []\n privileges_id_map = {i.source:i for i in privileges}\n query = and_(\n RoomDesignVoteLog.user_id==user_id,\n RoomDesignVoteLog.source==3\n )\n vote = RoomDesignVoteLog.query.filter(query).order_by(RoomDesignVoteLog.id.desc()).first()\n for i in range(1,3):\n privilede = privileges_id_map.get(i)\n if privilede:\n privilege_id = privilede.id\n privilege_status= privilede.status\n else:\n privilege_id = i\n privilege_status= -1\n data.append({\n 'id' : i,\n 'status' : privilege_status\n })\n if vote and str(vote.create_time)[:10]==str(dt_obj.now())[:10]:\n data.append({\n 'id' : 3,\n 'status' : 1\n })\n else:\n data.append({\n 'id' : 3,\n 'status' : 0\n })\n return data\n\n @staticmethod\n def add_user_vote_privilege(user_id, source):\n ''' 给予用户投票特权\n source 1额度投票 2完成订单投票 3普通投票\n '''\n query = and_(\n RoomDesignVotePrivilege.user_id==user_id,\n RoomDesignVotePrivilege.source==source\n )\n exists = RoomDesignVotePrivilege.query.filter(query).first()\n if exists: return\n privilege = RoomDesignVotePrivilege(user_id=user_id, source=source)\n db.session.add(privilege)\n db.session.commit()\n return 
privilege.id\n\n @staticmethod\n def update_vote_privilege_status(user_id, source):\n query = and_(\n RoomDesignVotePrivilege.user_id==user_id,\n RoomDesignVotePrivilege.source==source,\n RoomDesignVotePrivilege.status==0\n )\n count = RoomDesignVotePrivilege.query.filter(query).update({'status': 1})\n db.session.commit()\n return count\n\n @staticmethod\n def add_vote_log(room_id, user_id, source):\n log = RoomDesignVoteLog(room_id=room_id, user_id=user_id, source=source)\n db.session.add(log)\n db.session.commit()\n return log.id\n\n @staticmethod\n def count_school_pics(school_id):\n where = and_(\n RoomDesignDetail.school_id==school_id,\n RoomDesignDetail.pics!=None\n )\n return count_items(RoomDesignDetail, where)*4\n\n @staticmethod\n def count_rooms(where=None):\n ''' '''\n return count_items(RoomDesignDetail, where)\n \n @staticmethod\n def today_voted(user_id):\n ''' '''\n query = and_(\n RoomDesignVoteLog.user_id==user_id,\n RoomDesignVoteLog.source==3\n )\n vote = RoomDesignVoteLog.query.filter(query).order_by(RoomDesignVoteLog.id.desc()).first()\n return vote and str(vote.create_time)[:10]==str(dt_obj.now())[:10]\n\n @staticmethod\n def count_school_pics(school_id):\n ''' '''\n result = db.session.query(func.sum(RoomDesignDetail.pics_count)).filter(RoomDesignDetail.school_id==school_id).scalar()\n return int(result or 0)\n\n @staticmethod\n def set_school_pics_count(school_id):\n ''' 参与数'''\n result = db.session.query(func.sum(RoomDesignDetail.pics_count)).filter(RoomDesignDetail.school_id==school_id).scalar()\n pics_count = int(result or 0)\n data = {\n 'pics_count': pics_count\n }\n count = School.query.filter(School.id==school_id).update(data)\n db.session.commit()\n return count\n\n\n\n" }, { "alpha_fraction": 0.6687590479850769, "alphanum_fraction": 0.6798648238182068, "avg_line_length": 24.219512939453125, "blob_id": "22f552ba33f6ddbda48aadf203bbeed4add87708", "content_id": "79355b1d4f55605083bd9af012740128751fe0e3", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 68, "num_lines": 82, "path": "/util/sign.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport time\nimport hashlib\n\nfrom flask import request\n\nfrom settings import ADMIN_COOKIE_KEY\nfrom settings import HOSPITAL_COOKIE_KEY\nfrom settings import PROMOTE_COOKIE_KEY\nfrom settings import SECRET_USER_COOKIE\nfrom settings import DOMAIN\n\n\ndef sign_user(user_id):\n if not user_id: return None\n md5 = hashlib.md5()\n md5.update(str(user_id)+SECRET_USER_COOKIE)\n return str(user_id)+'.'+md5.hexdigest()\n\n\ndef extract_user_id(sign_user_str):\n ''' '''\n return (sign_user_str or '').split('.')[0]\n\n\ndef get_cookie(name):\n return request.cookies.get(name) or os.environ.get(name.upper())\n\n\ndef set_cookie(response, key, val, expire=86400*30):\n response.delete_cookie(key, domain=DOMAIN)\n response.set_cookie(key, str(val), expire, DOMAIN)\n\n\ndef del_cookie(response, key):\n response.delete_cookie(key, domain=DOMAIN)\n response.set_cookie(key, expires=0)\n\n\nTOKEN_DLIMITER = ','\ndef check_token(token_str, key=ADMIN_COOKIE_KEY):\n token_args = (token_str or '').split(TOKEN_DLIMITER)\n if len(token_args)!=3: return False, None\n\n name, time_str, token = token_args\n md5 = hashlib.new(\"md5\")\n data = '.'.join((unicode(name), unicode(time_str), key))\n md5.update(str(data))\n access_token = md5.hexdigest()\n\n is_valid = token==access_token\n return is_valid, name\n\n\ndef check_hospital_token(token_str):\n return check_token(token_str, HOSPITAL_COOKIE_KEY)\n\n\ndef check_promote_token(token_str):\n return check_token(token_str, PROMOTE_COOKIE_KEY)\n\n\ndef gen_token(name, key=ADMIN_COOKIE_KEY):\n name = unicode(name)\n md5 = hashlib.new(\"md5\")\n data = {}\n current_time = unicode(int(time.time()))\n data = name+'.'+current_time+'.'+key\n md5.update(str(data))\n 
access_token = md5.hexdigest()\n token = TOKEN_DLIMITER.join([name, current_time, access_token])\n #token = encode(token)\n return token\n\n\ndef gen_hospital_token(name):\n return gen_token(name, HOSPITAL_COOKIE_KEY)\n\n\ndef gen_promote_token(name):\n return gen_token(name, PROMOTE_COOKIE_KEY)\n\n\n\n" }, { "alpha_fraction": 0.6194267272949219, "alphanum_fraction": 0.6878980994224548, "avg_line_length": 23.153846740722656, "blob_id": "dc5a424e196f1fe6b1f4efdf430d0f351d1bd060", "content_id": "3341db1e3a8bf8dbce80d26b7d36efa2c49300ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 96, "num_lines": 26, "path": "/migrations/versions/200344fc698d_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 200344fc698d\nRevises: 3ae16db9c83e\nCreate Date: 2016-01-27 15:15:16.187355\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '200344fc698d'\ndown_revision = '3ae16db9c83e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_detail', sa.Column('phone', sa.String(length=30), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('room_design_detail', 'phone')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6260162591934204, "alphanum_fraction": 0.6780487895011902, "avg_line_length": 22.653846740722656, "blob_id": "b77af66b78bbfea09f00bad89dcf94b8fd3f9435", "content_id": "5d87f86f25e2ad4a8565acf40140c2883a9eb55c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/migrations/versions/33a1596e092f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 33a1596e092f\nRevises: 2c06aa19dd1a\nCreate Date: 2015-11-26 14:02:10.367734\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '33a1596e092f'\ndown_revision = '2c06aa19dd1a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('activity_item', sa.Column('image', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('activity_item', 'image')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5335589051246643, "alphanum_fraction": 0.5415059924125671, "avg_line_length": 36.137428283691406, "blob_id": "aed5d1ce9318e02d66e03f1b18589bc7e46f8336", "content_id": "15153b113eef6a83a9ef2c811574bd784d33af25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13089, "license_type": "no_license", "max_line_length": 111, "num_lines": 342, "path": "/user/trial.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom models import *\nfrom models import Order\nfrom ops.order import OrderService\nfrom ops.trial import TrialService\nfrom util.utils import dt_obj\nfrom util.utils import date_to_datetime\nfrom util.utils import delta_time_str\nfrom util.utils import jsonify_response\nfrom util.decorators import wechat_loggin_dec\nfrom util.validators import Inputs\nfrom util.validators import Optional\nfrom util.validators import IdField\nfrom util.validators import TextField\nfrom util.validators import MobileField\nfrom util.validators import IntChoiceField\nfrom ops.bulks import fetch_user_refs\nfrom ops.coupon import CouponService\nfrom constants import ResponseCode\n\n\n\ndef set_trial_apply_status(apply, comment=None, order=None):\n ''' 0等待审核 1获得了试用,写试用体会 2获得了试用,去下单 3写试用体会 4查看试用体会 5未获得资格 '''\n if apply['status']==1 and apply['cat']==1:\n apply['status'] = 2\n if apply['status']==1 and comment:\n apply['status'] = 4\n if apply['status']==2 and order:\n apply['status']==3\n if apply['status']==3 and comment:\n apply['status'] = 4\n if apply['status']==0 and apply['trial']['total']==apply['trial']['sent']:\n apply['status'] = 5\n\n\ntrial_list_validator = Inputs(\n {\n 'cat' : IntChoiceField(choices=[1,2], msg='试用分类'),\n 'offset' : Optional(TextField(min_length=0, max_length=10000, 
msg='分页参数')),\n }\n )\n@wechat_loggin_dec(required=False, validator=trial_list_validator, app=True)\ndef trial_list():\n ''' 试用列表 '''\n offset = request.valid_data.get('offset')\n cat = request.valid_data.get('cat') #1当期试用 2往期试用\n\n _sort_dir = 'ASC'\n _sort = 'sort_order'\n if cat==1:\n where = and_(\n Trial.end_time>dt_obj.now(),\n Trial.start_time<dt_obj.now()\n )\n else:\n _sort_dir = 'DESC'\n _sort = 'end_time'\n where = Trial.end_time<=dt_obj.now()\n fields = ['id', 'sort_order', 'cat_str', 'title', 'total', 'apply_count', 'end_time', 'image']\n\n has_more, infos = TrialService.get_paged_trials(\n _sort_dir=_sort_dir, _sort=_sort, where=where, offset=offset, fields=fields)\n for info in infos:\n end_time = date_to_datetime(str(info['end_time']), format='%Y-%m-%d %H:%M:%S')\n info['end_time_str'] = delta_time_str(end_time)\n\n offset = ''\n if infos:\n offset = str(infos[-1]['sort_order'])\n if cat==2 and infos:\n offset = str(infos[-1]['end_time'])\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos,\n 'offset' : offset,\n 'has_more' : has_more\n }\n return jsonify_response(result)\n\n\n\nmy_trial_list_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=10000, msg='分页参数')),\n }\n )\n@wechat_loggin_dec(validator=my_trial_list_validator)\ndef my_trial_list():\n ''' 我的试用 '''\n offset = request.valid_data.get('offset')\n where = TrialApply.user_id==request.user_id\n fields = ['id', 'cat', 'coupon_id', 'trial_id', 'status', 'create_time']\n has_more, infos = TrialService.get_paged_apply_user_list(where=where, fields=fields, offset=offset)\n trial_ids = [ i['trial_id'] for i in infos ]\n user_coupon_ids = [i['coupon_id'] for i in infos if i['coupon_id']]\n where = Trial.id.in_(trial_ids)\n fields = ['id', 'cat_str', 'title', 'total', 'apply_count', 'end_time', 'image', 'sent']\n _, trials = TrialService.get_paged_trials(where=where, fields=fields)\n\n where = and_(\n TrialComment.trial_id.in_(trial_ids),\n 
TrialComment.user_id==request.user_id\n )\n _, comments = TrialService.get_paged_trial_comments(where=where)\n trial_comment_map = {i['trial_id']:i['id'] for i in comments}\n trials_map = {i['id']:i for i in trials}\n where = Order.coupon_id.in_(user_coupon_ids)\n _, orders = OrderService.get_paged_orders(where=where)\n coupon_order_map = {i['coupon_id']:1 for i in orders}\n where = UserCoupon.id.in_(user_coupon_ids)\n _, user_coupons = CouponService.get_paged_user_coupons(where=where)\n coupon_item_map = {i['id']:i['item_id'] for i in user_coupons}\n for info in infos:\n info['trial'] = trials_map[info['trial_id']]\n for info in infos:\n coupon_id = info['coupon_id']\n order = coupon_order_map.get('coupon_id')\n item_id = coupon_item_map.get(coupon_id)\n info['item_id'] = item_id\n set_trial_apply_status(\n info, comment=trial_comment_map.get(info['trial']['id']), order=order)\n\n offset = ''\n if infos:\n offset = str(infos[-1]['id'])\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos,\n 'has_more' : has_more,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\n\ncomment_trial_validator = Inputs(\n {\n 'trial_id' : IdField(msg='试用id'),\n 'content' : TextField(min_length=0, max_length=10000, msg='试用体会'),\n 'photos' : Optional(TextField(min_length=0, max_length=10000, msg='图片')),\n }\n )\n@wechat_loggin_dec(validator=comment_trial_validator)\ndef comment_trial():\n ''' 试用体会 '''\n trial_id = request.valid_data.get('trial_id')\n content = request.valid_data.get('content')\n photos = request.valid_data.get('photos')\n trial = TrialService.get_trial(trial_id)\n assert trial, '试用商品不存在'\n apply = TrialService.get_user_apply(request.user_id, trial_id)\n assert apply, '请先提交申请'\n assert apply['status']==1, '您未获得试用资格'\n comment_id = TrialService.comment(trial_id, request.user_id, content, photos)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '发表成功'\n }\n return jsonify_response(result)\n\n\napply_trial_validator = Inputs(\n 
{\n 'trial_id' : IdField(msg='试用id'),\n 'sex' : IntChoiceField(choices=[1,2], msg='性别'),\n 'name' : TextField(min_length=0, max_length=10000, msg='申请人名字'),\n 'phone' : MobileField(msg='申请人手机号'),\n 'content' : TextField(min_length=0, max_length=10000, msg='申请理由'),\n 'addr' : TextField(min_length=0, max_length=10000, msg='宿舍地址'),\n 'school' : TextField(min_length=0, max_length=10000, msg='学校'),\n }\n )\n@wechat_loggin_dec(required=True, validator=apply_trial_validator)\ndef apply_trial():\n ''' 申请试用 '''\n trial_id = request.valid_data.get('trial_id')\n sex = request.valid_data.get('sex')\n name = request.valid_data.get('name')\n phone = request.valid_data.get('phone')\n content = request.valid_data.get('content')\n addr = request.valid_data.get('addr')\n school = request.valid_data.get('school')\n\n trial = TrialService.get_trial(trial_id)\n assert trial, '试用不存在'\n assert trial['end_time']>dt_obj.now(), '试用已结束'\n apply_id = TrialService.add_apply(\n request.user_id, name, phone, school, trial_id, content, sex, addr)\n TrialService.incr_trial_apply_count(trial_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '申请成功'\n }\n return jsonify_response(result)\n\n\ntrial_comment_list_validator = Inputs(\n {\n 'trial_id' : IdField(msg='试用id'),\n 'offset' : Optional(TextField(min_length=0, max_length=10000, msg='分页参数'))\n }\n )\n@wechat_loggin_dec(required=False, validator=trial_comment_list_validator)\ndef trial_comment_list():\n ''' 试用评论列表 '''\n trial_id = request.valid_data.get('trial_id')\n offset = request.valid_data.get('offset')\n where = TrialComment.trial_id==trial_id\n has_more, infos = TrialService.get_paged_trial_comments(where=where, offset=offset)\n user_ids = [i['user_id'] for i in infos]\n fetch_user_refs(infos, fields=['id', 'name', 'avatar'])\n\n apply_list = TrialService.get_trial_applies_by_user_ids(trial_id, user_ids)\n user_school_map = {i['user_id']:i['school'] for i in apply_list}\n for info in infos:\n info['school'] = 
user_school_map[info['user']['id']]\n offset = ''\n if infos:\n offset = infos[-1]['id']\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\ntrial_applyers_validator = Inputs(\n {\n 'trial_id' : IdField(msg='试用id'),\n 'offset' : Optional(TextField(min_length=0, max_length=10000, msg='分页参数'))\n }\n )\n@wechat_loggin_dec(required=False, validator=trial_applyers_validator)\ndef trial_applyers():\n ''' 试用申请者 '''\n trial_id = request.valid_data.get('trial_id')\n offset = request.valid_data.get('offset')\n status = None\n info_id = None\n length = len((offset or '').split('_'))\n if offset and length==2:\n print offset, length\n status, info_id = offset.split('_')\n status = int(status)\n info_id = int(info_id)\n where = or_()\n where.append(\n and_(\n TrialApply.status==status,\n TrialApply.id<info_id\n )\n )\n if status==1:\n where.append(\n TrialApply.status==0\n )\n where = and_(\n where,\n TrialApply.trial_id==trial_id\n )\n else:\n where = TrialApply.trial_id==trial_id\n fields = ['id', 'school', 'status', 'user_id', 'create_time']\n order_by = TrialApply.status.desc(), TrialApply.id.desc()\n has_more, infos = TrialService.get_paged_apply_user_list(\n order_by=order_by, where=where, fields=fields\n )\n fetch_user_refs(infos, fields=['id', 'name', 'avatar'])\n offset = ''\n if infos:\n status = str(infos[-1]['status'])\n info_id = str(infos[-1]['id'])\n offset = '{}_{}'.format(status, info_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos,\n 'has_more' : has_more,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\ntrial_detail_validator = Inputs(\n {\n 'trial_id' : IdField(msg='试用id'),\n })\n@wechat_loggin_dec(required=False, validator=trial_detail_validator)\ndef get_trial_detail():\n ''' '''\n trial_id = request.valid_data.get('trial_id')\n trial = TrialService.get_trial(trial_id)\n\n apply = 
TrialService.get_trial_apply(request.user_id, trial_id)\n\n status = -1 #未申请\n item_id = 0\n if apply:\n comment = TrialService.get_trial_comment(trial_id, request.user_id)\n order = OrderService.get_order_by_coupon_id(apply['coupon_id'])\n where = Trial.id==apply['trial_id']\n _, trials = TrialService.get_paged_trials(where=where, fields=None)\n apply['trial'] = trials[0]\n set_trial_apply_status(apply, comment, order)\n status = apply['status']\n coupon = CouponService.get_user_coupon_by_id(apply['coupon_id']) if apply['coupon_id'] else None\n if coupon: item_id = coupon['item_id']\n elif trial['end_time']<dt_obj.now():\n status = 6 #已结束\n\n result = {\n 'trial' : trial,\n 'item_id' : item_id,\n 'status' : status,\n 'apply' : apply,\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n\n return jsonify_response(result)\n\n\n@wechat_loggin_dec()\ndef get_history_apply():\n ''' 获取最近一次填写的数据 '''\n apply = TrialService.get_latest_apply(request.user_id)\n result = {\n 'code': ResponseCode.SUCCESS,\n 'msg' : '',\n 'data': apply or {}\n }\n return jsonify_response(result)\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6985381245613098, "alphanum_fraction": 0.704859733581543, "avg_line_length": 29.469879150390625, "blob_id": "aaf4e2904fcf8ea2746ade88568c480048195f2a", "content_id": "b1e6df1ad3eee319e256e50de4441bafbd532be5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2631, "license_type": "no_license", "max_line_length": 115, "num_lines": 83, "path": "/app.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom flask import send_from_directory\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\nfrom models import db\nfrom util.utils import jsonify_response\nfrom thirdparty.views import server_verify\nfrom thirdparty.wechat import 
wechat\nfrom ops.cache import WechatTokenCache\nwechat.set_cache(WechatTokenCache)#设置token缓存属性 \nfrom user.urls import user_api\nfrom user.api_urls import user_api as user_app_api\n\nfrom admin.urls import admin_api\nfrom hospital.urls import hospital_api\nfrom promote.urls import promote_api\nfrom constants import ResponseCode\n\n\napp = Flask(__name__)\nclose_session = lambda response: db.session.close() or response\napp.after_request(close_session) #当请求结束关闭session\n\n\n#微信回调\napp.register_blueprint(server_verify, url_prefix='/wx_callback')\n#用户端\napp.register_blueprint(user_api, url_prefix='/user')\n#用户端app接口\napp.register_blueprint(user_app_api, url_prefix='/api')\n#管理端\napp.register_blueprint(admin_api, url_prefix='/admin')\n#医院端\napp.register_blueprint(hospital_api, url_prefix='/hospital')\n#推广\napp.register_blueprint(promote_api, url_prefix='/promote')\nfrom user.views import login_link\nfrom user.views import wechat_room_link\n\napp.add_url_rule('/static/user/login.html', 'login_link', login_link, methods=['POST','GET'])\napp.add_url_rule('/static/user/Activities/home.html', 'wechat_room_link', wechat_room_link, methods=['POST','GET'])\n\[email protected](500)\ndef internal_error(exception):\n ''' 服务器异常 '''\n print '-'*80\n print(exception), 'internal_error'\n print '-'*80\n import traceback\n traceback.print_exc()\n if getattr(request, 'is_app', False):\n return jsonify_response({'msg':'服务器异常', 'code': ResponseCode.SERVER_ERROR})\n else:\n return render_template('server_error.html'), 500\n\n\[email protected]('/',methods=['POST','GET'])\ndef pc_index():\n ''' 首页 '''\n return send_from_directory('static/pc/', 'home.html')\n return render_template('meifenfen.html')\n\n\[email protected]('/mobile/',methods=['POST','GET'])\ndef mobile_index():\n ''' 移动端首页 '''\n return send_from_directory('static/pc/', 'home.html')\n return render_template('meifenfen.html')\n\n\n\nif __name__ == \"__main__\":\n from settings import RUN_PORT\n from werkzeug.serving import 
run_simple\n print RUN_PORT\n run_simple('0.0.0.0', RUN_PORT, app, use_reloader=True, use_debugger=True)\n\n\n" }, { "alpha_fraction": 0.6366047859191895, "alphanum_fraction": 0.6790450811386108, "avg_line_length": 25.928571701049805, "blob_id": "3e23639ddfb762086a00c9b38ba8d812098bf96b", "content_id": "f0da03e1f425713c3bf625012ee63bb972b01029", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 754, "license_type": "no_license", "max_line_length": 94, "num_lines": 28, "path": "/migrations/versions/51aaf1e0ecdd_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 51aaf1e0ecdd\nRevises: a123ae998bf\nCreate Date: 2015-11-16 10:22:20.483272\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '51aaf1e0ecdd'\ndown_revision = 'a123ae998bf'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('hospital', sa.Column('desc', sa.String(length=10000), nullable=True))\n op.add_column('hospital', sa.Column('working_time', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('hospital', 'working_time')\n op.drop_column('hospital', 'desc')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.632013201713562, "alphanum_fraction": 0.6749175190925598, "avg_line_length": 22.30769157409668, "blob_id": "ab5ac88ded138ef4a3b7823d75ef37d706a211ef", "content_id": "4e2721c4058cb0d3517acb3b70deb09216a2a925", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 83, "num_lines": 26, "path": "/migrations/versions/2b097c44cbba_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2b097c44cbba\nRevises: 4806ede530e3\nCreate Date: 2015-11-24 10:03:17.516996\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2b097c44cbba'\ndown_revision = '4806ede530e3'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('order', sa.Column('user_finished', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('order', 'user_finished')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6344605684280396, "alphanum_fraction": 0.6795490980148315, "avg_line_length": 22.884614944458008, "blob_id": "d952b70da1da342cee870d9126eac69b1394e940", "content_id": "65e86f2611e269ba279debc15a39eb0fa9a660ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 91, "num_lines": 26, "path": "/migrations/versions/125c0c0cb424_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 125c0c0cb424\nRevises: c855246d7e8\nCreate Date: 2015-11-03 11:42:59.684984\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '125c0c0cb424'\ndown_revision = 'c855246d7e8'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_apply', sa.Column('graduate_time', sa.DateTime(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('credit_apply', 'graduate_time')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6319797039031982, "alphanum_fraction": 0.6852791905403137, "avg_line_length": 27.14285659790039, "blob_id": "160c8b4ec9bf4852e37cc6a2aaa2912699e0b489", "content_id": "ae49cb8afa5249ac7b35b1cdf3fe866e9c2dc77e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "no_license", "max_line_length": 102, "num_lines": 28, "path": "/migrations/versions/57be38c1806e_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 57be38c1806e\nRevises: 6a2ac421f56\nCreate Date: 2016-02-24 16:41:00.918666\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '57be38c1806e'\ndown_revision = '6a2ac421f56'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_apply', sa.Column('body_choice_ids', sa.String(length=100), nullable=True))\n op.add_column('credit_apply', sa.Column('body_choice_text', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('credit_apply', 'body_choice_text')\n op.drop_column('credit_apply', 'body_choice_ids')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.48721617460250854, "alphanum_fraction": 0.4909276068210602, "avg_line_length": 32.89871597290039, "blob_id": "c77f6945983362ac406ab4d73257ade24d7afabe", "content_id": "a9dd2c17a9290d22fef7b0c2b4b4b5d5f002f326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 119776, "license_type": "no_license", "max_line_length": 240, "num_lines": 3505, "path": "/static/admin/js/admin.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\nvar app = angular.module(\"MyApp\",['ui.router', 'ngProgress', 'ngDialog', 'angularInlineEdit', 'ui-notification', 'ui.bootstrap', 'ui.bootstrap.datetimepicker', 'aMap', 'localytics.directives', 'ui.sortable', 'textAngular', 'toggle-switch'])\n\n\napp.factory('authorization', ['$q', '$http', '$timeout', '$state',\n function($q, $http, $timeout, $state) {\n var _identity = undefined,\n _authenticated = false;\n return {\n isLoggedin: function() {\n return angular.isDefined(_identity);\n },\n isAuthenticated: function() {\n return Boolean(getCookie('token'));\n },\n goSignin: function () {\n $state.go('signin');\n },\n logout: function () {\n console.log('logout');\n\n $state.go('signin');\n },\n getName: function() {\n return $cookieStore.get('name');\n },\n getRole: function() {\n return $cookieStore.get('role');\n },\n authenticate: function(identity) {\n _identity = identity;\n _authenticated = identity != null;\n }\n };\n }\n ])\n\napp.config(\n function setupConfig( $httpProvider ) {\n\n $httpProvider.interceptors.push( interceptHttp);\n\n function interceptHttp( $q) {\n return({\n response: response,\n responseError: responseError\n });\n \n function response( response ) {\n console.log(response);\n console.log('intercept');\n console.log(response.headers()['content-type']);\n 
if(response.headers()['content-type'] === \"application/json; charset=utf-8\"){\n console.log(response);\n if (response.data.code==-1) {\n console.log('goto login');\n window.location='/admin/#/signin';\n }\n }\n return( response );\n }\n function responseError( response ) {\n return( $q.reject( response ) );\n }\n }\n }\n);\n\napp\n.controller('appController', ['$http', '$scope', '$state', 'ngProgress', 'Notification', function($http, $scope,$state,ngProgress,Notification) {\n window.progress = ngProgress;\n window.notification = Notification;\n window.progress.color('#2a6496');\n window.app_scope = $scope;\n $scope.extra = {};\n var city_id = getCookie('city_id');\n if(city_id) {\n $scope.extra.city_id = parseInt(city_id);\n }\n $scope.$watch('extra.city_id', function(old, newval) {\n if(old!=newval) {\n\n console.log('change city');\n function city_callback(data) {\n if(data.code==0) {\n notification.primary(data.msg);\n } else {\n notification.error(data.msg)\n }\n $state.reload();\n }\n console.log('change city');\n $http.post('/admin/change_city/', {'city_id':$scope.extra.city_id}\n ).success(city_callback);\n }\n }, true\n );\n $scope.token = getCookie('token');\n $scope.name = getCookie('name');\n $scope.loginForm = {\n 'name':'',\n 'passwd':''\n };\n $scope.isRoot = function () {\n var role = getCookie('cat');\n if(!role) { return false }\n role = parseInt(role)\n return role==0\n }\n $scope.isEditor = function () {\n var role = getCookie('cat');\n if(!role) { return false }\n role = parseInt(role)\n return role==1;\n }\n $scope.isPromoter = function () {\n var role = getCookie('cat');\n if(!role) { return false }\n role = parseInt(role)\n return role==2;\n }\n $scope.isMarketer = function () {\n var role = getCookie('cat');\n if(!role) { return false }\n role = parseInt(role)\n return role==3;\n }\n $scope.isTester = function () {\n var role = getCookie('cat');\n if(!role) { return false }\n role = parseInt(role)\n return role==4;\n }\n $scope.isCustom = 
function () {\n var role = getCookie('cat');\n if(!role) { return false }\n role = parseInt(role)\n return role==5;\n }\n \n \n \n function callback(data) {\n console.log('call');\n if(data.code==0) {\n $state.go('index');\n }else {\n notification.error(data.msg||'用户名或密码错误');\n }\n };\n $scope.login = function() {\n $http.post('/admin/login/', $scope.loginForm)\n .success(callback);\n };\n $http.get('/admin/refresh_qntoken/');\n}])\n\n\napp.config(function($stateProvider, $urlRouterProvider){\n \n console.log('admin config');\n\n $urlRouterProvider.otherwise('/index');\n $stateProvider\n .state('signin',{\n url: '/signin',\n templateUrl: '/static/admin/tpl/signin.html?version=13',\n controller: ''\n })\n .state('index',{\n url: '/index',\n templateUrl: '/static/admin/tpl/index.html?version=13',\n controller: 'IndexCtrl'\n })\n .state('index.city_list',{\n url: '/city_list',\n templateUrl: '/static/admin/tpl/city_list.html?version=13',\n controller: 'CityListCtrl'\n })\n .state('index.new_city',{\n url: '/new_city',\n templateUrl: '/static/admin/tpl/city_edit.html?version=13',\n controller: 'NewCityCtrl'\n })\n .state('index.new_period_pay_choice',{\n url: '/new_period_pay_choice',\n templateUrl: '/static/admin/tpl/new_period_pay_choice.html?version=13',\n controller: 'NewPeriodPayChoiceCtrl'\n })\n .state('index.item_list',{\n url: '/item_list?page&hospital_id&keyword&sub_cat_id&activity_id&is_recommend',\n templateUrl: '/static/admin/tpl/item_list.html?version=13',\n controller: 'ItemListCtrl'\n })\n .state('index.trial_list',{\n url: '/trial_list?page',\n templateUrl: '/static/admin/tpl/trial_list.html?version=13',\n controller: 'TrialListCtrl'\n })\n .state('index.new_item',{\n url: '/new_item',\n templateUrl: '/static/admin/tpl/item_edit.html?version=13',\n controller: 'ItemEditCtrl'\n })\n .state('index.new_trial',{\n url: '/new_trial',\n templateUrl: '/static/admin/tpl/trial_edit.html?version=13',\n controller: 'TrialEditCtrl'\n })\n 
.state('index.item_edit',{\n url: '/item_edit/:item_id',\n templateUrl: '/static/admin/tpl/item_edit.html?version=13',\n controller: 'ItemEditCtrl'\n })\n .state('index.trial_edit',{\n url: '/trial_edit/:item_id',\n templateUrl: '/static/admin/tpl/trial_edit.html?version=13',\n controller: 'TrialEditCtrl'\n })\n .state('index.new_coupon',{\n url: '/new_coupon',\n templateUrl: '/static/admin/tpl/coupon_edit.html?version=13',\n controller: 'CouponEditCtrl'\n })\n .state('index.new_tutorial',{\n url: '/new_tutorial',\n templateUrl: '/static/admin/tpl/tutorial_edit.html?version=13',\n controller: 'TutorialEditCtrl'\n })\n .state('index.new_daily_coupon',{\n url: '/new_daily_coupon',\n templateUrl: '/static/admin/tpl/daily_coupon_edit.html?version=13',\n controller: 'DailyCouponEditCtrl'\n })\n .state('index.city_edit',{\n url: '/city_edit/:item_id',\n templateUrl: '/static/admin/tpl/city_edit.html?version=13',\n controller: 'CityEditCtrl'\n })\n .state('index.daily_coupon_edit',{\n url: '/daily_coupon_edit/:item_id',\n templateUrl: '/static/admin/tpl/daily_coupon_edit.html?version=13',\n controller: 'DailyCouponEditCtrl'\n })\n .state('index.coupon_edit',{\n url: '/coupon_edit/:item_id',\n templateUrl: '/static/admin/tpl/coupon_edit.html?version=13',\n controller: 'CouponEditCtrl'\n })\n .state('index.tutorial_edit',{\n url: '/tutorial_edit/:item_id',\n templateUrl: '/static/admin/tpl/tutorial_edit.html?version=13',\n controller: 'TutorialEditCtrl'\n })\n .state('index.school_list',{\n url: '/school_list?page&city_name',\n templateUrl: '/static/admin/tpl/school_list.html?version=13',\n controller: 'SchoolListCtrl'\n })\n .state('index.cat_list',{\n url: '/cat_list?page',\n templateUrl: '/static/admin/tpl/cat_list.html?version=13',\n controller: 'CatListCtrl'\n })\n .state('index.subcat_list',{\n url: '/subcat_list?page&cat_id&is_recommend',\n templateUrl: '/static/admin/tpl/subcat_list.html?version=13',\n controller: 'SubcatListCtrl'\n })\n 
.state('index.hospital_list',{\n url: '/hospital_list?page&keyword&is_recommend',\n templateUrl: '/static/admin/tpl/hospital_list.html?version=13',\n controller: 'HospitalListCtrl'\n })\n .state('index.tutorial_list',{\n url: '/tutorial_list?page&keyword',\n templateUrl: '/static/admin/tpl/tutorial_list.html?version=13',\n controller: 'TutorialListCtrl'\n })\n .state('index.new_hospital',{\n url: '/new_hospital',\n templateUrl: '/static/admin/tpl/hospital_edit.html?version=13',\n controller: 'HospitalEditCtrl'\n })\n .state('index.hospital_edit',{\n url: '/hospital_edit/:item_id',\n templateUrl: '/static/admin/tpl/hospital_edit.html?version=13',\n controller: 'HospitalEditCtrl'\n })\n .state('index.apply_list',{\n url: '/apply_list?page&apply_status',\n templateUrl: '/static/admin/tpl/apply_list.html?version=13',\n controller: 'ApplyListCtrl'\n })\n .state('index.activity_list',{\n url: '/activity_list?page',\n templateUrl: '/static/admin/tpl/activity_list.html?version=13',\n controller: 'ActivityListCtrl'\n })\n .state('index.order_list',{\n url: '/order_list?page&keyword&hospital_id&sub_cat_id&order_status',\n templateUrl: '/static/admin/tpl/order_list.html?version=13',\n controller: 'OrderListCtrl'\n })\n .state('index.user_list',{\n url: '/user_list?page&keyword&promoter_id',\n templateUrl: '/static/admin/tpl/user_list.html?version=13',\n controller: 'UserListCtrl'\n })\n .state('index.promoter_list',{\n url: '/promoter_list?page&keyword',\n templateUrl: '/static/admin/tpl/promoter_list.html?version=13',\n controller: 'PromoterListCtrl'\n })\n .state('index.hospital_user_list',{\n url: '/hospital_user_list?page&hospital_id',\n templateUrl: '/static/admin/tpl/hospital_user_list.html?version=13',\n controller: 'HospitalUserListCtrl'\n })\n .state('index.coupon_list',{\n url: '/coupon_list?page',\n templateUrl: '/static/admin/tpl/coupon_list.html?version=13',\n controller: 'CouponListCtrl'\n })\n .state('index.question_list',{\n url: '/question_list?page',\n 
templateUrl: '/static/admin/tpl/question_list.html?version=13',\n controller: 'QuestionListCtrl'\n })\n .state('index.user_question_list',{\n url: '/user_question_list?page&is_random&_sort',\n templateUrl: '/static/admin/tpl/user_question_list.html?version=13',\n controller: 'UserQuestionListCtrl'\n })\n .state('index.daily_coupon_list',{\n url: '/daily_coupon_list?page',\n templateUrl: '/static/admin/tpl/daily_coupon_list.html?version=13',\n controller: 'DailyCouponListCtrl'\n })\n .state('index.advice_list',{\n url: '/advice_list?page',\n templateUrl: '/static/admin/tpl/advice_list.html?version=13',\n controller: 'AdviceListCtrl'\n })\n .state('index.room_list',{\n url: '/room_list?page&_sort',\n templateUrl: '/static/admin/tpl/room_list.html?version=13',\n controller: 'RoomListCtrl'\n })\n .state('index.period_pay_choice_list',{\n url: '/period_pay_choice_list?page',\n templateUrl: '/static/admin/tpl/period_pay_choice_list.html?version=13',\n controller: 'PeriodPayChoiceListCtrl'\n })\n .state('index.apply_detail',{\n url: '/apply_detail/:apply_id',\n templateUrl: '/static/admin/tpl/apply_detail.html?version=13',\n controller: 'ApplyDetailCtrl'\n })\n .state('index.advice_detail',{\n url: '/advice_detail/:advice_id',\n templateUrl: '/static/admin/tpl/advice_detail.html?version=13',\n controller: 'AdviceDetailCtrl'\n })\n .state('index.item_recommend_edit',{\n url: '/item_recommend_edit/:item_id',\n templateUrl: '/static/admin/tpl/item_recommend_edit.html?version=13',\n controller: 'ItemRecommendEditCtrl'\n })\n .state('index.item_activity_edit',{\n url: '/item_activity_edit/:item_id',\n templateUrl: '/static/admin/tpl/item_activity_edit.html?version=13',\n controller: 'ItemActivityEditCtrl'\n })\n .state('index.subcat_recommend_edit',{\n url: '/subcat_recommend_edit/:item_id',\n templateUrl: '/static/admin/tpl/subcat_recommend_edit.html?version=13',\n controller: 'SubcatRecommendEditCtrl'\n })\n .state('index.hospital_recommend_edit',{\n url: 
'/hospital_recommend_edit/:item_id',\n templateUrl: '/static/admin/tpl/hospital_recommend_edit.html?version=13',\n controller: 'HospitalRecommendEditCtrl'\n })\n .state('index.user_detail',{\n url: '/user_detail/:item_id',\n templateUrl: '/static/admin/tpl/user_detail.html?version=13',\n controller: 'UserDetailCtrl'\n })\n .state('index.new_itemcat',{\n url: '/new_itemcat',\n templateUrl: '/static/admin/tpl/itemcat_edit.html?version=13',\n controller: 'ItemCatEditCtrl'\n })\n .state('index.new_activity',{\n url: '/new_activity',\n templateUrl: '/static/admin/tpl/activity_edit.html?version=13',\n controller: 'ActivityEditCtrl'\n })\n .state('index.activity_edit',{\n url: '/activity_edit/:item_id',\n templateUrl: '/static/admin/tpl/activity_edit.html?version=13',\n controller: 'ActivityEditCtrl'\n })\n .state('index.new_itemsubcat',{\n url: '/new_itemsubcat?cat_id',\n templateUrl: '/static/admin/tpl/itemsubcat_edit.html?version=13',\n controller: 'ItemSubcatEditCtrl'\n })\n .state('index.itemcat_edit',{\n url: '/itemcat_edit/:item_id',\n templateUrl: '/static/admin/tpl/itemcat_edit.html?version=13',\n controller: 'ItemCatEditCtrl'\n })\n .state('index.itemsubcat_edit',{\n url: '/itemsubcat_edit/:item_id',\n templateUrl: '/static/admin/tpl/itemsubcat_edit.html?version=13',\n controller: 'ItemSubcatEditCtrl'\n })\n .state('index.period_pay_log_list', {\n url: '/period_pay_log_list?keyword&page&is_delayed',\n templateUrl: '/static/admin/tpl/period_pay_log_list.html?version=13',\n controller: 'PeriodPayLogListCtrl'\n })\n .state('index.trial_detail', {\n url: '/trial_detail?page&item_id',\n templateUrl: '/static/admin/tpl/trial_detail.html?version=13',\n controller: 'TrialDetailCtrl'\n })\n .state('index.daily_detail', {\n url: '/daily_detail?page&item_id',\n templateUrl: '/static/admin/tpl/daily_detail.html?version=13',\n controller: 'DailyDetailCtrl'\n })\n .state('index.send_user_coupon', {\n url: '/send_user_coupon?&phone',\n templateUrl: 
'/static/admin/tpl/send_user_coupon.html?version=13',\n controller: 'SendUserCouponCtrl'\n })\n .state('index.user_vcode', {\n url: '/user_vcode',\n templateUrl: '/static/admin/tpl/user_vcode.html?version=13',\n controller: 'UserVcodeCtrl'\n })\n .state('index.new_question',{\n url: '/new_question',\n templateUrl: '/static/admin/tpl/question_edit.html?version=13',\n controller: 'QuestionEditCtrl'\n })\n .state('index.room_detail',{\n url: '/room_detail/:room_id',\n templateUrl: '/static/admin/tpl/room_detail.html?version=13',\n controller: 'RoomDetailCtrl'\n })\n });\n \n /*global angular */\n(function (ng) {\n 'use strict';\n\n app.directive('script', function() {\n return {\n restrict: 'E',\n scope: false,\n link: function(scope, elem, attr) {\n if (attr.type === 'text/javascript-lazy') {\n var code = elem.text();\n var f = new Function(code);\n console.log(code);\n f();\n }\n }\n };\n });\n\n}(angular));\n\n\napp.controller('IndexCtrl', ['$scope', '$http', '$state',function($scope, $http, $state) {\n if(!Boolean(getCookie('token'))) {\n $state.go('signin');\n }\n}])\n\n\napp.controller('CityListCtrl', ['$scope', '$http', function($scope, $http) {\n $scope.infos = [];\n function callback(data) {\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n }\n $scope.getPage = function(page) {\n $http.get('/admin/get_city_list',\n {params: {page:page}})\n .success(callback);\n }\n $scope.getPage(1);\n}])\n\n\napp.controller('ItemListCtrl', ['$scope', '$http', '$state', '$stateParams', function($scope, $http, $state, $stateParams) {\n $scope.infos = [];\n var filters = angular.copy($stateParams);\n if ($stateParams['sub_cat_id']) { //整型字段\n filters['sub_cat_id'] = parseInt($stateParams['sub_cat_id']);\n }\n if ($stateParams['hospital_id']) { //整型字段\n filters['hospital_id'] = parseInt($stateParams['hospital_id']);\n }\n if ($stateParams['activity_id']) {\n filters['activity_id'] = parseInt($stateParams['activity_id']);\n }\n if 
($stateParams['is_recommend']=='1') {\n filters['is_recommend'] = 1;\n }\n $scope.filters = filters;\n $scope.currentpage = $stateParams.page || 1 ;\n function callback(data) {\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n }\n $scope.refresh = function() {\n $http.get('/admin/get_item_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.removeItemActivity = function(item_id) {\n progress.start()\n function del_callback(data) {\n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n $state.reload();\n } else {\n notification.error(data.msg);\n }\n }\n $http.post('/admin/del_item_activity/',\n {'item_id':item_id})\n .success(del_callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n var is_recommend = undefined;\n if($scope.filters.is_recommend) {\n is_recommend = 1\n }\n params['page'] = page;\n params['keyword'] = $scope.filters.keyword;\n params['sub_cat_id'] = $scope.filters.sub_cat_id;\n params['hospital_id'] = $scope.filters.hospital_id;\n params['activity_id'] = $scope.filters.activity_id;\n params['is_recommend'] = is_recommend;\n console.log(params);\n return $state.go('index.item_list', params);\n }\n $scope.reset = function() {\n var params = {}\n params['page'] = 1;\n params['keyword'] = undefined;\n params['sub_cat_id'] = undefined;\n params['hospital_id'] = undefined;\n params['activity_id'] = undefined;\n params['is_recommend'] = undefined;\n return $state.go('index.item_list', params);\n }\n $scope.topRecommend = function(item_id) { //推荐置顶\n function top_callback(response) {\n progress.complete();\n console.log(response);\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/top_recommend_item/',\n {'item_id':item_id})\n .success(top_callback);\n }\n $scope.Recommend = function(item_id, recommend) { //推荐\n function recommend_callback(response) {\n 
progress.complete();\n console.log(response);\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/recommend_item/',\n {'item_id':item_id, 'recommend':recommend})\n .success(recommend_callback);\n }\n $scope.Online = function(item_id, status) { //上下线\n var msg = '';\n if(status==0) {\n msg = '确认下线吗?';\n } else {\n msg = '确认上线吗?';\n }\n if(confirm(msg)) {\n function online_callback(response) {\n progress.complete();\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/set_item_status/',\n {'item_id':item_id, 'status':status})\n .success(online_callback);\n }\n }\n\n $scope.refresh();\n}])\n\n\napp.controller('TrialListCtrl', ['$scope', '$http', '$state', '$stateParams', function($scope, $http, $state, $stateParams) {\n $scope.infos = [];\n var filters = angular.copy($stateParams);\n \n $scope.filters = filters;\n $scope.currentpage = $stateParams.page || 1 ;\n function callback(data) {\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.now = data.now;\n }\n $scope.refresh = function() {\n $http.get('/admin/get_trial_list',\n {params: $stateParams})\n .success(callback);\n }\n\n $scope.routeTo = function(page) {\n var params = {};\n\n params['page'] = page;\n console.log(params);\n return $state.go('index.trial_list', params);\n }\n $scope.reset = function() {\n var params = {}\n params['page'] = 1;\n params['keyword'] = undefined;\n params['sub_cat_id'] = undefined;\n params['activity_id'] = undefined;\n params['is_recommend'] = undefined;\n return $state.go('index.item_list', params);\n }\n \n $scope.refresh();\n}])\n\napp.controller('SchoolListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n 
$scope.currentpage = $stateParams.page || 1;\n if ($stateParams.city_name) {\n $scope.filters['city_name'] = $stateParams.city_name;\n }\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n params = {page:page};\n if ($stateParams.city_name) {\n params['city_name'] = $stateParams.city_name;\n }\n $http.get('/admin/get_school_list',\n {params: params})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['city_name'] = $scope.filters.city_name;\n return $state.go('index.school_list', params);\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('CatListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_cat_list',\n {params: {page:page}})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.cat_list', params);\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('SubcatListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n //$scope.infos = [];\n $scope.filters = angular.copy($stateParams);\n var link = '#/index/new_itemsubcat';\n if($scope.filters.cat_id) {\n $scope.filters.cat_id = parseInt($scope.filters.cat_id);\n link = '#/index/new_itemsubcat?cat_id='+$scope.filters.cat_id;\n }\n if($stateParams.is_recommend) {\n $scope.filters.is_recommend = 1;\n }\n window.subcatlistscope = $scope;\n $scope.sortableOptions = {\n containment: 
'#images-list-wrap',\n stop: function(e, vt) {\n console.log('stop drag cat');\n }\n };\n $scope.$on('ngRepeatFinished', function(ngRepeatFinishedEvent) {\n console.log('finish');\n });\n $scope.link = link;\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.refresh = function() {\n progress.start()\n $http.get('/admin/get_subcat_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.optionsData = [];\n $scope.itemCatids = [];\n $scope.iditemMap = {};\n function cats_callback(data) {\n $scope.optionsData = data.infos; //optionsData在ngitemcat时不起作用?\n for(var i in $scope.optionsData) {\n var cat = $scope.optionsData[i];\n $scope.iditemMap[cat.id] = cat\n $scope.itemCatids.push(cat.id);\n }\n $scope.$watch('itemCatids', function(old, newval) {\n console.log(old)\n console.log(newval)\n if(old.join('')==newval.join('')) {\n return;\n }\n console.log('cat ids list');\n $http.post('/admin/set_cats_order/',\n $scope.itemCatids)\n .success(set_order_callback);\n }, true);\n \n }\n $http.get('/admin/get_cat_list')\n .success(cats_callback);\n\n function set_order_callback (data) {\n if(data.code==0) {\n notification.primary(data.msg)\n } else {\n notification.error(data.msg)\n }\n }\n \n\n $scope.setStatus = function (item_id, status) {\n progress.start()\n $http.post('/admin/subcat/set_status/',\n {'subcat_id':item_id, 'status':status})\n .success(post_callback);\n }\n function post_callback(data) {\n console.log('callback');\n progress.complete()\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.reload(); //state.go not working here! 
the same route not working here\n }\n }\n $scope.choose = function (cat_id) {\n if ($scope.filters.cat_id==cat_id) {\n $scope.filters.cat_id = undefined;\n } else {\n $scope.filters.cat_id = cat_id;\n }\n console.log($scope.filters);\n console.log(cat_id);\n $scope.routeTo(1);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['cat_id'] = $scope.filters.cat_id;\n var is_recommend = undefined;\n if($scope.filters.is_recommend) {\n is_recommend = 1;\n }\n params['is_recommend'] = is_recommend;\n console.log(params);\n return $state.go('index.subcat_list', params);\n }\n $scope.refresh();\n $scope.Recommend = function(item_id, recommend) { //推荐\n function recommend_callback(response) {\n progress.complete();\n console.log(response);\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/recommend_subcat/',\n {'item_id':item_id, 'recommend':recommend})\n .success(recommend_callback);\n }\n}])\n\n\napp.controller('HospitalListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if($stateParams.keyword) {\n $scope.filters.keyword = $stateParams.keyword;\n }\n if ($stateParams['is_recommend']=='1') {\n $scope.filters['is_recommend'] = 1;\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_hospital_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n if($scope.filters.is_recommend) {\n params['is_recommend'] = 1;\n } else {\n params['is_recommend'] = undefined;\n }\n params['keyword'] = 
$scope.filters.keyword;\n return $state.go('index.hospital_list', params);\n }\n $scope.reset = function(page) {\n var params = {};\n params['page'] = 1;\n params['keyword'] = undefined;\n params['is_recommend'] = undefined;\n return $state.go('index.hospital_list', params);\n }\n $scope.Online = function(item_id, status) { //上下线\n var msg = '';\n if(status==0) {\n msg = '确认下线吗?';\n } else {\n msg = '确认上线吗?';\n }\n if(confirm(msg)) {\n function online_callback(response) {\n progress.complete();\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/set_hospital_status/',\n {'item_id':item_id, 'status':status})\n .success(online_callback);\n }\n }\n $scope.Recommend = function(item_id, recommend) { //推荐\n function recommend_callback(response) {\n progress.complete();\n console.log(response);\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/recommend_hospital/',\n {'item_id':item_id, 'recommend':recommend})\n .success(recommend_callback);\n }\n $scope.getPage($scope.currentpage);\n}])\n\napp.controller('TutorialListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if($stateParams.keyword) {\n $scope.filters.keyword = $stateParams.keyword;\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_tutorial_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['keyword'] = $scope.filters.keyword;\n return 
$state.go('index.tutorial_list', params);\n }\n $scope.reset = function(page) {\n var params = {};\n params['page'] = 1;\n params['keyword'] = undefined;\n return $state.go('index.tutorial_list', params);\n }\n $scope.Online = function(item_id, status) { //上下线\n var msg = '';\n if(status==0) {\n msg = '确认下线吗?';\n } else {\n msg = '确认上线吗?';\n }\n if(confirm(msg)) {\n function online_callback(response) {\n progress.complete();\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/set_tutorial_status/',\n {'item_id':item_id, 'status':status})\n .success(online_callback);\n }\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\n//逾期分期列表\napp.controller('PeriodPayLogListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if ($stateParams.keyword) {\n $scope.filters.keyword = $stateParams.keyword;\n }\n if ($stateParams.is_delayed=='true') {\n $scope.filters.is_delayed = 'true';\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_period_pay_log_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['keyword'] = $scope.filters.keyword;\n params['is_delayed']= $scope.filters.is_delayed;\n return $state.go('index.period_pay_log_list', params);\n }\n $scope.reset = function(page) {\n var params = {};\n params['page'] = 1;\n params['is_delayed']= undefined;\n params['keyword'] = undefined;\n return $state.go('index.period_pay_log_list', params);\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('ActivityListCtrl', ['$scope', 
'$http', '$stateParams', '$state', 'ngDialog', function($scope, $http, $stateParams, $state, ngDialog) {\n $scope.infos = [];\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_activity_list',\n {params: {page:page}})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.activity_list', params);\n }\n $scope.addActivityItem = function (activity_id) { //弹出框编辑活动商品列表\n progress.start()\n function get_list_ids(list){\n var ids = [];\n for(var i in list) {\n ids.push(list[i].id);\n }\n return ids;\n }\n $http.get('/admin/get_activity_items/?activity_id='+String(activity_id))\n .success(function(data) {\n progress.complete();\n window.s= $scope;\n $scope.choices = data.infos;\n var selected = []\n for(var i in $scope.choices) {\n item = $scope.choices[i]\n if(item.selected) {\n selected.push(item);\n }\n }\n $scope.selected = selected;\n ngDialog.open({\n template: '/static/admin/tpl/activity_items.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n \n $scope.settings = {enableSearch: true};\n \n //$scope.customFilter = '';\n \n $scope.Cancel = function () {\n $scope.closeThisDialog();\n }\n \n $scope.Ok = function() {\n function post_callback(data) {\n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n } else {\n notification.error(data.msg);\n }\n $scope.closeThisDialog();\n }\n var data = {\n activity_id: activity_id,\n ids: get_list_ids($scope.selected)\n }\n progress.start();\n $http.post('/admin/set_activity_items/',\n data)\n .success(post_callback);\n }\n }]\n })\n }\n );\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('OrderListCtrl', ['$scope', '$http', '$stateParams', '$state', 
'ngDialog', function($scope, $http, $stateParams, $state, ngDialog) {\n $scope.infos = [];\n $scope.filters = {};\n $scope.currentpage = $stateParams.page || 1;\n window.s = $scope;\n if($stateParams.hospital_id) {\n $scope.filters['hospital_id'] = parseInt($stateParams.hospital_id);\n }\n if($stateParams.sub_cat_id) {\n $scope.filters['sub_cat_id'] = parseInt($stateParams.sub_cat_id);\n }\n if($stateParams.order_status) {\n $scope.filters['order_status'] = parseInt($stateParams.order_status);\n }\n if($stateParams.keyword) {\n $scope.filters['keyword'] = $stateParams.keyword;\n }\n $scope.order_status_choices = [\n ];\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n $scope.order_status_choices = data.order_status_choices;\n }\n $scope.refresh = function() {\n progress.start()\n $http.get('/admin/get_order_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['order_status'] = $scope.filters.order_status;\n params['keyword'] = $scope.filters.keyword;\n params['sub_cat_id'] = $scope.filters.sub_cat_id;\n params['hospital_id'] = $scope.filters.hospital_id;\n return $state.go('index.order_list', params);\n }\n $scope.chooseStatus = function (status) {\n if($scope.filters.order_status==status) {\n $scope.filters.order_status = undefined;\n } else {\n $scope.filters.order_status = status;\n }\n $scope.routeTo(1);\n }\n $scope.refund = function(order_id) {//退款\n progress.start();\n $http.get('/admin/get_refund_detail/?order_id='+order_id)\n .success( function (data) {\n progress.complete();\n if(data.code==0) {\n $scope.price = data.price;\n $scope.repayment_amount = data.repayment_amount;\n $scope.has_alipay = data.has_alipay;\n ngDialog.open({\n template: '/static/admin/tpl/refund_dialog.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n 
$scope.Cancel = function () {\n $scope.closeThisDialog();\n }\n $scope.Ok = function() {\n function post_callback(data) {\n \n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n } else {\n notification.error(data.msg);\n }\n $scope.closeThisDialog();\n if(!data.has_alipay) {\n $state.reload();\n } else {\n window.location = data.link;\n }\n }\n var data = {\n order_id: order_id\n }\n progress.start();\n $http.post('/admin/refund_order/',\n data)\n .success(post_callback);\n }\n }]\n })\n } else {\n notification.error(data.msg)\n }\n });\n }\n $scope.remark = function(info) {//备注\n $scope.order_id = info.id;\n $scope.remark_text = info.remark\n ngDialog.open({\n template: '/static/admin/tpl/remark_dialog.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n $scope.Cancel = function () {\n $scope.closeThisDialog();\n }\n $scope.Ok = function() {\n function post_callback(data) {\n \n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n $state.reload()\n } else {\n notification.error(data.msg);\n }\n $scope.closeThisDialog();\n }\n var data = {\n order_id: info.id,\n remark: $scope.remark_text\n }\n progress.start();\n $http.post('/admin/remark_order/',\n data)\n .success(post_callback);\n }\n }]\n })\n }\n $scope.reset = function() {\n var params = {\n page: 1,\n keyword: undefined,\n order_status: undefined,\n hospital_id: undefined,\n sub_cat_id: undefined\n }\n return $state.go('index.order_list', params);\n }\n $scope.refresh();\n}])\n\n\n\napp.controller('CouponListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n 
$http.get('/admin/get_coupon_list/',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.coupon_list', params);\n }\n $scope.reset = function() {\n var params = {};\n params['page'] = 1;\n return $state.go('index.coupon_list', params); \n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('QuestionListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_question_list/',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.question_list', params);\n }\n $scope.Online = function(item_id, status) { //上下线\n var msg = '';\n if(status==0) {\n msg = '确认下线吗?';\n } else {\n msg = '确认上线吗?';\n }\n if(confirm(msg)) {\n function online_callback(response) {\n progress.complete();\n if(response.code>0) {\n notification.error(response.msg);\n } else {\n notification.primary(response.msg);\n $state.reload();\n }\n }\n progress.start()\n $http.post('/admin/set_question_status/',\n {'item_id':item_id, 'status':status})\n .success(online_callback);\n }\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('UserQuestionListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n $scope.question_choices = [\n {'id':0,'name':'固定问题'},\n {'id':1,'name':'自定义问题'},\n ]\n if($stateParams.is_random) {\n $scope.filters.is_random = parseInt($stateParams.is_random);\n }\n if($stateParams._sort) {\n 
$scope.filters._sort = $stateParams._sort;\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n $scope.total_money = data.total_money;\n $scope.total_redpack = data.total_redpack;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_user_question_list/',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n if($scope.filters.is_random==1||$scope.filters.is_random==0) {\n params['is_random'] = $scope.filters.is_random;\n } else {\n params['is_random'] = undefined;\n }\n if($scope.filters._sort) {\n params['_sort'] = $scope.filters._sort;\n }\n return $state.go('index.user_question_list', params);\n }\n $scope.sortQuestion = function (field) {\n $scope.filters._sort = field;\n $scope.routeTo(1)\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('DailyCouponListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_daily_coupon_list/',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.daily_coupon_list', params);\n }\n $scope.reset = function() {\n var params = {};\n params['page'] = 1;\n return $state.go('index.daily_coupon_list', params); \n }\n $scope.getPage($scope.currentpage);\n}])\n\n\n\napp.filter('rawHtml', ['$sce', function($sce){\n return function(val) {\n return $sce.trustAsHtml(val);\n 
};\n}]);\napp.controller('TrialDetailCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n $scope.infos = [];\n\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.item = data.item;\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n\n //赠送申请\n $scope.approve = function (apply_id) {\n progress.start();\n var params = {}\n params['apply_id'] = apply_id;\n params['item_id'] = item_id;\n $http.post('/admin/send_trial/',\n params)\n .success(function (data) {\n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n $state.reload();\n } else {\n notification.error(data.msg);\n }\n });\n }\n\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/trial_applyer_list/?item_id='+item_id,\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.trial_detail', params);\n }\n $scope.reset = function() {\n var params = {};\n params['page'] = 1;\n params['item_id'] = item_id;\n return $state.go('index.trial_detail', params); \n }\n $scope.getPage($scope.currentpage);\n}])\n\n\n//每日优惠券详情\napp.controller('DailyDetailCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n $scope.infos = [];\n\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.item = data.item;\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/daily_applyer_list/?item_id='+item_id,\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return 
$state.go('index.daily_detail', params);\n }\n\n $scope.getPage($scope.currentpage);\n}])\n\n\n//短信验证码\napp.controller('UserVcodeCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n\n $scope.phone = '';\n function callback(data) {\n progress.complete();\n if (data.code==0) {\n $scope.vcode = data.vcode;\n $scope.count = data.count;\n } else {\n notification.error(data.msg);\n }\n }\n\n //获取验证码\n $scope.getVcode = function (cat) {\n\n var params = {}\n params['phone'] = $scope.phone;\n params['cat'] = cat;\n $http.post('/admin/get_user_vcode/',\n params)\n .success(function (data) {\n progress.complete();\n if(data.code==0) {\n $scope.vcode = data.vcode;\n $scope.count = data.count;\n } else {\n notification.error(data.msg);\n }\n });\n }\n\n //重置短信发送次数\n $scope.resetVcodeSent = function () {\n var params = {};\n params['phone'] = $scope.phone;\n $http.post('/admin/reset_user_vcode/',\n params)\n .success(function (data) {\n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n $state.reload();\n } else {\n notification.error(data.msg);\n }\n });\n }\n\n}])\n\n\napp.controller('UserListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if($stateParams.keyword) {\n $scope.filters.keyword = $stateParams.keyword\n }\n if($stateParams.promoter_id) {\n $scope.filters.promoter_id = parseInt($stateParams.promoter_id);\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_user_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['keyword'] = $scope.filters.keyword;\n params['promoter_id'] 
= $scope.filters.promoter_id;\n return $state.go('index.user_list', params);\n }\n $scope.reset = function() {\n var params = {};\n params['page'] = 1;\n params['keyword'] = undefined\n params['promoter_id'] = undefined\n return $state.go('index.user_list', params); \n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('PromoterListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if($stateParams.keyword) {\n $scope.filters.keyword = $stateParams.keyword\n }\n\n $scope.tmp = undefined;\n $scope.togglePromoter = function () {\n if($scope.tmp) {\n $scope.tmp = undefined;\n } else {\n $scope.tmp = {}\n }\n }\n $scope.addPromoter = function() {\n progress.start();\n $http.post('/admin/add_promoter/',\n $scope.tmp)\n .success(function (data) {\n if(data.code==0) {\n console.log(data);\n $state.reload();\n } else {\n notification.error(data.msg||'服务器异常');\n }\n progress.complete();\n });\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_promoter_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['keyword'] = $scope.filters.keyword;\n return $state.go('index.promoter_list', params);\n }\n $scope.reset = function() {\n var params = {};\n params['page'] = 1;\n params['keyword'] = undefined\n return $state.go('index.promoter_list', params); \n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('HospitalUserListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if($stateParams.hospital_id) {\n $scope.filters.hospital_id = 
parseInt($stateParams.hospital_id);\n }\n\n $scope.tmp = undefined;\n $scope.toggleUser = function () {\n if($scope.tmp) {\n $scope.tmp = undefined;\n } else {\n $scope.tmp = {}\n }\n }\n $scope.addUser = function() {\n if(!($scope.tmp&&$scope.tmp.hospital_id)) {\n return notification.error('请选择医院');\n }\n progress.start();\n $http.post('/admin/add_hospital_admin/',\n $scope.tmp)\n .success(function (data) {\n if(data.code==0) {\n console.log(data);\n $state.reload();\n } else {\n notification.error(data.msg||'服务器异常');\n }\n progress.complete();\n });\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_hospital_user_list/',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['hospital_id'] = $scope.filters.hospital_id;\n return $state.go('index.hospital_user_list', params);\n }\n $scope.reset = function() {\n var params = {};\n params['page'] = 1;\n params['hospital_id'] = undefined;\n return $state.go('index.hospital_user_list', params); \n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('AdviceListCtrl', ['$scope', '$http', '$stateParams', '$state', 'ngDialog', function($scope, $http, $stateParams, $state, ngDialog) {\n $scope.infos = [];\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_advice_list',\n {params: {page:page}})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.advice_list', params);\n }\n 
$scope.getPage($scope.currentpage);\n $scope.remark = function(info) {//备注\n $scope.advice_id = info.id;\n $scope.remark_text = info.remark\n ngDialog.open({\n template: '/static/admin/tpl/remark_advice_dialog.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n $scope.Cancel = function () {\n $scope.closeThisDialog();\n }\n $scope.Ok = function() {\n function post_callback(data) {\n \n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n $state.reload()\n } else {\n notification.error(data.msg);\n }\n $scope.closeThisDialog();\n }\n var data = {\n advice_id: info.id,\n remark: $scope.remark_text\n }\n progress.start();\n $http.post('/admin/remark_useradvice/',\n data)\n .success(post_callback);\n }\n }]\n })\n }\n\n}])\n\napp.controller('RoomListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.filters = {};\n if($stateParams._sort) {\n $scope.filters._sort = $stateParams._sort;\n } else {\n $scope.filters._sort = 'vote_count';\n }\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_room_list/',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['_sort'] = $scope.filters._sort;\n return $state.go('index.room_list', params);\n }\n $scope.sortField = function (field) {\n $scope.filters._sort = field;\n $scope.routeTo(1)\n }\n $scope.getPage($scope.currentpage);\n}])\n\napp.controller('PeriodPayChoiceListCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n $scope.infos = [];\n $scope.currentpage = $stateParams.page || 1;\n function callback(data) {\n progress.complete();\n 
$scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_period_choice_list',\n {params: {page:page}})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n return $state.go('index.period_pay_choice_list', params);\n }\n $scope.getPage($scope.currentpage);\n}])\n\n\napp.controller('ApplyListCtrl', ['$scope', '$http', '$stateParams', '$state', 'ngDialog', function($scope, $http, $stateParams, $state, ngDialog) {\n $scope.infos = [];\n $scope.currentpage = $stateParams.page || 1;\n $scope.filters = {}\n if($stateParams.apply_status) {\n $scope.filters.apply_status = parseInt($stateParams.apply_status);\n }\n $scope.apply_status_choices = [\n {'id':1, 'title':'已通过'},\n {'id':2, 'title':'被拒绝'},\n {'id':3, 'title':'待审核'},\n {'id':4, 'title':'待补充'},\n ]\n $scope.chooseStatus = function (status) {\n if($scope.filters.apply_status==status) {\n $scope.filters.apply_status = undefined;\n } else {\n $scope.filters.apply_status = status;\n }\n $scope.routeTo(1);\n }\n function callback(data) {\n progress.complete();\n $scope.infos = data.infos;\n $scope.page_info = data.page_info;\n $scope.total = data.total;\n }\n $scope.getPage = function(page) {\n progress.start()\n $http.get('/admin/get_apply_list',\n {params: $stateParams})\n .success(callback);\n }\n $scope.routeTo = function(page) {\n var params = {};\n params['page'] = page||1;\n params['apply_status'] = $scope.filters.apply_status;\n return $state.go('index.apply_list', params);\n }\n $scope.remark = function(info) {//备注\n $scope.apply_id = info.id;\n $scope.remark_text = info.remark\n $scope.remark_img = info.remark_img\n ngDialog.open({\n template: '/static/admin/tpl/remark_apply_dialog.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n $scope.Cancel = function () {\n $scope.closeThisDialog();\n }\n $scope.Ok = 
function() {\n function post_callback(data) {\n \n progress.complete();\n if(data.code==0) {\n notification.primary(data.msg);\n $state.reload()\n } else {\n notification.error(data.msg);\n }\n $scope.closeThisDialog();\n }\n var data = {\n apply_id: info.id,\n remark: $scope.remark_text,\n remark_img: $scope.image\n }\n data = $('#remark-apply-form').serializeObject()\n data['apply_id'] = info.id;\n progress.start();\n $http.post('/admin/remark_apply/',\n data)\n .success(post_callback);\n }\n }]\n })\n }\n $scope.getPage($scope.currentpage);\n}])\n\n//医院编辑\napp.controller('HospitalEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_hospital/';\n if(item_id) { action = '/admin/edit_hospital/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.hospital_list');\n }\n }\n $scope.addItem = function() {\n data = $('#hospitalform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_hospital/?item_id=' + String(item_id))\n .success(get_callback);\n }\n if(item_id) {\n $scope.getItem(item_id);\n } else {\n $scope.item = {};\n }\n}])\n\n\n//商品编辑\napp.controller('ItemEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_item/';\n $scope.textmenu = [\n ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'pre', 'quote'],\n ['bold', 'italics', 'underline', 'strikeThrough', 'ul', 'ol', 'redo', 'undo', 'clear'],\n ['justifyLeft','justifyCenter','justifyRight','justifyFull','indent','outdent'],\n ['html', 
'insertImage', 'insertLink', 'insertVideo']\n ];\n if(item_id) { action = '/admin/edit_item/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n $scope.item = {};\n window.ei = $scope.item;\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.item_list');\n }\n }\n $scope.addItem = function() {\n data = $('#itemform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_item/?item_id=' + String(item_id))\n .success(get_callback);\n }\n $scope.subcat_choices = [\n {\"id\":1, \"name\":\"双眼皮\"},\n {\"id\":2, \"name\":\"眉毛\"}\n ];\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\n//试用编辑\napp.controller('TrialEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_trial/';\n $scope.textmenu = [\n ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'pre', 'quote'],\n ['bold', 'italics', 'underline', 'strikeThrough', 'ul', 'ol', 'redo', 'undo', 'clear'],\n ['justifyLeft','justifyCenter','justifyRight','justifyFull','indent','outdent'],\n ['html', 'insertImage', 'insertLink', 'insertVideo']\n ];\n if(item_id) { action = '/admin/edit_trial/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n $scope.item = {};\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.trial_list');\n }\n }\n\n $scope.addItem = function() {\n data = $('#trialform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n 
$http.get('/admin/get_trial/?item_id=' + String(item_id))\n .success(get_callback);\n }\n\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\n\n//优惠券编辑\napp.controller('CouponEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_coupon/';\n $scope.textmenu = [\n ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'pre', 'quote'],\n ['bold', 'italics', 'underline', 'strikeThrough', 'ul', 'ol', 'redo', 'undo', 'clear'],\n ['justifyLeft','justifyCenter','justifyRight','justifyFull','indent','outdent'],\n ['html', 'insertImage', 'insertLink', 'insertVideo']\n ];\n if(item_id) { action = '/admin/coupon_edit/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n $scope.item = {};\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.coupon_list');\n }\n }\n $scope.addItem = function() {\n data = $('#couponform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_coupon/?item_id=' + String(item_id))\n .success(get_callback);\n }\n\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\n\n//优惠券编辑\napp.controller('QuestionEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_question/';\n\n $scope.item = {};\n\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n notification.primary(data.msg);\n $state.go('index.question_list');\n }\n }\n\n $scope.addItem = function() {\n data = $('#questionform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n 
}\n\n}])\n\n\n\n//城市编辑\napp.controller('CityEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_city/';\n \n if(item_id) { action = '/admin/city_edit/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n $scope.item = {};\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.city_list');\n }\n }\n $scope.addItem = function() {\n data = $('#cityform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_city/?item_id=' + String(item_id))\n .success(get_callback);\n }\n\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\n\n//美攻略编辑\napp.controller('TutorialEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_tutorial/';\n $scope.textmenu = [\n ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'pre', 'quote'],\n ['bold', 'italics', 'underline', 'strikeThrough', 'ul', 'ol', 'redo', 'undo', 'clear'],\n ['justifyLeft','justifyCenter','justifyRight','justifyFull','indent','outdent'],\n ['html', 'insertImage', 'insertLink', 'insertVideo']\n ];\n if(item_id) { action = '/admin/tutorial_edit/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n $scope.item = {};\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.tutorial_list');\n }\n }\n $scope.addItem = function() {\n data = $('#tutorialform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function 
get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_tutorial/?item_id=' + String(item_id))\n .success(get_callback);\n }\n\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\n//每日优惠券编辑\napp.controller('DailyCouponEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_daily_coupon/';\n $scope.textmenu = [\n ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'pre', 'quote'],\n ['bold', 'italics', 'underline', 'strikeThrough', 'ul', 'ol', 'redo', 'undo', 'clear'],\n ['justifyLeft','justifyCenter','justifyRight','justifyFull','indent','outdent'],\n ['html', 'insertImage', 'insertLink', 'insertVideo']\n ];\n if(item_id) { action = '/admin/daily_coupon_edit/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n $scope.item = {};\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n $state.go('index.daily_coupon_list');\n }\n }\n $scope.addItem = function() {\n data = $('#dailycouponform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_daily_coupon/?item_id=' + String(item_id))\n .success(get_callback);\n }\n\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\n//按用户发放优惠券\napp.controller('SendUserCouponCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var phone = $stateParams.phone;\n \n $scope.item = {};\n if(phone) {\n $scope.item.phone = phone; \n }\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n notification.primary(data.msg);\n $state.go('index.send_user_coupon', {phone:$scope.item.phone});\n }\n }\n $scope.addItem = function() {\n data = 
$('#sendusercouponform').serializeObject();\n $http.post('/admin/send_user_coupon/',\n data)\n .success(post_callback);\n }\n\n}])\n\n\n//编辑活动/添加活动\napp.controller('ActivityEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_activity/';\n if(item_id) { action = '/admin/edit_activity/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n if(item_id) {\n $state.reload();\n } else {\n $state.go('index.activity_list');\n }\n }\n }\n $scope.addItem = function() {\n data = $('#activityform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_activity/?item_id=' + String(item_id))\n .success(get_callback);\n }\n if(item_id) { $scope.getItem(item_id); }\n}])\n\n\napp.controller('ItemCatEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_itemcat/';\n if(item_id) { action = '/admin/edit_itemcat/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n if(item_id) {\n $state.reload();\n } else {\n $state.go('index.subcat_list');\n }\n }\n }\n $scope.addItem = function() {\n data = $('#itemcatform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function 
get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_cat/?cat_id=' + String(item_id))\n .success(get_callback);\n }\n if(item_id) { $scope.getItem(item_id); }\n}])\n\napp.controller('ItemSubcatEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_itemsubcat/';\n if(item_id) { action = '/admin/edit_itemsubcat/' + String(item_id) + '/'; }\n $scope.is_edit = Boolean(item_id);\n function post_callback(data) {\n console.log('callback');\n progress.complete();\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n if(item_id) {\n $state.reload();\n } else {\n $state.go('index.subcat_list');\n }\n }\n }\n $scope.addItem = function() {\n progress.start();\n data = $('#itemsubcatform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_subcat/?sub_cat_id=' + String(item_id))\n .success(get_callback);\n }\n if(item_id) {\n $scope.getItem(item_id);\n } else {\n $scope.item = {};\n window.ss=$scope;\n if($stateParams.cat_id) {\n $scope.item['cat_id'] = parseInt($stateParams.cat_id);\n }\n }\n}])\n\napp.controller('SubcatRecommendEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/subcat_recommend_edit/'+String(item_id)+'/';\n\n function post_callback(data) {\n console.log('callback');\n progress.complete();\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n $state.reload();\n\n }\n }\n $scope.edit = function() {\n progress.start();\n data = 
$('#subcatrecommend').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_subcat_recommend/?sub_cat_id=' + String(item_id))\n .success(get_callback);\n }\n\n $scope.getItem(item_id);\n \n}])\n\n\napp.controller('HospitalRecommendEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/hospital_recommend_edit/'+String(item_id)+'/';\n\n function post_callback(data) {\n console.log('callback');\n progress.complete();\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n $state.reload();\n\n }\n }\n $scope.edit = function() {\n progress.start();\n data = $('#hospitalrecommend').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_hospital_recommend/?hospital_id=' + String(item_id))\n .success(get_callback);\n }\n\n $scope.getItem(item_id);\n \n}])\n\napp.controller('ItemRecommendEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/item_recommend_edit/'+String(item_id)+'/';\n function post_callback(data) {\n console.log('callback');\n progress.complete();\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n if(item_id) {\n $state.reload();\n }\n }\n }\n $scope.edit = function() {\n progress.start();\n data = $('#itemrecommend').serializeObject();\n console.log(data);\n $http.post(action,\n data)\n .success(post_callback);\n 
}\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_item_recommend/?item_id=' + String(item_id))\n .success(get_callback);\n }\n $scope.getItem(item_id);\n\n}])\n\n\napp.controller('ItemActivityEditCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/item_activity_edit/'+String(item_id)+'/';\n function post_callback(data) {\n console.log('callback');\n progress.complete();\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n if (data.msg.length>0) {\n notification.primary(data.msg);\n }\n if(item_id) {\n $state.reload();\n }\n }\n }\n $scope.edit = function() {\n progress.start();\n data = $('#itemactivity').serializeObject();\n console.log(data);\n $http.post(action,\n data)\n .success(post_callback);\n }\n $scope.getItem = function(item_id) {\n function get_callback(result) {\n $scope.item = result.data;\n }\n $http.get('/admin/get_item_activity/?item_id=' + String(item_id))\n .success(get_callback);\n }\n $scope.getItem(item_id);\n\n}])\n\n\n//内部使用七牛\napp.directive('imagesField', ['$state', function ($state) {\n return {\n restrict: 'E',\n scope: {\n 'images': '=',\n 'fieldname': '='\n },\n link: function (scope, $scope) {\n window.iii = scope;\n scope.remove = function (image) {\n console.log('remove...'+image);\n image = image.split('/')\n image = image[image.length-1];\n scope.images = scope.images.replace(image+',', '');\n scope.images = scope.images.replace(','+image, '');\n scope.images = scope.images.replace(image, '');\n }\n scope.getImages = function(images) {\n if((!images)||images==''){ return [];}\n var images = scope.images.split(',');\n for (var i in images) {\n images[i] = \"http://7xnpdb.com2.z0.glb.qiniucdn.com/\"+images[i];\n }\n return images;\n }\n scope.sortableOptions = {\n containment: '#images-list-wrap'\n 
};\n scope.$on('ngRepeatFinished', function(ngRepeatFinishedEvent) {\n console.log('finish');\n });\n scope.format_images = function (image_list) {\n var a= [];\n for (var i in image_list) {\n var str = image_list[i].split('/');\n var img = str[str.length-1];\n a.push(img);\n }\n return a.join(',')\n }\n scope.$watch('images', function() {\n console.log('images changes');\n console.log(scope.images);\n scope.image_list = scope.getImages(scope.images);\n });\n if(!getCookie('qntoken')) {\n alert('七牛上传不了');\n }\n scope.image_list = scope.getImages(scope.images);\n var uploader = Qiniu.uploader({\n runtimes: 'html5,flash,html4',\n browse_button: 'qiniuuploads',\n container: 'images-container',\n drop_element: 'images-container',\n max_file_size: '100mb',\n flash_swf_url: 'js/plupload/Moxie.swf',\n dragdrop: true,\n chunk_size: '4mb',\n uptoken: eval(getCookie('qntoken')),\n domain: '127.0.0.1',\n auto_start: true,\n init: {\n 'FilesAdded': function(up, files) {\n $('table').show();\n $('#success').hide();\n plupload.each(files, function(file) {\n var progress = new FileProgress(file, 'fsUploadProgress');\n progress.setStatus(\"绛夊緟...\");\n progress.bindUploadCancel(up);\n });\n },\n 'BeforeUpload': function(up, file) {\n var progress = new FileProgress(file, 'fsUploadProgress');\n var chunk_size = plupload.parseSize(this.getOption('chunk_size'));\n if (up.runtime === 'html5' && chunk_size) {\n progress.setChunkProgess(chunk_size);\n }\n },\n 'UploadProgress': function(up, file) {\n var progress = new FileProgress(file, 'fsUploadProgress');\n var chunk_size = plupload.parseSize(this.getOption('chunk_size'));\n progress.setProgress(file.percent + \"%\", file.speed, chunk_size);\n console.log(file.percent);\n },\n 'UploadComplete': function() {\n $('#success').show();\n },\n 'FileUploaded': function(up, file, info) {\n var progress = new FileProgress(file, 'fsUploadProgress');\n progress.setComplete(up, info);\n console.log(info)\n console.log(info.key);\n var i = 
JSON.parse(info);\n console.log(i)\n if((scope.images||'').length>0) {\n if(scope.images.indexOf(i.key)==-1) {\n scope.images = scope.images+ ','+i.key;\n }\n } else {\n scope.images = i.key;\n }\n scope.image_list = scope.getImages(scope.images);\n scope.$apply();//refresh ng-repeat\n //$('#img_uploaded').append('<img class=\"uploaded-img\" src=\"http://7xnpdb.com2.z0.glb.qiniucdn.com/'+i.key+'\" </img>');\n },\n 'Error': function(up, err, errTip) {\n $('table').show();\n var progress = new FileProgress(err.file, 'fsUploadProgress');\n progress.setError();\n progress.setStatus(errTip);\n },\n 'Key': function(up, file) {\n var key = \"\";\n console.log(up);\n console.log(file);\n // do something with key\n var list = (file.name || '').split('.')\n var suffix = list[list.length-1] || 'jpg';\n return up.id+ (new Date()).getTime()+'.' + suffix;\n }\n }\n });\n\n uploader.bind('FileUploaded', function() {\n console.log('hello man,a file is uploaded');\n });\n\n },\n templateUrl: '/static/admin/tpl/images_field.html?version=13'\n };\n}]);\n\n\napp.directive('onFinishRender', function ($timeout) {\n return {\n restrict: 'A',\n link: function (scope, element, attr) {\n if (scope.$last === true) {\n $timeout(function () {\n console.log('12');\n scope.$emit('ngRepeatFinished');\n });\n }\n }\n }\n});\n\n\n//反馈详情\napp.controller('AdviceDetailCtrl', ['$scope', '$http', '$timeout', '$stateParams', '$state', 'ngDialog', function($scope, $http, $timeout, $stateParams, $state, ngDialog) {\n var advice_id = $stateParams.advice_id;\n function callback(data) {\n $scope.item = data.data;\n }\n $http.get('/admin/get_advice_detail?advice_id='+advice_id)\n .success(callback);\n}]);\n\n\napp.controller('ApplyDetailCtrl', ['$scope', '$http', '$timeout', '$stateParams', '$state', 'ngDialog', function($scope, $http, $timeout, $stateParams, $state, ngDialog) {\n var apply_id = $stateParams.apply_id;\n function callback(data) {\n $scope.item = data.apply;\n $scope.credit = data.credit;\n }\n 
$scope.supply = {'apply_id': apply_id};\n $scope.is_edit_supply = false;\n\n $scope.toggleEditSupply = function () {\n $scope.is_edit_supply = !$scope.is_edit_supply;\n $scope.supply.id_no = $scope.item.id_no;\n $scope.supply.stu_no = $scope.item.stu_no;\n\n $scope.supply.graduate_time= $scope.item.graduate_time.substring(0,10);\n $scope.supply.enrollment_time= $scope.item.enrollment_time.substring(0,10);\n $scope.supply.stu_education= $scope.item.stu_education;\n $scope.supply.stu_years= $scope.item.stu_years;\n $scope.supply.school = $scope.item.school;\n $scope.supply.name = $scope.item.name;\n $scope.supply.major = $scope.item.major;\n }\n\n $http.get('/admin/get_apply_detail?apply_id='+apply_id)\n .success(callback);\n $scope.viewImage = function (image) {\n $scope.image = image;\n $scope.angle = 0;\n ngDialog.open({\n template: '/static/admin/tpl/image_lightbox.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n window.img_scope = $scope;\n $scope.rotateImg = function () {\n $scope.angle = ($scope.angle+90)%360;\n $('.img-lightbox').find('img')[0].className = \"rotate\"+$scope.angle;\n console.log($scope.angle);\n }\n }],\n });\n }\n $scope.submitSupply = function (apply_id) {\n progress.start()\n function callback (data) {\n progress.complete();\n if(data.code==0) {\n $state.reload();\n } else {\n notification.error(data.msg);\n }\n }\n $http.post('/admin/supply_apply/', $scope.supply)\n .success(callback);\n \n }\n $scope.toSupply = function (apply_id) { //学信网账号正确,去填充资料\n if(confirm('学信网账号确认可以登录吗,确认后将进入到补充资料页面?')) {\n progress.start()\n function callback (data) {\n progress.complete();\n if(data.code==0) {\n $state.reload();\n } else {\n notification.error(data.msg);\n }\n }\n $http.post('/admin/to_supply/', {'apply_id':apply_id})\n .success(callback);\n }\n }\n $scope.verifyChsi = function (user_id) {\n if ($scope.chsi_info) {\n ngDialog.open({\n template: '/static/admin/tpl/chsi_dialog.html?version=13',\n scope: $scope,\n })\n 
return;\n }\n function openDialog($scope) {\n ngDialog.open({\n template: '/static/admin/tpl/chsi_dialog.html?version=13',\n scope: $scope\n })\n return; \n }\n window.openDialog = openDialog;\n function callback(response) {\n progress.complete()\n window.res = response;\n if(response.return_captcha) {\n notification.error('请输入验证码')\n $scope.return_captcha = true;\n $scope.showCaptcha = true;\n $scope.captcha_img = response.data;\n ngDialog.open({\n template: '/static/admin/tpl/chsi_captcha.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n window.ss=$scope;\n function refresh_callback(data) {\n progress.complete()\n $scope.captcha_img = data.data\n }\n $scope.refreshCaptcha = function () {\n progress.start()\n $http.get('/admin/refresh_chsi_captcha/?apply_id='+String(apply_id))\n .success(refresh_callback);\n }\n $scope.Ok = function() {\n //alert('拒绝');\n function post_callback(data) {\n \n //$scope.closeThisDialog();\n progress.complete();\n if(data.success) {\n $scope.chsi_info = data.data;\n window.data = data;\n \n $scope.showCaptcha = false;\n } else {\n notification.error('验证码输入错误')\n $scope.refreshCaptcha();\n }\n }\n var data = {\n apply_id: $scope.item.id,\n captcha: $scope.captcha\n }\n progress.start();\n $http.post('/admin/set_chsi_captcha/',\n data)\n .success(post_callback);\n }\n }]\n })\n } else if(!response.success) {\n notification.error(response.msg||'查询失败')\n } else {\n $scope.chsi_info = response.data;\n ngDialog.open({\n template: '/static/admin/tpl/chsi_dialog.html?version=13',\n scope: $scope\n })\n }\n }\n progress.start()\n $http.get('/admin/verify_chsi/?user_id='+user_id)\n .success(callback);\n }\n $scope.Reject = function () {\n ngDialog.open({\n template: '/static/admin/tpl/apply_reject.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n window.ss=$scope;\n $scope.Ok = function() {\n //alert('拒绝');\n function post_callback(data) {\n $scope.closeThisDialog();\n 
progress.complete();\n console.log('callback');\n if(data.code!=0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.reload();\n }\n }\n var data = {\n apply_id: $scope.item.id,\n reason: $scope.reason\n }\n progress.start();\n $http.post('/admin/apply_reject/',\n data)\n .success(post_callback);\n }\n $scope.Cancel = function() {\n $scope.closeThisDialog();\n }\n }]\n });\n }\n $scope.Approve = function () {\n ngDialog.open({\n template: '/static/admin/tpl/apply_approve.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n window.ss=$scope;\n $scope.Ok = function() {\n function post_callback(data) {\n $scope.closeThisDialog();\n progress.complete();\n console.log('callback');\n if(data.code!=0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.reload();\n }\n }\n var data = {\n apply_id: $scope.item.id,\n total : $scope.total,\n }\n progress.start();\n $http.post('/admin/apply_approve/',\n data)\n .success(post_callback);\n }\n $scope.Cancel = function() {\n $scope.closeThisDialog();\n }\n }]\n });\n }\n}])\n\n\napp.controller('UserDetailCtrl', ['$scope', '$http', '$stateParams', '$state', 'ngDialog', function($scope, $http, $stateParams, $state, ngDialog) {\n var item_id = $stateParams.item_id;\n $scope.currentpage = 1;\n function callback(response) {\n $scope.item = response.data;\n $scope.apply = response.apply;\n $scope.location = response.location;\n $scope.wechat_info = response.wechat_info;\n $http.get('/admin/get_order_list/?keyword='+$scope.item.phone).success(\n function (data) {\n $scope.user_orders = data.infos;\n $scope.user_orders_total = data.total;\n }\n \n )\n \n }\n $http.get('/admin/get_user_detail?item_id='+item_id)\n .success(callback);\n\n $scope.routeTo = function(page) {\n $http.get('/admin/get_user_list?same_user_id='+item_id+'&page='+page)\n .success(function (data){\n $scope.infos = data.infos;\n $scope.total = data.total;\n 
$scope.page_info = data.page_info;\n $scope.currentpage = page;\n });\n }\n $scope.routeTo(1);\n\n}])\n\n\napp.controller('NewCityCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_city/';\n\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.city_list');\n }\n }\n $scope.addItem = function() {\n data = $('#cityform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n return false\n}])\n\n\napp.controller('NewPeriodPayChoiceCtrl', ['$scope', '$http', '$stateParams', '$state', function($scope, $http, $stateParams, $state) {\n var item_id = $stateParams.item_id;\n var action = '/admin/new_period_pay_choice/';\n\n function post_callback(data) {\n console.log('callback');\n if(data.code>0) {\n notification.error(data.msg||'服务器异常');\n }\n else {\n console.log('go to ');\n $state.go('index.period_pay_choice_list');\n }\n }\n $scope.addItem = function() {\n data = $('#periodpaychoiceform').serializeObject();\n $http.post(action,\n data)\n .success(post_callback);\n }\n return false\n}])\n\n\napp.controller(\"ngCity\",function($scope, $http){\n var vm = $scope.vm = {};\n\n vm.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_city_list/')\n .success(callback);\n})\n\n\napp.directive('jsonText', function() {\n return {\n restrict: 'A',\n require: 'ngModel',\n link: function(scope, element, attr, ngModel) { \n function into(input) {\n return JSON.parse(input);\n }\n function out(data) {\n return JSON.stringify(data);\n }\n ngModel.$parsers.push(into);\n ngModel.$formatters.push(out);\n\n }\n };\n});\n\n\napp.controller('AmapCtrl', ['$scope',\n function($scope) {\n\n }\n]);\n\napp.directive('positionField', function () {\n return {\n restrict: 'EA', //E 
= element, A = attribute, C = class, M = comment \n scope: {\n //@ reads the attribute value, = provides two-way binding, & works with functions\n item: '=',\n },\n templateUrl: '/static/admin/tpl/position_field.html?version=13',\n controller: 'AmapCtrl',\n link: function (scope, $scope, element, attrs) { //DOM manipulation\n init_map();\n console.log(scope.item); //还未初始化\n\n if(scope.item.lng&&scope.item.lng>0) {\n console.log('center pos....');\n var lnglat = [parseFloat(scope.item.lng), parseFloat(scope.item.lat)];\n addMarker(lnglat);\n }\n\n }\n }\n});\n\napp.controller(\"ngItemSubcat\",function($scope, $http){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_subcat_list')\n .success(callback);\n})\n\n\napp.controller(\"ngPeriodChoice\",function($scope, $http){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_period_choice_list')\n .success(callback);\n})\n\n\napp.controller(\"ngHospital\",function($scope, $http){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_hospital_list')\n .success(callback);\n})\n\n\napp.controller(\"ngSchoolCity\",function($scope, $http){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_school_city_list/')\n .success(callback);\n})\n\n\napp.controller(\"ngItemCat\",function($scope, $http, $state){\n window.ngitems = $scope;\n $scope.optionsData = [];\n $scope.itemCatids = [];\n $scope.iditemMap = {};\n function callback(data) {\n $scope.optionsData = data.infos;\n for(var i in $scope.optionsData) {\n var cat = $scope.optionsData[i];\n $scope.iditemMap[cat.id] = cat\n $scope.itemCatids.push(cat.id);\n }\n }\n $http.get('/admin/get_cat_list')\n .success(callback);\n $scope.editCat = function($event, cat_id) {\n window.eve = $event;\n $event.stopPropagation();\n var 
params = {'item_id': cat_id};\n $state.go('index.itemcat_edit', params);\n }\n $scope.sortableOptions = {\n containment: '#images-list-wrap',\n stop: function(e, vt) {\n console.log('stop drag cat');\n }\n };\n \n $scope.goAddSubCat = function($event, cat_id) {\n window.eve = $event;\n $event.stopPropagation();\n var params = {'cat_id': cat_id};\n $state.go('index.new_itemsubcat', params);\n }\n})\n\napp.controller(\"ngActivity\", function($scope, $http, $state){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_activity_list')\n .success(callback);\n\n})\n\n\napp.controller(\"ngCoupon\", function($scope, $http, $state){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_coupon_list')\n .success(callback);\n\n})\n\n\napp.controller(\"ngPromoter\", function($scope, $http, $state){\n $scope.optionsData = [];\n function callback(data) {\n $scope.optionsData = data.infos;\n }\n $http.get('/admin/get_promoter_list')\n .success(callback);\n\n})\n\n\n//图片字段\n//内部使用七牛\napp.directive('imageField', ['$state', '$http', '$timeout', function ($state, $http, $timeout) {\n return {\n restrict: 'E',\n scope: {\n 'image': '=',\n 'prefix': '=',\n 'fieldname': '='\n },\n link: function (scope, $scope) {\n scope.extractQiniuDomain = function (url) {\n if(!url) { return undefined }\n url = url.replace(/^.*\\/\\/[^\\/]+\\//, '')\n return url\n }\n\n window.sc=scope;\n $scope.image = scope.image;\n $scope.prefix = scope.prefix || 'admin_image';\n $scope.fieldname = scope.fieldname;\n scope.input_status = 'imageInput-'+scope.fieldname;\n scope.image_input_status = 'imageInput-'+scope.fieldname +'-input'\n \n console.log(scope.image_input_status);\n var handleFileSelect=function(evt) {\n console.log('image change');\n angular.element(document.querySelector(scope.input_status)).html('上传中...');\n var file=evt.currentTarget.files[0];\n console.log('file 
change;');\n var reader = new FileReader();\n reader.onload = function (evt) {\n myImage=evt.target.result;\n $http.post('/admin/upload_image/', {image: myImage, prefix: $scope.prefix})\n .then(function(res) {\n if (res.status === 200) {\n window.res=res;\n scope.current_img = res.data.fullpath;\n angular.element(document.querySelector('#'+scope.input_status)).html('上传成功');\n } else {\n notificatio.error((res.data||{}).msg||'服务器异常');\n }\n });\n };\n reader.readAsDataURL(file);\n };\n \n $timeout(function () {\n scope.current_img = scope.image;\n angular.element(document.querySelector('#'+scope.image_input_status)).on('change',handleFileSelect);\n }, 500);\n },\n templateUrl: '/static/admin/tpl/image_field.html?version=13'\n };\n}]);\n\napp.directive('draggable', function() {\n return function(scope, element) {\n // this gives us the native JS object\n var el = element[0];\n window.t = el;;\n //el.draggable = true;\n\n el.addEventListener(\n 'dragstart',\n function(e) {\n console.log('start');\n el.cursor_start_x = e.clientX;\n el.cursor_start_y = e.clientY;\n el.current_x = parseInt($('.ngdialog-content').css('left'));\n el.current_y = parseInt($('.ngdialog-content').css('top'));\n e.dataTransfer.effectAllowed = 'move';\n e.dataTransfer.setData('Text', this.id);\n this.classList.add('drag');\n $('.ngdialog-content').css('opacity', 0.1); \n return false;\n },\n false\n );\n el.addEventListener(\n 'dragover',\n function(e) {\n window.e = e;\n console.log(e.clientX);\n var offset_x = e.clientX - el.cursor_start_x;\n var offset_y = e.clientY - el.cursor_start_y;\n console.log(offset_x);\n console.log(offset_y);\n $('.ngdialog-content').css('left', el.current_x+offset_x*2);\n $('.ngdialog-content').css('top', el.current_y+offset_y*2);\n e.dataTransfer.dropEffect = 'move';\n // allows us to drop\n if (e.preventDefault) e.preventDefault();\n this.classList.add('over');\n return false;\n },\n false\n );\n el.addEventListener(\n 'dragend',\n function(e) {\n 
console.log('end');\n this.classList.remove('drag');\n $('.ngdialog-content').css('opacity', 1); \n return false;\n },\n false\n );\n }\n});\n\n\napp.controller('DateTimePickerDemoCtrl',\nfunction ($scope, $timeout) {\n $scope.dateTimeNow = function() {\n $scope.date = new Date();\n };\n $scope.dateTimeNow();\n \n $scope.toggleMinDate = function() {\n $scope.minDate = $scope.minDate ? null : new Date();\n };\n \n $scope.maxDate = new Date('2014-06-22');\n $scope.toggleMinDate();\n\n $scope.dateOptions = {\n startingDay: 1,\n showWeeks: false\n };\n \n // Disable weekend selection\n $scope.disabled = function(calendarDate, mode) {\n return mode === 'day' && ( calendarDate.getDay() === 0 || calendarDate.getDay() === 6 );\n };\n \n $scope.hourStep = 1;\n $scope.minuteStep = 15;\n\n $scope.timeOptions = {\n hourStep: [1, 2, 3],\n minuteStep: [1, 5, 10, 15, 25, 30]\n };\n\n $scope.showMeridian = true;\n $scope.timeToggleMode = function() {\n $scope.showMeridian = !$scope.showMeridian;\n };\n \n $scope.$watch(\"date\", function(value) {\n console.log('New date value:' + value);\n }, true);\n \n $scope.resetHours = function() {\n $scope.date.setHours(1);\n };\n});\n\n\napp.controller('DatetimePickerController', ['$scope', function($scope) {\n var that = this;\n var in10Days = new Date();\n in10Days.setDate(in10Days.getDate() + 10);\n \n // Disable weekend selection\n this.disabled = function(date, mode) {\n return (mode === 'day' && (new Date().toDateString() == date.toDateString()));\n };\n\n this.dateOptions = {\n showWeeks: false,\n startingDay: 1\n };\n \n this.timeOptions = {\n readonlyInput: false,\n showMeridian: false\n };\n \n this.dateModeOptions = {\n minMode: 'year',\n maxMode: 'year'\n };\n \n this.openCalendar = function(e) {\n $scope.is_open = true;\n };\n \n // watch date4 and date5 to calculate difference\n\n $scope.$on('$destroy', function() {\n that.calculateWatch();\n });\n}]);\n\n\n//日期字段\napp.directive('datetimeField', ['$state', function ($state) 
{\n return {\n restrict: 'E',\n scope: {\n 'val': '=',\n 'fieldname': '='\n },\n link: function (scope, $scope) {\n window.s=scope;\n },\n templateUrl: '/static/admin/tpl/datetime_field.html?version=13'\n };\n}]);\n\n\n//开关\napp.directive('switchField', ['$state', '$timeout', function ($state, $timeout) {\n return {\n restrict: 'E',\n scope: {\n 'val': '=',\n 'fieldname': '='\n },\n link: function (scope, $scope) {\n window.ss =scope;\n console.log('switch......');\n $timeout(function(){$(\"[name='\"+scope.fieldname+\"']\").bootstrapSwitch();},10)\n },\n templateUrl: '/static/admin/tpl/switch.html?version=13'\n };\n}]);\n\n//inplace编辑排序\napp.directive('orderField', ['$state', '$timeout', '$q', '$http', function ($state, $timeout, $q, $http) {\n return {\n restrict: 'E',\n scope: {\n 'action': '=',\n 'item': '='\n },\n link: function (scope, $scope) {\n scope.myModel = scope.item.sort_order;\n scope.validateOnServer = function(newValue) {\n var defer = $q.defer();\n function post_callback(data) {\n if(data.code==0) {\n defer.resolve();\n notification.primary(data.msg||'修改成功');\n $state.reload();\n }else {\n defer.reject();\n notification.error(data.msg||'修改失败');\n }\n }\n var data = {\n 'sort_order' : newValue,\n 'item_id' : scope.item.id,\n }\n $http.post(scope.action,\n data)\n .success(post_callback)\n .error(function () {\n defer.reject()\n notification.error('修改失败');\n });\n \n return defer.promise;\n };\n },\n templateUrl: '/static/admin/tpl/inplace_edit.html?version=13'\n };\n}]);\n\n\n\n\n//app toggleclass\napp.directive('toggleClass', function(){\n return {\n restrict: 'A',\n scope: {\n toggleClass: '@'\n },\n link: function($scope, $element){\n window.thee = $element;\n var icon_i = $($element.find('i')[1]);\n icon_i.addClass('fa-angle-right');\n $element.on('click', function(evt){\n window.ev = evt;\n var icon_i = $($element.find('i')[1]);\n console.log(icon_i);\n if(!$(ev.target).hasClass('menu-label')) {\n return true; //true事件继续冒泡\n }\n 
if(!$element.hasClass($scope.toggleClass)) {\n icon_i.removeClass('fa-angle-down');\n icon_i.addClass('fa-angle-right');\n console.log('right')\n } else {\n console.log('bottom');\n icon_i.removeClass('fa-angle-right');\n icon_i.addClass('fa-angle-down');\n }\n $element.toggleClass($scope.toggleClass);\n });\n }\n };\n});\n\n\n//寝室详情\napp.controller('RoomDetailCtrl', ['$scope', '$http', '$timeout', '$stateParams', '$state', 'ngDialog', function($scope, $http, $timeout, $stateParams, $state, ngDialog) {\n var room_id = $stateParams.room_id;\n function callback(data) {\n $scope.room = data.room;\n }\n\n $http.get('/admin/get_room_detail?room_id='+room_id)\n .success(callback);\n $scope.viewImage = function (image) {\n $scope.image = image;\n $scope.angle = 0;\n ngDialog.open({\n template: '/static/admin/tpl/image_lightbox.html?version=13',\n scope: $scope,\n controller: ['$scope', function($scope) {\n window.img_scope = $scope;\n $scope.rotateImg = function () {\n $scope.angle = ($scope.angle+90)%360;\n $('.img-lightbox').find('img')[0].className = \"rotate\"+$scope.angle;\n console.log($scope.angle);\n }\n }],\n });\n }\n\n}])\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6313363909721375, "alphanum_fraction": 0.6658986210823059, "avg_line_length": 25.303030014038086, "blob_id": "5a4e3ec39f27fda59a2d9d65ae3de4c3b77169b8", "content_id": "3e03ca32da69aa9065f5d5aa98f37fc0d9ed5e6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/migrations/versions/19af1cb7edf0_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 19af1cb7edf0\nRevises: 5adc2c5e2c4f\nCreate Date: 2016-03-03 16:30:05.662503\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '19af1cb7edf0'\ndown_revision = '5adc2c5e2c4f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands 
auto generated by Alembic - please adjust! ###\n op.create_table('rd_money_prize',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('amount', sa.Integer(), nullable=True),\n sa.Column('sent', sa.Integer(), nullable=True),\n sa.Column('total', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('rd_money_prize')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.635064959526062, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 26.5, "blob_id": "bbbc2690f456fcac08db38fe7f03853aed112fa8", "content_id": "81acf58b863b5cb2226af0150a2836c0756ceff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 94, "num_lines": 28, "path": "/migrations/versions/5329d119ee5f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 5329d119ee5f\nRevises: 125c0c0cb424\nCreate Date: 2015-11-04 10:52:05.593655\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5329d119ee5f'\ndown_revision = '125c0c0cb424'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('is_recommend', sa.Boolean(), nullable=True))\n op.add_column('item', sa.Column('status', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('item', 'status')\n op.drop_column('item', 'is_recommend')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.609368622303009, "alphanum_fraction": 0.6101832985877991, "avg_line_length": 32.121620178222656, "blob_id": "557e3148f17393ca2fdb10ba613a47a1b2330a1c", "content_id": "227e1ade5170258578b36f970e422d5fdd16409f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2475, "license_type": "no_license", "max_line_length": 98, "num_lines": 74, "path": "/ops/hospital.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom collections import defaultdict\nfrom sqlalchemy import and_\n\nfrom util.sqlerr import SQL_DUPLICATE_NAME\nfrom util.sqlerr import SQL_DUPLICATE_PHONE\n\nfrom models import db\nfrom models import Item\nfrom ops.utils import get_items\nfrom ops.utils import count_items\nfrom ops.utils import get_page\nfrom models import HospitalUser\n\n\nclass HospitalService(object):\n\n @staticmethod\n def check_user(name, passwd):\n admin = HospitalUser.query.filter(HospitalUser.name==name).first()\n return admin and admin.passwd==passwd\n\n @staticmethod\n def create_user(name, passwd, hospital_id):\n try:\n admin = HospitalUser(name=name, passwd=passwd, hospital_id=hospital_id)\n db.session.add(admin)\n db.session.commit()\n return admin.id\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE_NAME.search(str(e)):\n assert 0, '用户名已存在'\n else:\n import traceback\n traceback.print_exc()\n\n @staticmethod\n def change_passwd(name, new_passwd):\n ''' 修改密码 '''\n count = HospitalUser.query.filter(HospitalUser.name==name).update({'passwd':new_passwd})\n db.session.commit()\n return count\n\n @staticmethod\n def get_hospital_sub_cat_ids(hospital_id):\n result = db.session.query(Item).filter(Item.hospital_id==hospital_id).all()\n sub_cat_ids = []\n for item in result:\n sub_cat_id_list = item.as_dict()['sub_cat_id_list']\n 
sub_cat_ids.extend(sub_cat_id_list)\n return sub_cat_ids\n\n @staticmethod\n def get_hospital_sub_cat_ids_and_count(hospital_id):\n result = db.session.query(Item).filter(Item.hospital_id==hospital_id).all()\n sub_cat_ids_map = defaultdict(set)\n for item in result:\n item_dict = item.as_dict()\n for i in item_dict['sub_cat_id_list']:\n sub_cat_ids_map[i].add(item_dict['id'])\n return sub_cat_ids_map\n\n @staticmethod\n def get_user_by_name(name):\n return HospitalUser.query.filter(HospitalUser.name==name).first()\n\n @staticmethod\n def get_paged_hospital_admin_users(**kw):\n return get_page(HospitalUser, {}, **kw)\n\n @staticmethod\n def count_admin(where=None):\n return count_items(HospitalUser, where=where)\n\n\n\n\n" }, { "alpha_fraction": 0.6199095249176025, "alphanum_fraction": 0.6223459839820862, "avg_line_length": 36.76315689086914, "blob_id": "c6e511fd65046cffff428d26a1d105d21eb854fd", "content_id": "ff0091e74a68273a1375de55126774321ad6fc37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2887, "license_type": "no_license", "max_line_length": 137, "num_lines": 76, "path": "/ops/utils.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom functools import wraps\n\nfrom sqlalchemy import func\nfrom util.utils import keep_fields_from_list\nfrom models import db\n\n\ndef get_page(model, condition=None, offset=None, \\\n limit=10, fields=None, \\\n start=None, end=None, where=None, \\\n join=None, order_by=None, \\\n extra=None, order_by_case=None, \\\n _sort='id', _sort_dir='DESC', no_limit=False):\n if extra:\n query = db.session.query(model,extra).outerjoin(extra)\n else:\n query = db.session.query(model)\n if join is not None: query = query.join(join)\n if order_by is not None:\n if isinstance(order_by, (tuple,list)):\n query = query.order_by(*order_by)\n else:\n query = query.order_by(order_by)\n elif _sort and _sort!='id' and getattr(model, _sort, None): 
#order by 第一个column为主排序column\n if _sort_dir=='ASC': order_by = getattr(model, _sort).asc()\n if _sort_dir=='DESC': order_by = getattr(model, _sort).desc()\n if order_by_case is None: query = query.order_by(order_by)\n if order_by_case is None: query = query.order_by(model.id.desc())\n else:\n query = query.order_by(model.id.asc()) if _sort_dir=='ASC' else query.order_by(model.id.desc())\n if condition: query = query.filter_by(**condition)\n if where is not None: query = query.filter(where)\n if offset: query = query.filter(getattr(model, _sort)< offset) if _sort_dir=='DESC' else query.filter(getattr(model, _sort) > offset)\n if start:query = query.offset(start)\n if end: query = query.limit(end-start)\n items = []\n\n if order_by_case is not None:\n query = query.order_by(order_by_case)\n data = query.limit(limit+1).all() if not (start or end or no_limit) else query.all()\n extras = None\n if extra is not None:\n extras = [i[1] for i in data if i[1]]\n data = [i[0] for i in data]\n items[:] = tuple(row.as_dict() for row in (data if no_limit else data[:limit]))\n is_more = len(data)>limit\n\n if fields: keep_fields_from_list(items, fields)\n if extra:\n return is_more, items, extras\n return is_more, items\n\n\ndef get_items(model, ids=None, fields=None, all=False):\n query = model.query\n\n if getattr(model, 'status', None) and not(all):\n query = query.filter(model.show_status())\n data = []\n if not ids: return data\n data[:] = query.filter(model.id.in_(ids)).all()\n data[:] = tuple(i.as_dict() for i in data)\n if fields: keep_fields_from_list(data, fields)\n return data\n\n\ndef get_fields_column(model, fields):\n return tuple(getattr(model, field) for field in fields)\n\n\ndef count_items(model, where=None, field='id'):\n query = db.session.query(func.count(getattr(model, field)))\n if where is not None: query = query.filter(where)\n\n return query.scalar()\n\n\n\n" }, { "alpha_fraction": 0.6617862582206726, "alphanum_fraction": 0.6734992861747742, 
"avg_line_length": 28.7391300201416, "blob_id": "f81dc41e6f41c16ead9c302905a97aa0c87dff36", "content_id": "c41449d4bb7e39b6bfd89e95202c0085acec2ee6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/sql_profile.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport time\n\nfrom sqlalchemy import event\nfrom sqlalchemy.engine import Engine\n\n\[email protected]_for(Engine, \"before_cursor_execute\")\ndef before_cursor_execute(conn, cursor, statement,\n parameters, context, executemany):\n context._query_start_time = time.time()\n print(\"查询开始:\\n%s\\n查询参数:\\n%r\" % (statement,parameters))\n\n\[email protected]_for(Engine, \"after_cursor_execute\")\ndef after_cursor_execute(conn, cursor, statement,\n parameters, context, executemany):\n total = time.time() - context._query_start_time\n\n print(\"查询耗时: %.02fms\\n\" % (total*1000))" }, { "alpha_fraction": 0.5899999737739563, "alphanum_fraction": 0.7099999785423279, "avg_line_length": 15.5, "blob_id": "7524912078137b3f65efc4da8e72bf4e3ab8e70e", "content_id": "0e0317fa92d7308994744edd45b22c9aeefb9b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 100, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/uwsgi_nginx_dev.ini", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "[uwsgi]\nsocket=127.0.0.1:10002\nwsgi=app:app\nprocesses = 5\nlogdate = true\nlogto = /tmp/user_dev.log\n\n" }, { "alpha_fraction": 0.6105769276618958, "alphanum_fraction": 0.6794871687889099, "avg_line_length": 23, "blob_id": "d039830a07e2ad466b3f20e05c244f41cec03a1b", "content_id": "fe2b51aa76f2163391d0b6fc2d28375ae8dcbffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 624, "license_type": "no_license", "max_line_length": 93, "num_lines": 26, "path": "/migrations/versions/5853d4187f15_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 5853d4187f15\nRevises: 3f2769d4ed16\nCreate Date: 2016-01-07 15:41:09.540410\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5853d4187f15'\ndown_revision = '3f2769d4ed16'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item_sub_cat', sa.Column('cat_ids', sa.String(length=500), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('item_sub_cat', 'cat_ids')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6274193525314331, "alphanum_fraction": 0.6838709712028503, "avg_line_length": 22.846153259277344, "blob_id": "8539bda02df4ef842f449fe2d54edb4077da10aa", "content_id": "c9a7a215b7abe5c37a0441b43e3dc0b63a1945cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "no_license", "max_line_length": 91, "num_lines": 26, "path": "/migrations/versions/4bbb37c90d8c_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4bbb37c90d8c\nRevises: 13a5889df13\nCreate Date: 2016-03-09 10:49:01.279517\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4bbb37c90d8c'\ndown_revision = '13a5889df13'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user_advice', sa.Column('remark', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user_advice', 'remark')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6115702390670776, "alphanum_fraction": 0.6743801832199097, "avg_line_length": 22.269229888916016, "blob_id": "e12543041d44d3cb9be84a8134ebde85793d5476", "content_id": "44945f5b6e86b3a8d3d753b01cce2d62c3c70333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 84, "num_lines": 26, "path": "/migrations/versions/2ce138017f09_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2ce138017f09\nRevises: 38dd6746c99b\nCreate Date: 2015-12-10 19:14:00.636524\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2ce138017f09'\ndown_revision = '38dd6746c99b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user_coupon', sa.Column('is_trial', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user_coupon', 'is_trial')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5603063106536865, "alphanum_fraction": 0.5686352252960205, "avg_line_length": 36.30281066894531, "blob_id": "4567dc5ca748c365d69339d0ef0fe25e461a991d", "content_id": "16c249a8e63fd9b6b2b643a2d07ba878f100ce87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111310, "license_type": "no_license", "max_line_length": 151, "num_lines": 2916, "path": "/user/views.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport time\nimport json\nfrom itertools import chain\n\nfrom flask import request\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import send_from_directory\n\nfrom sqlalchemy import case\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom models import db\nfrom models import ItemComment\nfrom models import Item\nfrom models import School\nfrom models import UserCoupon\nfrom models import Order\nfrom models import PeriodPayLog\nfrom models import ItemFav\nfrom models import Repayment\nfrom models import UserCoupon\nfrom models import HelpCat\nfrom models import ActivityItem\nfrom models import CreditApply\nfrom models import ItemSubCat\nfrom models import Hospital\nfrom models import DailyCoupon\nfrom models import BeautyEntry\n\nfrom util.utils import format_price\nfrom util.utils import deadline_zh\nfrom util.utils import jsonify_response\nfrom util.utils import template_response\nfrom util.utils import trans_list\nfrom util.utils import calc_expire_remain\nfrom util.utils import dt_obj\nfrom util.utils import day_delta\nfrom util.utils import get_current_period\nfrom util.utils import get_next_period\nfrom util.utils import get_due_time\nfrom util.utils import is_delayed\nfrom util.utils import date_to_datetime\nfrom util.utils import get_timestamp\nfrom util.utils import add_months\nfrom util.utils import 
js_response\nfrom util.utils import get_date_delta\nfrom util.utils import cacl_punish_fee\nfrom util.utils import get_time_str_from_dt\nfrom util.utils import prefix_img_domain\nfrom util.utils import get_delayed_info\nfrom util.utils import get_next_working_day\nfrom util.utils import get_img_key\nfrom util.utils import set_coupon_use_time\nfrom util.utils import format_dt\nfrom util.utils import format_rate\nfrom util.utils import str_to_int_list\nfrom util.sign import get_cookie\nfrom util.sign import set_cookie\nfrom util.decorators import wechat_loggin_dec\n\nfrom util.validators import Optional\nfrom util.validators import REGField\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IntChoiceField\nfrom util.validators import IdField\nfrom util.validators import JsonField\nfrom util.validators import ChoiceField\nfrom ops.common import pay_success_action\nfrom ops.common import get_item_activity_price\nfrom ops.bulks import fetch_user_refs\nfrom ops.bulks import fetch_item_refs\nfrom ops.bulks import fetch_item_subcat_refs\nfrom ops.bulks import fetch_min_period_info\nfrom ops.bulks import fetch_hospital_refs\nfrom ops.bulks import fetch_order_refs\nfrom ops.bulks import fetch_coupon_refs\nfrom ops.item import ItemService\nfrom ops.comment import CommentService\nfrom ops.promote import PromoteService\nfrom ops.user import UserService\nfrom ops.order import OrderService\nfrom ops.activity import ActivityService\nfrom ops.log import LogService\nfrom ops.data import DataService\nfrom ops.redpack import RedpackService\nfrom ops.credit import CreditService\nfrom ops.coupon import CouponService\nfrom ops.beauty_tutorial import TutorialService\nfrom ops.bulks import fetch_servicecode_refrence\nfrom constants import ResponseCode\nfrom constants import ORDER_STATUS\nfrom constants import ORDER_STATUS_LABEL\nfrom constants import PAY_METHOD\nfrom constants import 
REPAYMENT_STATUS\nfrom constants import APPLY_STATUS\nfrom constants import CREDIT_STATUS\nfrom settings import CONTACT\nfrom settings import DEFAULT_CREDIT\nfrom settings import WX_PAY_NOTIFY_URL\nfrom settings import WX_REPAYMENT_NOTIFY_URL\nfrom thirdparty.wx_pay import Notify_pub\nfrom thirdparty.wx_pay import WxPayConf_pub\nfrom thirdparty.wx_pay import UnifiedOrder_pub\nfrom thirdparty.wx_pay import JsApi_pub\nfrom thirdparty.wx_pay import get_wx_pay_params\nfrom thirdparty.wechat import wechat\nfrom thirdparty.wechat import get_jssdk_context\nfrom thirdparty.qn import gen_qn_token\nfrom thirdparty.qn import upload_img\nfrom thirdparty.sms import send_sms_new_order\nfrom settings import SERVER_NAME\nfrom settings import ITEM_ORDER_CHOICES\nfrom settings import HOSPITAL_ORDER_CHOICES\nfrom settings import CAT_ICONS\nfrom settings import CAT_ICONS_ACTIVE\n\nfrom ops.order import set_order_status\n\n\ndef set_coupon_cat_str(info, item_cats=None, item_subcats=None):\n ''' 优惠券品类信息 '''\n if info['coupon_cat']==0:\n info['cat_str'] = '全部适用'\n elif info['coupon_cat']==1:\n cat = filter(lambda i:i['id']==info['cat_id'], item_cats)[0]\n info['cat_str'] = '仅限{}类项目'.format(cat['name'])\n elif info['coupon_cat']==2:\n print info['sub_cat_id'], [i['id'] for i in item_subcats]\n subcat = filter(lambda i:i['id']==info['sub_cat_id'], item_subcats)[0]\n info['cat_str'] = '仅限{}项目'.format(subcat['name'])\n else:\n info['cat_str'] = '指定项目'\n\n\n\n@wechat_loggin_dec(required=False, validator=None, app=True)\ndef user_index():\n ''' 用户首页 '''\n _, recommend_sub_cats = ItemService.get_paged_recommend_subcats(_sort='sort_order', _sort_dir='ASC')\n fetch_item_subcat_refs(recommend_sub_cats)\n\n current_activity = ActivityService.get_current_activity() or {}\n where = ActivityItem.activity_id==current_activity.get('id')\n fields = ('id', 'item_id', 'price')\n _, activity_items = ItemService.get_paged_activity_items(fields=fields, where=where, _sort='sort_order', _sort_dir='ASC')\n 
fields = ('id', 'item_id', 'image', 'desc')\n where = None\n _, recommend_items = ItemService.get_paged_recommend_items(fields=fields, where=where, _sort='sort_order', _sort_dir='ASC')\n\n fields = ['id', 'title', 'price', 'orig_price', 'has_fee', 'support_choice_list']\n fetch_item_refs(chain(activity_items, recommend_items), fields=fields)\n\n item_dict_list = [i['item'] for i in chain(activity_items, recommend_items)]\n item_list = []\n for i in item_dict_list:\n if i not in item_list:\n item_list.append(i)\n fetch_min_period_info(item_list)\n \n banner = [\n {'image':'http://7xnpdb.com1.z0.glb.clouddn.com/o_1a32t99l213e55j47fp1v96u80111348368_1467882916861451_480196332_n.jpg', 'link':'/user/login'},\n {'image':'http://7xnpdb.com1.z0.glb.clouddn.com/o_1a32t99l213e55j47fp1v96u80111348368_1467882916861451_480196332_n.jpg', 'link':'/user/login'},\n {'image':'http://7xnpdb.com1.z0.glb.clouddn.com/o_1a32t99l213e55j47fp1v96u80111348368_1467882916861451_480196332_n.jpg', 'link':'/user/login'}\n ]\n context = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'activity_items' : activity_items,\n 'recommend_items' : recommend_items,\n 'activity' : current_activity,\n 'banner' : banner,\n }\n print dir(request)\n print request.headers\n\n return jsonify_response(context)\n js_sdk_context = get_jssdk_context()\n return render_template('user/user_index.html', recommend_sub_cats=recommend_sub_cats, nav={1:'active'}, **js_sdk_context)\n\n\nitem_filters_validator = Inputs(\n {\n 'sub_cat_id' : Optional(IdField(msg='分类id')),\n 'hospital_id' : Optional(IdField(msg='医院id')),\n 'city_id' : Optional(IdField(msg='城市id')),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数')),\n 'sort_type' : Optional(IntChoiceField(choices=[1,2,3,4], msg='排序选项')),\n }\n )\n@wechat_loggin_dec(required=False, validator=item_filters_validator, app=True)\ndef item_filters():\n ''' 筛选参数列表 '''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n sort_type = 
request.valid_data.get('sort_type') or 1\n\n order_choices = [\n {'id':1, 'name':'综合排序'},\n {'id':2, 'name':'销量优先'},\n {'id':3, 'name':'低价优先'},\n {'id':4, 'name':'高价优先'},\n ]\n has_more, citys = DataService.get_paged_cities()\n\n cat_id = None\n subcat = None\n if sub_cat_id:\n subcat = ItemService.get_subcat_dict_by_id(sub_cat_id)\n\n sort_type_obj = None\n if sort_type:\n for i in order_choices:\n if i['id'] == sort_type:\n sort_type_obj = i\n \n all_cats = ItemService.get_item_cats()\n all_sub_cats = ItemService.get_item_subcats()\n _, all_recommend_subcats = ItemService.get_paged_recommend_subcats(no_limit=True)\n\n id_order_map = {i['sub_cat_id']:i['sort_order'] for i in all_recommend_subcats}\n recommend_subcat_ids = set(i['sub_cat_id'] for i in all_recommend_subcats)\n recommend_subcats = filter(lambda i:i['id'] in recommend_subcat_ids, all_sub_cats)\n recommend_subcats.sort(key=lambda i: id_order_map[i['id']])\n\n item_cat = [\n {\n 'id': 0,\n 'name':'推荐',\n 'sub_cats':recommend_subcats,\n 'icon' : CAT_ICONS[0],\n 'icon_active' : CAT_ICONS_ACTIVE[0]\n }]\n for cat in all_cats:\n tmp = {'name': cat.name, 'id': cat.id}\n tmp['sub_cats'] = [i for i in all_sub_cats if cat.id in i['cat_id_list']]\n tmp['icon'] = CAT_ICONS.get(cat.id) or ''\n tmp['icon_active'] = CAT_ICONS_ACTIVE.get(cat.id) or ''\n item_cat.append(tmp)\n sort_type_obj = sort_type_obj or order_choices[0]\n subcat = subcat or item_cat[0]['sub_cats'][0]\n\n city_id = get_current_city_id()\n city = None\n for the_city in citys:\n if the_city['id']==city_id: city = the_city\n for i in all_sub_cats:\n if i['id'] in recommend_subcat_ids:\n i['cat_id_list'].append(0)\n city = city or citys[0]\n result = {\n 'order_choices': order_choices,\n 'data': item_cat,\n 'all_sub_cats':all_sub_cats,\n 'citys': citys,\n 'sort_type_obj':sort_type_obj,\n 'city': city,\n 'subcat': subcat\n }\n #return json.dumps(result).decode('unicode-escape').encode('utf8')\n return 
jsonify_response(result)\n\n\n\nhospital_filters_validator = Inputs(\n {\n 'sub_cat_id' : Optional(IdField(msg='分类id')),\n 'city_id' : Optional(IdField(msg='城市id')),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数')),\n 'sort_type' : Optional(IntChoiceField(choices=[1,2,3,4], msg='排序选项')),\n }\n )\n@wechat_loggin_dec(required=False, validator=hospital_filters_validator, app=True)\ndef hospital_filters():\n ''' 筛选参数列表 '''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n sort_type = request.valid_data.get('sort_type') or 1\n\n order_choices = HOSPITAL_ORDER_CHOICES\n\n has_more, citys = DataService.get_paged_cities()\n\n cat_id = None\n subcat = None\n if sub_cat_id:\n subcat = ItemService.get_subcat_dict_by_id(sub_cat_id)\n\n sort_type_obj = None\n if sort_type:\n for i in order_choices:\n if i['id'] == sort_type:\n sort_type_obj = i\n \n all_cats = ItemService.get_item_cats()\n all_sub_cats = ItemService.get_item_subcats()\n _, all_recommend_subcats = ItemService.get_paged_recommend_subcats(limit=1000)\n\n id_order_map = {i['sub_cat_id']:i['sort_order'] for i in all_recommend_subcats}\n recommend_subcat_ids = set(i['sub_cat_id'] for i in all_recommend_subcats)\n recommend_subcats = filter(lambda i:i['id'] in recommend_subcat_ids, all_sub_cats)\n recommend_subcats.sort(key=lambda i: id_order_map[i['id']])\n\n item_cat = [\n {\n 'id': 0,\n 'name':'推荐',\n 'sub_cats':recommend_subcats,\n 'icon' : CAT_ICONS[0],\n 'icon_active' : CAT_ICONS_ACTIVE[0]\n }]\n total_cat = {'id': 0, 'name':'全部', 'cat_id_list': [0]\n }\n all_sub_cats.insert(0, total_cat)\n recommend_subcats.insert(0, total_cat)\n\n for cat in all_cats:\n tmp = {'name': cat.name, 'id': cat.id}\n tmp['sub_cats'] = [i for i in all_sub_cats if cat.id in i['cat_id_list']]\n tmp['icon'] = CAT_ICONS.get(cat.id) or ''\n tmp['icon_active'] = CAT_ICONS_ACTIVE.get(cat.id) or ''\n item_cat.append(tmp)\n sort_type_obj = sort_type_obj or order_choices[0]\n subcat = subcat or 
item_cat[0]['sub_cats'][0]\n\n city_id = get_current_city_id()\n city = None\n for the_city in citys:\n if the_city['id']==city_id: city = the_city\n for i in all_sub_cats:\n if i['id'] in recommend_subcat_ids:\n i['cat_id_list'].append(0)\n city = city or citys[0]\n result = {\n 'order_choices': order_choices,\n 'data': item_cat,\n 'all_sub_cats':all_sub_cats,\n 'citys': citys,\n 'sort_type_obj':sort_type_obj,\n 'city': city,\n 'subcat': subcat\n }\n return jsonify_response(result)\n\n\nitem_list_validator = Inputs(\n {\n 'sub_cat_id' : Optional(IdField(msg='分类id')),\n 'hospital_id' : Optional(IdField(msg='医院id')),\n 'city_id' : Optional(IdField(msg='城市id')),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数')),\n 'sort_type' : Optional(IntChoiceField(choices=[1,2,3,4], msg='排序选项')),\n }\n )\n@wechat_loggin_dec(required=False, validator=item_list_validator, app=True)\ndef item_list():\n ''' 商品列表 '''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n hospital_id = request.valid_data.get('hospital_id')\n city_id = get_current_city_id()\n offset = request.valid_data.get('offset') or ''\n sort_type = request.valid_data.get('sort_type') or 1\n\n _sort = 'id'\n _sort_dir = 'ASC'\n where = and_()\n where.append(Item.status==1)\n if city_id:\n subquery= db.session.query(Hospital.id).filter(Hospital.city_id==city_id).subquery()\n where.append(Item.hospital_id.in_(subquery))\n if sub_cat_id:\n or_query= or_(\n Item.sub_cat_ids==sub_cat_id,\n Item.sub_cat_ids.like('%,{}'.format(sub_cat_id)),\n Item.sub_cat_ids.like('%,{},%'.format(sub_cat_id)),\n Item.sub_cat_ids.like('{},%'.format(sub_cat_id))\n )\n where.append(or_query)\n\n order_by_case = None\n offset_id = 0\n offset_field= ''\n _sort = 'price'\n if hospital_id:\n _sort='id'\n _sort_dir='DESC'\n if offset:\n offset_id, offset_field = offset.split('_')\n offset_id = int(offset_id)\n offset_where = Item.id<offset_id\n if sort_type==2:\n _sort='sold_count'; _sort_dir='DESC'\n offset_field = 
int(offset_field or 10**10)\n offset_where = or_(\n Item.sold_count<offset_field,\n and_(\n Item.sold_count<=offset_field,\n Item.id<offset_id \n )\n )\n if sort_type==3:\n order_by_case = case([(ActivityItem.price>0, ActivityItem.price)], else_=Item.price).asc()\n _sort='price'; _sort_dir='ASC'\n offset_field = float(offset_field or 0)\n offset_where = or_(\n Item.price>offset_field,\n and_(\n Item.price>=offset_field,\n Item.id<offset_id \n )\n )\n if sort_type==4:\n order_by_case = case([(ActivityItem.price>0, ActivityItem.price)], else_=Item.price).desc()\n _sort='price'; _sort_dir='DESC'\n offset_field = float(offset_field or 10**10)\n offset_where = or_(\n Item.price<offset_field,\n and_(\n Item.price<=offset_field,\n Item.id<offset_id \n )\n )\n if offset: where.append(offset_where)\n\n if hospital_id:\n where.append(Item.hospital_id==hospital_id)\n if offset: where.append(Item.id<offset)\n\n offset = offset\n fields = ['id', 'hospital_id', 'title', 'sold_count', 'price', 'orig_price', 'support_choice_list', 'image', 'has_fee']\n has_more, items = ItemService.get_paged_items(where=where, order_by_case=order_by_case, fields=fields, _sort=_sort, _sort_dir=_sort_dir)\n\n fetch_min_period_info(items)\n fetch_hospital_refs(items, fields=['id','name'])\n offset = ''\n if items: offset = str(items[-1]['id']) + '_' + (str(items[-1][_sort]) if sort_type !=1 else '')\n print offset, 'offset'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : items,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\n\nhospital_list_validator = Inputs(\n {\n 'sub_cat_id' : Optional(IdField(msg='分类id')),\n 'city_id' : Optional(IdField(msg='城市id')),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数')),\n 'sort_type' : Optional(IntChoiceField(choices=[1,2,3], msg='排序选项')), #1综合 2销量 3好评优先\n }\n )\n@wechat_loggin_dec(required=False, validator=hospital_list_validator, app=True)\ndef hospital_list():\n ''' 医院列表 
'''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n city_id = get_current_city_id()\n offset = request.valid_data.get('offset') or ''\n sort_type = request.valid_data.get('sort_type') or 1\n\n _sort = 'id'\n _sort_dir = 'DESC'\n where = and_()\n where.append(Hospital.status==1)\n if city_id:\n where.append(Hospital.city_id==city_id)\n if sub_cat_id:\n query = or_(\n Item.sub_cat_ids==sub_cat_id,\n Item.sub_cat_ids.like('%,{}'.format(sub_cat_id)),\n Item.sub_cat_ids.like('%,{},%'.format(sub_cat_id)),\n Item.sub_cat_ids.like('{},%'.format(sub_cat_id))\n )\n hospital_id_sub = db.session.query(Item.hospital_id).filter(query).subquery()\n where.append(Hospital.id.in_(hospital_id_sub))\n\n offset_id = 0\n offset_field= ''\n _sort = 'sold_count'\n if offset:\n offset_id, offset_field = offset.split('_')\n offset_id = int(offset_id)\n offset_where = Hospital.id<offset_id\n if sort_type==2:\n _sort='sold_count'; _sort_dir='DESC'\n offset_field = int(offset_field or 10**10)\n offset_where = or_(\n Hospital.sold_count<offset_field,\n and_(\n Hospital.sold_count<=offset_field,\n Hospital.id<offset_id \n )\n )\n if sort_type==3:\n _sort='rate'; _sort_dir='DESC'\n offset_field = float(offset_field or 0)\n offset_where = or_(\n Hospital.rate>offset_field,\n and_(\n Hospital.rate>=offset_field,\n Hospital.id<offset_id \n )\n )\n if offset: where.append(offset_where)\n\n offset = offset\n fields = ['id', 'image', 'name', 'tag_list', 'rate', 'sold_count', 'addr']\n has_more, items = ItemService.get_paged_hospitals(where=where, fields=fields, _sort=_sort, _sort_dir=_sort_dir)\n\n _, sub_cats = ItemService.get_paged_sub_cats(limit=1000)\n _, cats = ItemService.get_paged_cats(limit=1000)\n\n hospital_ids = [i['id'] for i in items]\n hospital_item_count_map = ItemService.count_hospital_items(hospital_ids)\n hospital_item_subcat_map= ItemService.get_hospital_item_cats(hospital_ids)\n for i in items:\n i['rate'] = str(format_rate(i['rate']))\n i['item_count'] = 
hospital_item_count_map.get(i['id']) or 0\n subcat_ids = hospital_item_subcat_map.get(i['id']) or []\n i['cats'] = ItemService.get_sub_cat_id_name(subcat_ids, sub_cats, cats)\n offset = ''\n if items:\n offset = str(items[-1]['id']) + '_' + (str(items[-1][_sort]) if sort_type !=1 else '')\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : items,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\ndef cacl_need_pay(choice, price, credit, has_fee=True):\n if has_fee:\n total = format_price((choice.period_fee+1) * price)\n else:\n total = price\n need_pay = 0\n if total>credit:\n credit_amount = format_price(credit*1.0/(1.0+choice.period_fee))\n period_amount = format_price(credit_amount*1.0/choice.period_count)\n period_money = format_price(credit*1.0/choice.period_count)\n period_fee = format_price(period_money-period_amount)\n credit_used = credit\n need_pay = format_price(price - period_amount*choice.period_count)\n else:\n period_money = format_price(total/choice.period_count)\n period_fee = format_price((choice.period_fee) * price*1.0/choice.period_count)\n period_amount = format_price(period_money-period_fee)\n credit_used = total\n\n result = {\n 'id' : choice.id,\n 'need_pay' : need_pay,\n 'period_money' : period_money,\n 'period_total' : period_money,\n 'period_fee' : period_fee,\n 'fee' : choice.period_fee,\n 'total' : total,\n 'credit_used' : credit_used,\n 'credit' : credit,\n 'period_amount' : period_amount,\n 'period_count' : choice.period_count\n }\n return result\n\n\nitem_detail_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n }\n )\n@wechat_loggin_dec(required=False, validator=item_detail_validator, app=True)\ndef item_detail():\n ''' 商品详情 '''\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n item_id = request.valid_data.get('item_id')\n fields = [\n 'id', 'title', 'note', 'use_time', 'support_choice_list', 'has_fee', 'direct_buy', 'photo_list', 'hospital_id', 
'price', 'orig_price']\n item = ItemService.get_item_dict_by_id(item_id, fields=fields)\n can_comment = PromoteService.get_fakeuser_by_userid(request.user_id)\n has_fav = False\n if request.user_id:\n has_fav = bool(ItemService.has_fav(item_id, request.user_id))\n assert item, '商品不存在'\n get_item_activity_price(item)\n\n credit_amount = DEFAULT_CREDIT #预计额度\n verified = False #待审核\n if request.user_id:\n credit = CreditService.init_credit(request.user_id)\n credit_amount = format_price(credit.total-credit.used)\n verified = bool(credit.status)\n apply = CreditService.get_apply_dict_by_userid(request.user_id)\n need_pay = 0\n if item['price']>credit_amount:\n need_pay = format_price(item['price'] - credit_amount)\n total_period_amount = credit_amount\n else:\n total_period_amount = item['price']\n period_choices = CreditService.get_period_choices()\n choices = []\n now = dt_obj.now()\n for period_choice in period_choices:\n if period_choice.id not in item['support_choice_list']: continue\n tmp = cacl_need_pay(period_choice, item['price'], credit_amount, item['has_fee'])\n if apply and apply.get('graduate_time') and not apply['graduate_time']>add_months(now, period_choice.period_count+6):\n tmp['disabled'] = True\n else:\n tmp['disabled'] = False\n if not total_period_amount: continue\n choices.append(tmp)\n if True:#item['direct_buy']:\n tmp = {\n 'id' : 0,\n 'period_amount': 0,\n 'period_fee' : 0,\n 'period_total' : item['price'],\n 'period_count' : 0\n }\n choices.insert(0, tmp)\n choices.sort(key=lambda i:i['period_count'], reverse=False)\n where = ItemComment.item_id==item_id\n comment_count = CommentService.count_comments(where)\n fields = ['id', 'user_id', 'is_anonymous', 'content', 'rate', 'create_time', 'photo_list', 'item_id']\n has_more, comment_list = CommentService.get_paged_comments(where=where, limit=1, fields=fields)\n fetch_user_refs(comment_list, fields=['id','name','avatar'])\n\n fields = ['id', 'name', 'photo_list', 'working_time', 'phone', 
'long_lat', 'desc', 'tag_list', 'addr']\n hospital = ItemService.get_hospital_dict_by_id(item['hospital_id'], fields=fields)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'can_comment': bool(can_comment),\n 'has_fav' : has_fav,\n 'pay_choices': choices,\n 'item' : item,\n 'hospital' : hospital,\n 'credit_amount': format_price(credit_amount),\n 'need_pay' : need_pay,\n 'verified' : verified,\n 'total_period_amount': total_period_amount,\n 'comments' : {\n 'total' : comment_count,\n 'infos' : comment_list,\n }\n }\n return jsonify_response(result)\n\n\nitem_comment_list_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数'))\n }\n )\n@wechat_loggin_dec(required=False, validator=item_comment_list_validator, app=True)\ndef item_comment_list():\n ''' 评论列表 '''\n item_id = request.valid_data.get('item_id')\n item = ItemService.get_item_dict_by_id(item_id, fields=['id', 'image', 'title', 'hospital_id', 'price', 'orig_price'])\n assert item, '商品不存在'\n get_item_activity_price(item)\n hospital = ItemService.get_hospital_dict_by_id(item['hospital_id'], fields=['id', 'name'])\n offset = request.valid_data.get('offset')\n where = ItemComment.item_id==item_id\n fields = ['id', 'is_anonymous', 'user_id', 'item_id', 'is_re_comment', 'photo_list', 'content', 'rate', 'create_time']\n has_more, comments = CommentService.get_paged_comments(where=where, offset=offset, fields=fields)\n fetch_user_refs(comments, fields=['id','name','avatar'])\n offset = str(comments[-1]['id']) if comments else ''\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'item' : item,\n 'hospital' : hospital,\n 'has_more' : has_more,\n 'infos' : comments,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\n\nmy_item_comment_list_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数'))\n }\n 
)\n@wechat_loggin_dec(required=False, validator=my_item_comment_list_validator, app=True)\ndef my_item_comment_list():\n ''' 我的评论列表 '''\n item_id = request.valid_data.get('item_id')\n item = ItemService.get_item_dict_by_id(item_id, fields=['id', 'image', 'title', 'hospital_id', 'price', 'orig_price'])\n assert item, '商品不存在'\n get_item_activity_price(item)\n\n hospital = ItemService.get_hospital_dict_by_id(item['hospital_id'], fields=['id', 'name'])\n offset = request.valid_data.get('offset')\n where = and_(\n ItemComment.item_id==item_id,\n ItemComment.user_id==request.user_id\n )\n fields = ['id', 'is_anonymous', 'user_id', 'is_re_comment', 'item_id', 'photo_list', 'content', 'rate', 'create_time']\n has_more, comments = CommentService.get_paged_comments(where=where, offset=offset, fields=fields)\n fetch_user_refs(comments, fields=['id','name','avatar'])\n offset = str(comments[-1]['id']) if comments else ''\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'item' : item,\n 'hospital' : hospital,\n 'has_more' : has_more,\n 'infos' : comments,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\n\nuser_fav_item_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n 'status' : IntChoiceField(choices=[0, 1], msg='是否收藏'),\n }\n )\n@wechat_loggin_dec(required=True, validator=user_fav_item_validator, app=True)\ndef user_fav_item():\n ''' 添加心愿单 '''\n item_id = request.valid_data.get('item_id')\n status = request.valid_data.get('status')\n msg = '添加成功' if status else '已从心愿单中移除'\n if status:\n ItemService.fav_item(request.user_id, item_id)\n else:\n ItemService.unfav_item(request.user_id, item_id) \n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg,\n }\n return jsonify_response(result)\n\n\nuser_advice_validator = Inputs(\n {\n 'content' : TextField(min_length=1, max_length=10000, msg='反馈内容'),\n 'contact' : Optional(TextField(min_length=1, max_length=100, msg='手机号'))\n }\n )\n@wechat_loggin_dec(required=False, 
def user_advice():
    '''Persist one piece of user feedback, optionally with contact info.'''
    feedback_text = request.valid_data.get('content')
    contact_info = request.valid_data.get('contact')
    UserService.advice(request.user_id, feedback_text, contact_info)
    return jsonify_response({
        'code' : ResponseCode.SUCCESS,
        'msg' : '感谢您的反馈',
    })
def wx_pay_callback():
    '''WeChat pay asynchronous-notify callback for order payment.

    Verifies the XML notification signature, looks up the order by the
    merchant order number, checks the paid amount (enforced in production
    only), then routes to pay_success_action / pay_error_action.  Always
    answers WeChat with an XML ack built by Notify_pub.arrayToXml.
    '''
    # NOTE(review): the local name `re` shadows the stdlib `re` module inside
    # this function; it is only ever used as the XML-reply dict here.
    xml = request.data
    LogService.log_pay_callback(PAY_METHOD.WECHAT_WEB, xml)
    notify = Notify_pub()
    rs = notify.check_sign(xml)
    re = {}
    if not rs:
        # Bad signature: answer FAIL so WeChat treats the notify as unhandled.
        re['return_code'] = 'FAIL'
        re['return_msg'] = '签名失败'
        return notify.arrayToXml(re)

    data = notify.get_data()
    result_code = data['result_code']
    order_no = str(data['out_trade_no'])
    total_fee = data['total_fee']
    transaction_id = data['transaction_id']

    order_info = OrderService.get_order_by_orderno(order_no)
    if not order_info:
        re['return_code'] = 'FAIL'
        re['return_msg'] = '订单不存在:'+order_no
        return notify.arrayToXml(re)

    # total_fee is divided by 100 below, i.e. WeChat reports it in cents;
    # the amount check is only enforced when APP_ENV is production.
    total_price = float(total_fee)/100
    order_price = float(order_info.price)
    if order_price != total_price and (os.environ.get('APP_ENV')=='production'):
        print order_price, total_price, '金额不匹配'
        re['return_code'] = 'FAIL'
        re['return_msg'] = '金额不匹配'
        return notify.arrayToXml(re)

    msg = ''
    # Idempotency guard: order already marked paid -> just ack SUCCESS.
    if (order_info.status==ORDER_STATUS.PAY_SUCCESS):
        re = {'return_code':'SUCCESS','return_msg':'ok'}
        return notify.arrayToXml(re)
    if result_code.upper() == 'FAIL':
        re['return_code'] = 'FAIL'
        pay_error_action(order_info)
    elif result_code.upper()=='SUCCESS':
        re['return_code'] = 'SUCCESS'
        pay_success_action(order_info, transaction_id=transaction_id, pay_method=PAY_METHOD.WECHAT_WEB)
    else:
        # Unknown result code: ack SUCCESS (stops WeChat retries) but log it.
        print 'wxpay_notify:',result_code
        re['return_code'] = 'SUCCESS'
        msg = '未知返回码'

    re['return_msg'] = msg
    return notify.arrayToXml(re)
def repayment_success_action(repayment, **kw):
    '''Handle a successful repayment callback (idempotent via status guard).

    Marks the Repayment row PAY_SUCCESS only while it is still NEW/TO_PAY, so
    a duplicate callback updates zero rows and the side effects below are
    skipped.  On the first success it records the repayment log, marks the
    individual period-pay logs as repaid, and returns the repaid amount of
    credit to the user.
    '''
    new_status = REPAYMENT_STATUS.PAY_SUCCESS
    kw['status'] = new_status
    where = and_(
        Repayment.id==repayment.id,
        Repayment.status.in_([REPAYMENT_STATUS.TO_PAY, REPAYMENT_STATUS.NEW])
    )
    count = OrderService.update_repayment(where, **kw)
    if count:
        print '还款成功'
        # repayment.data holds the JSON list of period-pay log dicts selected
        # for this repayment (see the repayment() view below).
        log_ids = [i['id'] for i in json.loads(repayment.data)]
        OrderService.gen_repayment_log(repayment)
        result = CreditService.update_pay_log(log_ids)
        if repayment.price:
            # Negative delta: give the repaid amount of credit back to the user.
            CreditService.modify_credit(repayment.user_id, -(repayment.price))
ItemService.get_subcats_by_ids(sub_cat_id_list)\n cat_id_list = []\n for i in sub_cats:\n cat_id_list.extend(i['cat_id_list'])\n user_coupon = None\n if coupon_id:\n user_coupon = CouponService.get_user_coupon(\n coupon_id, request.user_id, item_id=item_id, cat_id_list=cat_id_list, sub_cat_id_list=sub_cat_id_list,\n item_price=item['price']\n )\n if not(user_coupon):\n msg = '此优惠券不存在, 请选择其他优惠券'\n else:\n user_coupon = user_coupon.as_dict()\n\n\n coupon_list = []\n where = and_()\n where.append(UserCoupon.status==0)\n where.append(UserCoupon.user_id==request.user_id)\n where.append(UserCoupon.price<=item['price'])\n where.append(UserCoupon.end_time>dt_obj.now())\n\n or_query = or_(\n UserCoupon.coupon_cat==0,\n CouponService.cat_query(cat_id_list),\n CouponService.sub_cat_query(sub_cat_id_list),\n and_(\n UserCoupon.item_id==item_id,\n UserCoupon.coupon_cat==3\n )\n )\n where.append(or_query)\n final_where = or_(\n and_(\n where,\n UserCoupon.need==0,\n ),\n and_(\n where,\n UserCoupon.need<=item['price']\n )\n )\n has_more, user_coupons = CouponService.get_paged_user_coupons(where=final_where, limit=100)\n user_coupons.sort(key=lambda i:i['price'], reverse=False)\n if coupon_id==None and user_coupons and not user_coupon:\n for c in user_coupons[::-1]:\n if c['price']<=item['price']:\n user_coupon = c\n if user_coupon:\n print user_coupon['price'], item['price']\n assert user_coupon['price']<=item['price'], '优惠券金额不能超过订单总额'\n coupon_amount = 0\n if user_coupon:\n if user_coupon['is_trial']: #试用券 金额等于商品金额\n user_coupon['price'] = item['price']\n coupon_amount = format_price(user_coupon['price'])\n\n\n fields = ['id', 'name']\n hospital = ItemService.get_hospital_dict_by_id(item['hospital_id'], fields=fields)\n\n credit = CreditService.get_user_credit(request.user_id)\n if not credit:\n CreditService.init_credit(request.user_id)\n credit = CreditService.get_user_credit(request.user_id)\n verified = bool(credit.status)\n #period_choice_id为0时 直购\n credit_amount_remain= 
format_price(credit.total-credit.used)\n if period_choice_id==0:\n credit_amount_remain = 0\n\n if period_choice_id:\n period_choice = CreditService.get_period_choice(period_choice_id)\n assert period_choice, '分期选项不存在'\n period_count = period_choice.period_count\n result = cacl_need_pay(\n period_choice, item['price']-coupon_amount, credit_amount_remain, item['has_fee'])\n need_pay = result['need_pay']\n period_money = result['period_money']\n period_amount = result['period_amount']\n period_fee = result['period_fee']\n credit_used = result['credit_used']\n else:\n period_count = 1\n period_fee = 0\n period_amount = 0\n period_money = 0\n credit_used = 0\n need_pay = item['price'] - coupon_amount\n\n _, item_cats = ItemService.get_paged_cats(limit=1000)\n _, item_subcats = ItemService.get_paged_sub_cats(limit=1000)\n for i in user_coupons:\n i['cat_str'] = '全部适用'\n i['remain_str'] = calc_expire_remain(i['end_time'])\n set_coupon_cat_str(i, item_cats, item_subcats)\n coupon_title = ''\n if user_coupon: coupon_title = user_coupon['title']\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg,\n 'item' : item,\n 'hospital' : hospital,\n 'coupon_amout' : coupon_amount,\n 'coupon_title' : coupon_title,\n 'coupon_id' : user_coupon['id'] if user_coupon else 0,\n 'credit_amount' : credit_used, #使用了的额度\n 'credit_amount_can_use': credit_amount_remain , #总分期金额\n 'total' : item['price'],\n 'period_count' : period_count,\n 'period_amount' : period_amount,\n 'period_fee' : period_fee,\n 'period_total' : period_money,\n 'coupon_list' : user_coupons,\n 'need_pay' : format_price(need_pay),\n 'credit_status' : credit.status\n }\n\n return jsonify_response(result)\n\n\nconfirm_order_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n 'period_choice_id' : Optional(IdField(msg='分期类型id')),\n 'coupon_id' : Optional(IdField(msg='优惠券id'))\n }\n )\n@wechat_loggin_dec(required=True, validator=confirm_order_validator, app=True)\ndef confirm_order():\n item_id = 
request.valid_data.get('item_id')\n user_coupon_id = request.valid_data.get('coupon_id')\n period_choice_id = request.valid_data.get('period_choice_id')\n\n item = ItemService.get_item_dict_by_id(item_id)\n assert item, '商品不存在'\n get_item_activity_price(item)\n\n sub_cat_id_list = item['sub_cat_id_list']\n sub_cats = ItemService.get_subcats_by_ids(sub_cat_id_list)\n cat_id_list = []\n for i in sub_cats:\n cat_id_list.extend(i['cat_id_list'])\n\n hospital_id = item['hospital_id']\n\n if period_choice_id: assert period_choice_id in item['support_choice_list'], '商品不支持该分期选项'\n\n user_coupon = None\n\n if user_coupon_id:\n user_coupon = CouponService.get_user_coupon(\n user_coupon_id, request.user_id, item_id=item_id, cat_id_list=cat_id_list, sub_cat_id_list=sub_cat_id_list,\n item_price=item['price']\n )\n assert user_coupon, '优惠券不存在'\n assert user_coupon.status==0, '优惠券已被使用'\n assert user_coupon.end_time>dt_obj.now(), '优惠券已过期'\n\n total = item['price']\n order_no = OrderService.create_no()\n coupon_amount = 0\n credit_amount = 0\n if user_coupon_id:\n if user_coupon.is_trial: #试用券 金额等于商品金额\n user_coupon.price = item['price']\n coupon_amount = format_price(user_coupon.price)\n credit = CreditService.get_user_credit(request.user_id)\n credit_amount_remain = format_price(credit.total-credit.used)\n if period_choice_id:\n assert credit.status!=CREDIT_STATUS.DEFAULT, '请先申请额度'\n assert credit.status!=CREDIT_STATUS.REJECTED, '请重新申请额度'\n credit_verified = 1 if (credit.status==CREDIT_STATUS.VERIFIED) else 0\n if period_choice_id==0: credit_verified=1\n\n if period_choice_id:\n period_choice = CreditService.get_period_choice(period_choice_id)\n assert period_choice, '分期选项不存在'\n apply = CreditService.get_apply_dict_by_userid(request.user_id)\n now = dt_obj.now()\n if apply and apply.get('graduate_time'):\n assert apply['graduate_time']>add_months(now, period_choice.period_count+6), '选择分期期数需小于现在到毕业前六个月的月数'\n period_count = period_choice.period_count\n result = cacl_need_pay(\n 
period_choice, item['price']-coupon_amount, credit_amount_remain, item['has_fee'])\n need_pay = result['need_pay']\n period_money = result['period_money']\n period_amount = result['period_amount']\n period_fee = result['period_fee']\n credit_used = result['credit_used']\n else:\n period_count = 1\n period_fee = 0\n period_amount = 0\n period_money = 0\n credit_used = 0\n need_pay = item['price'] - coupon_amount\n\n if user_coupon_id:\n query = and_(\n UserCoupon.user_id==request.user_id,\n UserCoupon.id==user_coupon_id,\n UserCoupon.status==0,\n or_(\n UserCoupon.coupon_cat==0,\n CouponService.cat_query(cat_id_list),\n CouponService.sub_cat_query(sub_cat_id_list),\n and_(\n UserCoupon.coupon_cat==3,\n UserCoupon.item_id==item_id\n )\n )\n )\n or_query = or_(\n and_(\n query,\n UserCoupon.need==0,\n ),\n and_(\n query,\n UserCoupon.need<=item['price']\n )\n )\n count = CouponService.update_user_coupon_status(or_query, 1)\n assert count, '优惠券已被使用'\n total_fee = format_price(period_fee*period_count)\n if credit_used:\n result = CreditService.modify_credit(request.user_id, credit_used)\n assert result in {1,2}, '额度不足'\n if need_pay:\n to_status = ORDER_STATUS.NEW_ORDER\n else:\n to_status = ORDER_STATUS.PAY_SUCCESS\n order_id = OrderService.add_order(\n request.user_id, item_id, hospital_id, need_pay,\n credit_used, total_fee, coupon_amount, total, period_choice_id, user_coupon_id, order_no,\n credit_verified,\n status=to_status)\n if not(need_pay) and credit_verified:#额度已通过审核 并全部用额度购买成功\n order = OrderService.get_user_order(order_id, request.user_id)\n pay_success_action(order, need_pay=False)\n result = {\n 'code': ResponseCode.SUCCESS,\n 'msg': '',\n 'order_id': order_id}\n return jsonify_response(result)\n\n\norder_prepay_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id'),\n }\n )\n@wechat_loggin_dec(required=True, validator=order_prepay_validator, app=False)\ndef order_pay():\n order_id = request.valid_data.get('order_id')\n order_info = 
repayment_pay_validator = Inputs(
    {
        'repayment_id' : IdField(msg='还款id'),
    }
    )
@wechat_loggin_dec(required=True, validator=repayment_pay_validator, app=False)
def repayment_pay():
    '''Render the WeChat JSAPI payment page for an installment repayment.

    Loads the caller's repayment record, builds the wx pay parameters against
    the repayment-specific notify URL, and renders the payment template.
    '''
    repayment_id = request.valid_data.get('repayment_id')
    repayment = OrderService.get_user_repayment(repayment_id, request.user_id)

    assert repayment, '还款不存在'

    # NOTE(review): falls back to a hard-coded open_id when the session has
    # none -- looks like a dev/test shim; confirm it cannot reach production.
    open_id = request.open_id or 'o56qvw-ThtwfthGGlZ-XbH-3fjRc'
    wx_pay_params, err = get_wx_pay_params(
        open_id, repayment.price, repayment.order_no, WX_REPAYMENT_NOTIFY_URL, '美分分分期账单还款'
    )

    # On parameter-building failure just return an empty body.
    if err: return ''

    print wx_pay_params, 'wx_pay_params', type(wx_pay_params)
    return render_template('user/repayment_pay.html', repayment=repayment, wx_pay_params=wx_pay_params)
request.user_id)\n\n assert order_info, '订单不存在'\n\n fields = ['id', 'title', 'price', 'orig_price', 'image', 'hospital_id']\n item = ItemService.get_item_dict_by_id(order_info.item_id, fields=fields)\n assert item, '商品不存在'\n get_item_activity_price(item)\n\n _, period_choices = CreditService.get_paged_period_choices(limit=1000)\n period_amount = 0\n period_count = 0\n period_fee = 0\n for choice in period_choices:\n if order_info.credit_choice_id==choice['id']:\n period_count = choice['period_count']\n period_money = format_price((order_info.credit_amount)/period_count)\n period_fee = format_price(order_info.total_fee/period_count)\n period_amount = format_price(period_money - period_fee)\n fields = ['id', 'addr', 'long_lat', 'tag_list', 'phone', 'name']\n hospital = ItemService.get_hospital_dict_by_id(item['hospital_id'], fields=fields)\n\n service_code = ''\n service_status = 0\n service_code_dict = {}\n if order_info.credit_verified==1 and order_info.status in {ORDER_STATUS.PAY_SUCCESS, ORDER_STATUS.FINISH}:\n service = OrderService.get_servicecode(order_id)\n assert service, '服务码不存在'\n service_code_dict = service.as_dict()\n service_code = service.code\n service_status = service.status\n order_info = order_info.as_dict()\n cancel_msg = '确认取消订单吗'\n if order_info['price'] and order_info['status']==ORDER_STATUS.PAY_SUCCESS:\n repayment_amount= OrderService.get_order_repayment_logs_amount(order_id)\n repayment_amount= sum([format_price(i['price']) for i in repayment_amount.values()] or [0])\n refund_total = order_info['price']+repayment_amount\n if repayment_amount:\n cancel_msg = '取消订单将退还首付金额{}元和已还款金额{}元,是否取消订单?'.format(order_info['price'], repayment_amount)\n else:\n cancel_msg = '取消订单将退还首付金额{}元,是否取消订单?'.format(order_info['price'])\n comment = CommentService.get_comment(ItemComment.order_id==order_id)\n set_order_status(order_info, comment=comment, servicecode=service_code_dict)\n\n order_info.update({\n 'period_fee' : format_price(period_fee),\n 'period_count' : 
period_count,\n 'period_amount' : format_price(period_amount)\n })\n order_info['status_labbel'] = ORDER_STATUS_LABEL.get(order_info['status'])\n result = {\n 'cancel_msg' : cancel_msg,\n 'item' : item,\n 'service_code' : service_code,\n 'service_status': service_status,\n 'hospital' : hospital,\n 'order_info' : order_info\n }\n return jsonify_response(result)\n\n\ncomment_post_validator = Inputs(\n {\n 'order_id' : Optional(IdField(msg='订单id')),\n 'item_id' : Optional(IdField(msg='商品id')),\n 'content' : TextField(min_length=1, max_length=10000, msg='评价内容'),\n 'photos' : Optional(TextField(min_length=0, max_length=10000, msg='逗号分隔的图片列表')),\n 'is_anonymous' : IntChoiceField(choices=[0,1], msg='是否匿名'),\n 'rate' : IntChoiceField(choices=range(1,6), msg='评星'),\n }\n )\n@wechat_loggin_dec(required=True, validator=comment_post_validator, app=True)\ndef comment_post():\n order_id = request.valid_data.get('order_id')\n item_id = request.valid_data.get('item_id')\n content = request.valid_data.get('content')\n photos = request.valid_data.get('photos')\n is_anonymous = request.valid_data.get('is_anonymous')\n rate = request.valid_data.get('rate')\n order = OrderService.get_user_order(order_id, request.user_id)\n can_comment = PromoteService.get_fakeuser_by_userid(request.user_id)\n if not can_comment:\n assert order, '订单不存在'\n assert order_id or item_id, '请评论商品'\n if order:\n item_id = order.item_id\n item = ItemService.get_item_dict_by_id(item_id)\n assert item, '商品不存在'\n query = and_()\n query.append(ItemComment.user_id==request.user_id)\n if order_id: query.append(ItemComment.order_id==order_id)\n if item_id: query.append(ItemComment.item_id==item_id)\n exists = bool(CommentService.get_comment(query))\n comment_id = CommentService.comment_item(\n item_id or order.item_id, request.user_id, content, photos, rate, is_anonymous,\n order_id,\n is_re_comment=exists\n )\n CommentService.rerate_hospital(item['hospital_id'])\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' 
my_period_bill_validator = Inputs(
    {
        'cat' : IntChoiceField(choices=range(1,3), msg='还款日期类型'),
    }
    )
@wechat_loggin_dec(required=True, validator=my_period_bill_validator, app=True)
def my_period_bill():
    '''Installment bill for the current (cat=1) or next (cat=2) period.

    For cat=1 the query also pulls in overdue unpaid installments and
    installments that were overdue but repaid within the current month.
    Returns totals (owed / repaid / remaining) plus the individual logs with
    their item titles attached.
    '''
    cat = request.valid_data.get('cat')
    deadline = get_due_time(cat-1)
    start, end = get_current_period()
    where = or_()

    title, thedeadline = deadline_zh(deadline)
    where.append(PeriodPayLog.deadline==deadline)
    if cat==1: # current period also includes overdue installments
        where.append(
            and_(
                PeriodPayLog.deadline<deadline,
                PeriodPayLog.status==0,
            )
        )
        where.append(# overdue but already repaid within the current month
            and_(
                PeriodPayLog.deadline<deadline,
                PeriodPayLog.status==1,
                PeriodPayLog.repayment_time>=start+day_delta,
                PeriodPayLog.repayment_time<=dt_obj.now(),
            )
        )
    where = and_(
        PeriodPayLog.status.in_([0,1]),
        where)
    logs = CreditService.get_period_pay_logs(request.user_id, where)

    total = 0
    repayed = 0
    logs = [i.as_dict() for i in logs]
    for log in logs:
        get_delayed_info(log)
        total += log['fee'] + log['amount'] + log['punish']
        log['create_time_str'] = get_time_str_from_dt(log['create_time'], '%Y.%m.%d')
        if log['status']==1:
            repayed += log['fee'] + log['amount'] + log['punish']
        else:
            if log['deadline']!=str(deadline):
                cacl_punish_fee(log) # overdue unpaid installment: recompute late fee dynamically
                # NOTE(review): punish was already added to total above; this
                # adds the recomputed punish again -- verify whether
                # get_delayed_info leaves punish at 0 before this point.
                total += log['punish']

    fetch_order_refs(logs)
    for log in logs:
        log['item_id'] = log['order']['item_id']
    fetch_item_refs(logs, fields=['id', 'title'])
    remain = total - repayed
    result = {
        'total' : format_price(total),
        'remain' : format_price(remain),
        'repayed' : format_price(repayed),
        'infos' : logs,
        'title' : title,
        'deadline' : thedeadline,
    }
    return jsonify_response(result)
where = and_(\n UserCoupon.user_id==request.user_id,\n UserCoupon.status==0,\n UserCoupon.end_time>dt_obj.now()\n )\n coupon_count = CouponService.count_coupon(where)\n\n verified = bool(user_credit.status)\n total = user_credit.total\n remain = user_credit.total - user_credit.used\n apply_status = user_credit.status #0未申请 1申请中 2已通过 3被拒绝\n period_to_pay = 0\n\n deadline = get_due_time(0)\n start, end = get_current_period()\n where = or_(\n )\n where.append(\n and_(\n PeriodPayLog.deadline<=deadline,\n PeriodPayLog.status==0,\n )\n )\n logs = CreditService.get_period_pay_logs(request.user_id, where)\n logs = [i.as_dict() for i in logs]\n has_delayed = False\n for log in logs:\n if not has_delayed and log['status']==0:\n has_delayed = str(dt_obj.now())>log['deadline']\n if log['status']==1: continue\n period_to_pay += log['fee'] + log['amount']\n if not(log['repayment_time']) and str(dt_obj.now())>log['deadline']:\n cacl_punish_fee(log)\n period_to_pay += log['punish']\n remain_days = get_date_delta(str(dt_obj.now())[:19], str(deadline)[:19])\n can_edit_name = not UserService.get_edit_name_log(request.user_id)\n if os.environ.get('APP_ENV')!='production': can_edit_name = True\n result = {\n 'has_delayed' : has_delayed,\n 'can_edit_name' : can_edit_name,\n 'total' : float(total),\n 'remain' : float(remain),\n 'coupon_count' : coupon_count,\n 'apply_status' : apply_status,\n 'user' : user.as_dict(),\n 'period_to_pay' : format_price(period_to_pay), #本期应还\n 'remain_days' : remain_days,\n }\n\n return jsonify_response(result)\n\n\nmy_repayments_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='分页参数'))\n }\n)\n@wechat_loggin_dec(required=True, validator=my_repayments_validator, app=True)\ndef my_repayments():\n ''' 还款历史 '''\n offset = request.valid_data.get('offset')\n where = and_()\n where.append(and_(\n PeriodPayLog.user_id==request.user_id,\n PeriodPayLog.status==1\n ))\n if offset:\n log_id, pay_time = offset.split('_')\n 
@wechat_loggin_dec(required=False, app=True)
def item_cats():
    '''Item category tree for the storefront.

    Returns a synthetic "recommend" category (id 0) built from the recommended
    sub-cats (in their configured sort order), followed by each real category
    with its matching sub-cats, plus the flat list of all sub-cats; the
    recommended sub-cats also get cat id 0 appended to their cat_id_list.
    '''
    all_cats = ItemService.get_item_cats()
    all_sub_cats = ItemService.get_item_subcats()
    _, all_recommend_subcats = ItemService.get_paged_recommend_subcats(no_limit=True)

    id_order_map = {i['sub_cat_id']:i['sort_order'] for i in all_recommend_subcats}
    recommend_subcat_ids = set(i['sub_cat_id'] for i in all_recommend_subcats)
    # FIX: the original did filter(...).sort(...); under Python 3 filter()
    # returns an iterator with no .sort().  sorted() over a comprehension
    # behaves identically on Python 2 and stays correct on Python 3.
    recommend_subcats = sorted(
        (i for i in all_sub_cats if i['id'] in recommend_subcat_ids),
        key=lambda i: id_order_map[i['id']])

    data = [
        {
            'id': 0,
            'name':'推荐',
            'sub_cats':recommend_subcats,
            'icon' : CAT_ICONS[0],
            'icon_active' : CAT_ICONS_ACTIVE[0]
        }]
    for cat in all_cats:
        tmp = {'name': cat.name, 'id': cat.id}
        tmp['sub_cats'] = [i for i in all_sub_cats if cat.id in i['cat_id_list']]
        tmp['icon'] = CAT_ICONS.get(cat.id)
        tmp['icon_active'] = CAT_ICONS_ACTIVE.get(cat.id)
        data.append(tmp)

    # Tag recommended sub-cats as members of the synthetic category id 0.
    for i in all_sub_cats:
        if i['id'] in recommend_subcat_ids:
            i['cat_id_list'].append(0)

    result = {
        'data':data,
        'all_sub_cats':all_sub_cats
    }
    return jsonify_response(result)
@wechat_loggin_dec(required=True, validator=my_favs_validator, app=True)
def my_favs():
    '''Paged wish-list (favorited items) of the current user.

    Resolves each fav's item and hospital, overrides list prices with the
    current activity's promotional prices when present, and attaches
    minimum-installment info.  (Removed a leftover debug print and an unused
    get_period_choices() query from the original.)
    '''
    offset = request.valid_data.get('offset')
    where = ItemFav.user_id==request.user_id
    has_more, favs = ItemService.get_paged_fav_items(where=where, offset=offset)

    fetch_item_refs(favs, fields=['id', 'image', 'title','support_choice_list','price','orig_price','hospital_id'])
    items = [i['item'] for i in favs]
    fetch_hospital_refs(items, fields=['id','name'])

    # Apply the current activity's promotional prices, if an activity runs.
    item_ids = [i['item']['id'] for i in favs]
    activity = ActivityService.get_current_activity()
    activity_id = activity['id'] if activity else None
    price_map = ItemService.get_activity_prices(item_ids, activity_id)
    for i in favs:
        activity_price = price_map.get(i['item']['id'])
        if activity_price: i['item']['price'] = activity_price
    fetch_min_period_info(items)

    offset = str(favs[-1]['id']) if favs else ''
    result = {
        'has_more' : has_more,
        'infos' : favs,
        'offset' : offset
    }

    return jsonify_response(result)
help_validator = Inputs(
    {
        'cat_id' : Optional(IdField(msg='分类id')),
    }
)
# NOTE: the view name `help` shadows the builtin; kept as-is because the
# route registration elsewhere refers to this name.
@wechat_loggin_dec(validator=help_validator, app=True)
def help():
    '''Help center listing: categories with their entries.

    Without cat_id, returns all categories with at most 4 entries each
    (overview); with cat_id, returns that category with all its entries.
    '''
    cat_id = request.valid_data.get('cat_id')

    cat_filter = HelpCat.id==cat_id if cat_id else None
    _, cats = DataService.get_paged_helpcats(where=cat_filter)
    _, entries = DataService.get_paged_helpentries()

    for cat in cats:
        matching = [e for e in entries if e['cat_id']==cat['id']]
        cat['entry_list'] = matching if cat_id else matching[:4]

    return jsonify_response({ 'data' : cats })
help_entry_validator = Inputs(
    {
        'entry_id' : IdField(msg='条目id')
    }
)
@wechat_loggin_dec(required=False, validator=help_entry_validator, app=True)
def get_help_entry():
    '''Help-entry detail page.

    Renders the help-center detail template for one entry.  The original had
    an unreachable `return jsonify_response(result)` after the render call
    (dead code); it and the dead `result` dict have been removed.
    '''
    entry_id = request.valid_data.get('entry_id')

    entry = DataService.get_helpentry_by_id(entry_id)
    return render_template('user/help-center-detail.html', entry=entry, contact=CONTACT)
jsonify_response(result)\n\n\n\nrepayment_validator = Inputs(\n {\n 'data' : JsonField(msg='请选择还款数据')\n }\n)\n@wechat_loggin_dec(validator=repayment_validator, app=True)\ndef repayment():\n ''' 选择还款'''\n data = request.valid_data.get('data')\n\n user_id = request.user_id\n pay_method = None\n price = 0\n assert data, '请选择还款'\n for log in data:\n assert str(log.get['amount']).isdigit() and str(log.get['fee']).isdigit() and str(log.get['punish']).isdigit(), '数据格式错误'\n price += float(log['punish']) + float(log['fee']) + float(log['amount'])\n coupon_id = None\n order_no = OrderService.create_no()\n repayment_id = OrderService.repayment(user_id, pay_method, coupon_id, price, json.dumps(data), order_no)\n\n msg = ''\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg,\n 'repayment_id' : repayment_id\n }\n return jsonify_response(result)\n\n\n\n\nhospital_detail_validator = Inputs(\n {\n 'hospital_id': IdField(msg='医院id')\n }\n)\n@wechat_loggin_dec(required=False, validator=hospital_detail_validator)\ndef hospital_detail():\n ''' 医院详情 '''\n hospital_id = request.valid_data.get('hospital_id')\n fields = ['id', 'name', 'photo_list', 'working_time', 'phone', 'long_lat', 'desc', 'tag_list', 'addr']\n hospital = ItemService.get_hospital_dict_by_id(hospital_id, fields=fields)\n\n where = Item.hospital_id==hospital_id\n fields = ['id', 'photo_list', 'title', 'price', 'orig_price', 'support_choice_list', 'image', 'has_fee']\n has_more, items = ItemService.get_paged_items(where=where, fields=fields, limit=5)\n\n fetch_min_period_info(items)\n result = {\n 'code': ResponseCode.SUCCESS,\n 'msg': '',\n 'hospital': hospital,\n 'infos': items\n }\n return render_template('user/hospital_detail.html', **result)\n return jsonify_response(result)\n\n\n\n@wechat_loggin_dec(required=None)\ndef get_city_list():\n ''' 城市列表 '''\n has_more, infos = DataService.get_paged_city_list()\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos,\n }\n return 
jsonify_response(result)\n\n\n\nupload_image_validator = Inputs(\n {\n 'image_cat' : ChoiceField(choices=['avatar', 'comment', 'apply', 'room'], msg='图片类型')\n }\n )\n@wechat_loggin_dec(validator=upload_image_validator)\ndef upload_image():\n try:\n file = request.files['file']\n img_cat = request.valid_data.get('image_cat')\n code = 0\n msg = '上传成功'\n content = file.read()\n key = img_cat+ '/' + str(time.time()) + '.jpg'\n upload_img(key, content)\n if img_cat=='avatar':\n UserService.update_user(request.user_id, avatar=key)\n return jsonify_response({\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'image': key,\n 'fullpath': prefix_img_domain(key)\n })\n\n except Exception as e:\n import traceback\n traceback.print_exc()\n return jsonify_response({'msg':'服务器异常','code': 10000})\n\n\n\napply_credit_post_validator = Inputs(\n {\n# 'name' : Optional(TextField(min_length=0, max_length=100, msg='姓名')),\n# 'id_no' : TextField(min_length=0, max_length=100, msg='身份证号'),\n# 'school' : TextField(min_length=0, max_length=100, msg='学校'),\n# 'enrollment_time' : TextField(min_length=0, max_length=100, msg='入学时间'),\n 'graduate_time' : REGField(pattern='\\d{4}-\\d{1,2}', msg='请输入毕业时间格式如:2015-01'),\n# 'major' : TextField(min_length=0, max_length=100, msg='专业'),\n# 'stu_no' : TextField(min_length=0, max_length=100, msg='学号'),\n# 'stu_education' : TextField(min_length=0, max_length=100, msg='学历'),\n# 'addr' : TextField(min_length=0, max_length=100, msg='地址'),\n 'parent_contact' : TextField(min_length=0, max_length=100, msg='父母联系方式'),\n 'chsi_name' : TextField(min_length=0, max_length=100, msg='学信网账号'),\n 'chsi_passwd' : TextField(min_length=0, max_length=100, msg='学信网密码'),\n 'body_choice_ids' : Optional(TextField(min_length=0, max_length=100, msg='你满意的部位')),\n 'body_choice_text' : Optional(TextField(min_length=0, max_length=100, msg='其他内容')),\n }\n )\n@wechat_loggin_dec(validator=apply_credit_post_validator)\ndef apply_credit_post():\n# request.valid_data['enrollment_time'] = 
'{}-01 00:00:00'.format(request.valid_data['enrollment_time'])\n request.valid_data['graduate_time'] = '{}-01 00:00:00'.format(request.valid_data['graduate_time'])\n body_choice_ids = request.valid_data['body_choice_ids']\n body_choice_text = request.valid_data['body_choice_text']\n \n apply_id = CreditService.add_apply(request.user_id, **request.valid_data)\n if not apply_id:\n where = and_(\n CreditApply.user_id==request.user_id,\n CreditApply.status!=APPLY_STATUS.VERIFIED\n )\n request.valid_data['create_time'] = dt_obj.now()\n request.valid_data['status'] = 1\n CreditService.update_apply(where, **request.valid_data)\n\n CreditService.update_user_credit_status(request.user_id, CREDIT_STATUS.VERIFYING)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\napply_credit_photo_validator = Inputs(\n {\n 'id_card_photo' : TextField(min_length=0, max_length=100, msg='身份证号码'),\n 'stu_card_photo' : TextField(min_length=0, max_length=100, msg='学生证号'),\n }\n )\n@wechat_loggin_dec(validator=apply_credit_photo_validator)\ndef apply_credit_photo():\n ''' '''\n id_card_photo = request.valid_data.get('id_card_photo')\n stu_card_photo = request.valid_data.get('stu_card_photo')\n\n where = CreditApply.user_id==request.user_id\n CreditService.update_apply(where, id_card_photo=id_card_photo, stu_card_photo=stu_card_photo, status=APPLY_STATUS.SECOND_STEP)\n CreditService.update_user_credit_status(request.user_id, CREDIT_STATUS.VERIFYING)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\nedit_name_validator = Inputs(\n {\n 'name' : TextField(min_length=0, max_length=100, msg='修改名字'),\n }\n )\n@wechat_loggin_dec(required=True, validator=edit_name_validator, app=True)\ndef edit_name():\n ''' 修改名字 '''\n name = request.valid_data.get('name')\n print name\n count = UserService.update_name(request.user_id, name)\n\n if count:\n UserService.add_edit_name_log(request.user_id)\n result = 
{\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '修改成功'\n }\n return jsonify_response(result)\n\n\n\ndef get_current_city_id():\n ''' 获取当期城市id '''\n city_id = request.valid_data.get('city_id')\n city_code = get_cookie('choose_city_code') or get_cookie('city_code')\n if city_id:\n return city_id\n elif city_code:\n city = DataService.get_city_by_baidu_city_code(city_code)\n if city: return city.id\n\n return 1\n\n\n\nitem_list_html_validator = Inputs(\n {\n 'sub_cat_id' : Optional(IdField(msg='分类id')),\n 'hospital_id' : Optional(IdField(msg='医院id')),\n 'city_id' : Optional(IdField(msg='城市id')),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数')),\n 'sort_type' : Optional(IntChoiceField(choices=[1,2,3,4], msg='排序选项')),\n }\n )\n@wechat_loggin_dec(required=False, validator=item_list_html_validator)\ndef item_list_html():\n ''' 商品列表 '''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n sort_type = request.valid_data.get('sort_type') or 1\n\n order_choices = [\n {'id':1, 'name':'综合排序'},\n {'id':2, 'name':'销量优先'},\n {'id':3, 'name':'低价优先'},\n {'id':4, 'name':'高价优先'},\n ]\n has_more, citys = DataService.get_paged_cities()\n\n\n cat_id = None\n subcat = None\n if sub_cat_id:\n subcat = ItemService.get_subcat_dict_by_id(sub_cat_id)\n\n sort_type_obj = None\n if sort_type:\n for i in order_choices:\n if i['id'] == sort_type:\n sort_type_obj = i\n \n all_cats = ItemService.get_item_cats()\n all_sub_cats = ItemService.get_item_subcats()\n _, all_recommend_subcats = ItemService.get_paged_recommend_subcats(limit=1000)\n\n id_order_map = {i['sub_cat_id']:i['sort_order'] for i in all_recommend_subcats}\n recommend_subcat_ids = set(i['sub_cat_id'] for i in all_recommend_subcats)\n recommend_subcats = filter(lambda i:i['id'] in recommend_subcat_ids, all_sub_cats)\n recommend_subcats.sort(key=lambda i: id_order_map[i['id']])\n\n item_cat = [\n {\n 'id': 0,\n 'name':'推荐',\n 'sub_cats':recommend_subcats,\n 'icon' : CAT_ICONS[0],\n 'icon_active' : 
CAT_ICONS_ACTIVE[0]\n }]\n for cat in all_cats:\n tmp = {'name': cat.name, 'id': cat.id}\n tmp['sub_cats'] = [i for i in all_sub_cats if cat.id in i['cat_id_list']]\n tmp['icon'] = CAT_ICONS.get(cat.id) or ''\n tmp['icon_active'] = CAT_ICONS_ACTIVE.get(cat.id) or ''\n item_cat.append(tmp)\n sort_type_obj = sort_type_obj or order_choices[0]\n subcat = subcat or item_cat[0]['sub_cats'][0]\n\n city_id = get_current_city_id()\n city = None\n for the_city in citys:\n if the_city['id']==city_id: city = the_city\n\n for i in all_sub_cats:\n if i['id'] in recommend_subcat_ids:\n i['cat_id_list'].append(0)\n city = city or citys[0]\n result = {\n 'order_choices': order_choices,\n 'data': item_cat,\n 'all_sub_cats':all_sub_cats,\n 'citys': citys,\n 'sort_type_obj':sort_type_obj,\n 'city': city,\n 'subcat': subcat\n }\n if request.args.get('json'):\n return jsonify_response(result)\n return render_template('user/item_list.html', **result)\n\n\nhospital_list_html_validator = Inputs(\n {\n 'sub_cat_id' : Optional(IdField(msg='分类id')),\n 'hospital_id' : Optional(IdField(msg='医院id')),\n 'city_id' : Optional(IdField(msg='城市id')),\n 'offset' : Optional(TextField(min_length=1, max_length=100, msg='分页参数')),\n 'sort_type' : Optional(IntChoiceField(choices=[1,2,3,4], msg='排序选项')),\n }\n )\n@wechat_loggin_dec(required=False, validator=hospital_list_html_validator)\ndef hospital_list_html():\n ''' 医院列表 '''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n sort_type = request.valid_data.get('sort_type') or 1\n\n order_choices = HOSPITAL_ORDER_CHOICES\n has_more, citys = DataService.get_paged_cities()\n\n\n cat_id = None\n subcat = None\n if sub_cat_id:\n subcat = ItemService.get_subcat_dict_by_id(sub_cat_id)\n\n sort_type_obj = None\n if sort_type:\n for i in order_choices:\n if i['id'] == sort_type:\n sort_type_obj = i\n \n all_cats = ItemService.get_item_cats()\n all_sub_cats = ItemService.get_item_subcats()\n _, all_recommend_subcats = 
ItemService.get_paged_recommend_subcats(limit=1000)\n\n id_order_map = {i['sub_cat_id']:i['sort_order'] for i in all_recommend_subcats}\n recommend_subcat_ids = set(i['sub_cat_id'] for i in all_recommend_subcats)\n recommend_subcats = filter(lambda i:i['id'] in recommend_subcat_ids, all_sub_cats)\n recommend_subcats.sort(key=lambda i: id_order_map[i['id']])\n total_cat = {'id': 0, 'name':'全部', 'cat_id_list': [0]\n }\n all_sub_cats.insert(0, total_cat)\n recommend_subcats.insert(0, total_cat)\n item_cat = [\n {\n 'id': 0,\n 'name':'推荐',\n 'sub_cats':recommend_subcats,\n 'icon' : CAT_ICONS[0],\n 'icon_active' : CAT_ICONS_ACTIVE[0]\n }]\n for cat in all_cats:\n tmp = {'name': cat.name, 'id': cat.id}\n tmp['sub_cats'] = [i for i in all_sub_cats if cat.id in i['cat_id_list']]\n tmp['icon'] = CAT_ICONS.get(cat.id) or ''\n tmp['icon_active'] = CAT_ICONS_ACTIVE.get(cat.id) or ''\n item_cat.append(tmp)\n sort_type_obj = sort_type_obj or order_choices[0]\n subcat = subcat or item_cat[0]['sub_cats'][0]\n\n city_id = get_current_city_id()\n city = None\n for the_city in citys:\n if the_city['id']==city_id: city = the_city\n\n for i in all_sub_cats:\n if i['id'] in recommend_subcat_ids:\n i['cat_id_list'].append(0)\n city = city or citys[0]\n result = {\n 'order_choices': order_choices,\n 'data': item_cat,\n 'all_sub_cats':all_sub_cats,\n 'citys': citys,\n 'sort_type_obj':sort_type_obj,\n 'city': city,\n 'subcat': subcat\n }\n if request.args.get('json'):\n return jsonify_response(result)\n return render_template('user/hospital_list.html', **result)\n\n\n\n@wechat_loggin_dec(required=False)\ndef menu_credit_apply():\n ''' 额度申请菜单入口 '''\n if not request.user_id:\n return redirect('/static/user/login.html?next=/user/menu_credit_apply/')\n #return send_from_directory('static/user/', 'login.html')\n apply = CreditService.get_apply_dict_by_userid(request.user_id)\n if apply:\n# if apply['status']==1:\n# return redirect('static/user/applyer-pic.html')\n predict_time = 
str(get_next_working_day(str(apply['create_time'])))[:10]\n return render_template('user/apply_result.html', apply=apply, predict_time=predict_time)\n else:\n return redirect('static/user/applyer-infor.html')\n\n\n\n\nmy_order_bill_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id')\n }\n )\n@wechat_loggin_dec(required=True, validator=my_order_bill_validator, app=True)\ndef my_order_bill():\n order_id = request.valid_data.get('order_id')\n order = OrderService.get_user_order(order_id, request.user_id)\n assert order, '订单不存在'\n\n where = or_(\n )\n where.append(PeriodPayLog.order_id==order_id)\n\n order = OrderService.get_order_by_id(order_id)\n item = ItemService.get_item_dict_by_id(order.item_id)\n hospital = ItemService.get_hospital_dict_by_id(item['hospital_id'], fields=['id','name'])\n logs = CreditService.get_period_pay_logs(request.user_id, where)\n\n total = 0\n repayed = 0\n logs = [i.as_dict() for i in logs]\n for log in logs:\n get_delayed_info(log)\n total += log['fee'] + log['amount'] + log['punish']\n if log['status']==1:\n repayed += log['fee'] + log['amount'] + log['punish']\n else:\n if log['delayed']:\n cacl_punish_fee(log) #预期未还分期 动态计算滞纳金\n total += log['punish']\n\n fetch_order_refs(logs)\n for log in logs:\n log['item_id'] = log['order']['item_id']\n fetch_item_refs(logs, fields=['id', 'title'])\n remain = total - repayed\n item['price'] = format_price(order.total)\n\n result = {\n 'item' : item,\n 'total' : format_price(total),\n 'hospital' : hospital,\n 'repayed' : format_price(repayed),\n 'remain' : format_price(remain),\n 'infos' : logs,\n }\n return jsonify_response(result)\n\n\n\nhospital_item_list_validator = Inputs(\n {\n 'hospital_id' : IdField(msg='医院id')\n }\n )\n@wechat_loggin_dec(required=False, validator=hospital_item_list_validator)\ndef hospital_item_list():\n hospital_id = request.valid_data.get('hospital_id')\n\n where = and_()\n where.append(Item.status==1)\n\n if hospital_id:\n 
where.append(Item.hospital_id==hospital_id)\n\n fields = ['id', 'hospital_id', 'title', 'price', 'orig_price', 'support_choice_list', 'image', 'has_fee']\n has_more, items = ItemService.get_paged_items(where=where, fields=fields)\n\n fetch_min_period_info(items)\n fetch_hospital_refs(items, fields=['id','name'])\n offset = ''\n if items: offset = str(items[-1]['id']) + '_' + ''\n print offset, 'offset'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : items,\n 'offset' : offset\n }\n return render_template('user/hospital_item_list.html', **result)\n return jsonify_response(result)\n\n\n\n\n\norder_pay_success_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id')\n }\n )\n@wechat_loggin_dec(required=False, validator=order_pay_success_validator)\ndef order_pay_success():\n ''' 支付成功跳转页面 '''\n order_id = request.valid_data.get('order_id')\n has_more, infos = ItemService.get_paged_items(limit=2)\n fetch_hospital_refs(infos)\n fetch_min_period_info(infos)\n context = {\n 'order_id' : order_id,\n 'infos' : infos\n }\n return render_template('user/order_pay_success.html', **context)\n\n\n\nrepayment_pay_success_validator = Inputs(\n {\n 'repayment_id' : IdField(msg='还款id')\n }\n )\n@wechat_loggin_dec(required=False, validator=repayment_pay_success_validator)\ndef repayment_pay_success():\n ''' 还款成功跳转页面 '''\n\n return render_template('user/repayment_pay_success.html')\n\n\n\ncancel_order_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id')\n }\n )\n@wechat_loggin_dec(validator=cancel_order_validator)\ndef cancel_order():\n ''' 取消订单 '''\n order_id = request.valid_data.get('order_id')\n order = OrderService.get_user_order(order_id, request.user_id)\n assert order, '订单不存在'\n where = Order.status==ORDER_STATUS.PAY_SUCCESS\n count = OrderService.update_order_status(order_id, ORDER_STATUS.CANCELED, request.user_id, where)\n if count:\n if order.credit_amount:\n repayment_amount = 
OrderService.order_repayment_logs_amount(order_id)\n remain_to_repayment = order.credit_amount - repayment_amount\n CreditService.modify_credit(request.user_id, -remain_to_repayment)\n CreditService.cancel_pay_logs(order_id)\n if order.coupon_id:\n CouponService.update_user_coupon_status(UserCoupon.id==order.coupon_id, 0)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '取消成功'\n }\n return jsonify_response(result)\n\n\ncancel_pay_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id')\n }\n )\n@wechat_loggin_dec(validator=cancel_pay_validator)\ndef cancel_pay():\n ''' 取消支付 '''\n order_id = request.valid_data.get('order_id')\n order = OrderService.get_user_order(order_id, request.user_id)\n assert order, '订单不存在'\n where = Order.status.in_([ORDER_STATUS.NEW_ORDER, ORDER_STATUS.TO_PAY])\n count = OrderService.update_order_status(order_id, ORDER_STATUS.CANCEL_BEFORE_PAY, request.user_id, where)\n if count:\n if order.credit_amount:\n CreditService.modify_credit(request.user_id, -(order.credit_amount))\n if order.coupon_id:\n CouponService.update_user_coupon_status(UserCoupon.id==order.coupon_id, 0)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '取消成功'\n }\n return jsonify_response(result)\n\n\n\nfinish_order_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id')\n }\n )\n@wechat_loggin_dec(validator=finish_order_validator)\ndef finish_order():\n ''' 用户完成订单 '''\n order_id = request.valid_data.get('order_id')\n order = OrderService.get_user_order(order_id, request.user_id)\n assert order, '订单不存在'\n where = and_(\n Order.id==order_id,\n Order.user_finished==False,\n Order.status.in_([ORDER_STATUS.PAY_SUCCESS, ORDER_STATUS.FINISH])\n )\n count = OrderService.update_order(where, user_finished=True)\n if count:\n ItemService.incr_item_count(order.item_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '完成订单'\n }\n return jsonify_response(result)\n\n\n\nhospital_location_validator = Inputs(\n {\n 'hospital_id' : IdField(msg='医院id')\n }\n 
)\n@wechat_loggin_dec(required=False, validator=hospital_location_validator)\ndef hospital_location():\n hospital_id = request.valid_data.get('hospital_id')\n hospital = ItemService.get_hospital_dict_by_id(hospital_id)\n\n return render_template('user/hospital-location.html', hospital=hospital)\n\n\n@wechat_loggin_dec(required=False)\ndef meifenfen_city():\n city_code = get_cookie('city_code')\n city_name = get_cookie('city_name')\n city = None\n if city_code:\n city = DataService.get_city_by_baidu_city_code(city_code)\n _, citys = DataService.get_paged_city_list()\n cat = 1 #1无法定位 2城市未开通 3城市已开通\n if city_code and not city:\n cat = 2\n elif city:\n cat = 3\n print city_name, city_code, type(city_name)\n context = {\n 'city' : city,\n 'citys' :citys,\n 'city_name': city_name,\n 'city_code': city_code,\n 'cat' : cat \n }\n if request.args.get('json'):\n response = jsonify_response(context)\n response = template_response(render_template('user/meifenfen_city.html', **context))\n if city:\n set_cookie(response, 'city_id', str(city.id), 86400*365)\n return response\n\n\n@wechat_loggin_dec(required=False, need_openid=True)\ndef meifenfen_index():\n banners = [\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1aco140fa18uljmo17f23cvvu111456800706495.jpg',\n 'link':'http://{}/static/user/Activities/home.html'.format(SERVER_NAME),\n },\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg',\n 'link':'http://{}/static/user/banner1.html'.format(SERVER_NAME)},\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a53eou161cs8mku16tm1h91arh1banner_02.jpg',\n 'link':'http://{}/static/user/banner2.html'.format(SERVER_NAME)},\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_03.jpg',\n 'link':'http://{}/user/menu_credit_apply/'.format(SERVER_NAME)},\n ]\n city = None\n city_name = get_cookie('choose_city_name') or get_cookie('city_name')\n city_code = get_cookie('choose_city_code') or 
get_cookie('city_code')\n if city_code:\n city = DataService.get_city_by_baidu_city_code(city_code)\n _, recommend_sub_cats = ItemService.get_paged_recommend_subcats(_sort='sort_order', _sort_dir='ASC')\n fetch_item_subcat_refs(recommend_sub_cats)\n\n current_activity = ActivityService.get_current_activity() or {}\n where = ActivityItem.activity_id==current_activity.get('id')\n fields = ('id', 'item_id', 'price', 'image')\n _, activity_items = ItemService.get_paged_activity_items(fields=fields, where=where, _sort='sort_order', _sort_dir='ASC')\n fields = ('id', 'item_id', 'image', 'desc')\n where = None\n _, recommend_items = ItemService.get_paged_recommend_items(fields=fields, where=where, _sort='sort_order', _sort_dir='ASC')\n\n img_keys = [get_img_key(i['image']) for i in recommend_items]\n img_sizes = DataService.get_imgs_size_by_keys(img_keys)\n img_key_size_map = {i['key']:{'width':i['width'],'height':i['height']} for i in img_sizes}\n print img_key_size_map\n for rec in recommend_items:\n key = get_img_key(rec['image'])\n rec['width'] = img_key_size_map[key]['width']\n rec['height'] = img_key_size_map[key]['height']\n fields = ['id', 'hospital_id', 'title', 'price', 'orig_price', 'has_fee', 'support_choice_list']\n fetch_item_refs(chain(activity_items, recommend_items), fields=fields)\n\n recommend_sub_cats = [\n {'image': 'http://www.meifenfen.com/static/user/img/home-btn1.png', 'id':5},\n {'image': 'http://www.meifenfen.com/static/user/img/home-btn2.png', 'id':8},\n {'image': 'http://www.meifenfen.com/static/user/img/home-btn3.png', 'id':3},\n ]\n first_activity_item = None\n if activity_items:\n first_activity_item = activity_items[0]\n first_activity_item['hospital'] = ItemService.get_hospital_dict_by_id(first_activity_item['item']['hospital_id'])\n item_dict_list = [i['item'] for i in chain(activity_items, recommend_items)]\n item_list = []\n for i in item_dict_list:\n if i not in item_list:\n item_list.append(i)\n for item in activity_items:\n 
item['item']['price'] = item['price']\n fetch_min_period_info(item_list)\n context = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'recommend_sub_cats' : recommend_sub_cats,\n 'activity_items' : activity_items,\n 'recommend_items' : recommend_items,\n 'activity' : current_activity,\n 'banners' : banners,\n 'city_code' : city_code,\n 'city_name' : city_name,\n 'city' : city.as_dict() if city else None\n }\n\n js_sdk_context = get_jssdk_context()\n if request.args.get('json'):\n return jsonify_response(context)\n return render_template('user/meifenfen.html', **context)\n\n\n\n@wechat_loggin_dec(required=False, need_openid=True)\ndef meifenfen_new_index():\n ''' 新首页 '''\n banners = [\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1aco140fa18uljmo17f23cvvu111456800706495.jpg',\n 'link':'http://{}/static/user/Activities/home.html'.format(SERVER_NAME),\n },\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/redpack_banner.jpg',\n 'link': 'http://www.meifenfen.com/user/redpack_index/'\n },\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_01.jpg',\n 'link':'http://{}/static/user/banner1.html'.format(SERVER_NAME)},\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a53eou161cs8mku16tm1h91arh1banner_02.jpg',\n 'link':'http://{}/static/user/banner2.html'.format(SERVER_NAME)},\n {'image': 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a5cd7ihe2aokci1uqdpfk1ivq1banner_03.jpg',\n 'link':'http://{}/user/menu_credit_apply/'.format(SERVER_NAME)},\n ]\n city_id = get_cookie('city_id')\n city = DataService.get_city_dict_by_id(city_id)\n _, recommend_sub_cats = ItemService.get_paged_recommend_subcats(_sort='sort_order', _sort_dir='ASC')\n fetch_item_subcat_refs(recommend_sub_cats)\n\n current_activity = ActivityService.get_current_activity() or {}\n where = ActivityItem.activity_id==current_activity.get('id')\n fields = ('id', 'item_id', 'price', 'image')\n _, activity_items = ItemService.get_paged_activity_items(fields=fields, 
where=where, _sort='sort_order', _sort_dir='ASC')\n fields = ('id', 'item_id', 'image', 'desc')\n where = None\n _, recommend_items = ItemService.get_paged_recommend_items(fields=fields, where=where, _sort='sort_order', _sort_dir='ASC')\n\n img_keys = [get_img_key(i['image']) for i in recommend_items]\n img_sizes = DataService.get_imgs_size_by_keys(img_keys)\n img_key_size_map = {i['key']:{'width':i['width'],'height':i['height']} for i in img_sizes}\n print img_key_size_map\n for rec in recommend_items:\n key = get_img_key(rec['image'])\n rec['width'] = img_key_size_map[key]['width']\n rec['height'] = img_key_size_map[key]['height']\n fields = ['id', 'hospital_id', 'title', 'price', 'orig_price', 'has_fee', 'support_choice_list']\n fetch_item_refs(chain(activity_items, recommend_items), fields=fields)\n\n recommend_sub_cats = [\n {'image': 'http://www.meifenfen.com/static/user/img/home-btn1.png', 'id':5},\n {'image': 'http://www.meifenfen.com/static/user/img/home-btn2.png', 'id':8},\n {'image': 'http://www.meifenfen.com/static/user/img/home-btn3.png', 'id':3},\n ]\n first_activity_item = None\n if activity_items:\n first_activity_item = activity_items[0]\n first_activity_item['hospital'] = ItemService.get_hospital_dict_by_id(first_activity_item['item']['hospital_id'])\n item_dict_list = [i['item'] for i in chain(activity_items, recommend_items)]\n item_list = []\n for i in item_dict_list:\n if i not in item_list:\n item_list.append(i)\n for item in activity_items:\n item['item']['price'] = item['price']\n\n fetch_min_period_info(item_list)\n\n where = BeautyEntry.status==1\n _, tutorials = TutorialService.get_paged_tutorial_entries(where=where)\n tutorials = tutorials[:2]\n tutorial_tags = ['原理', '手法', '案例', '大人说']\n _sort_dir = 'ASC'\n _sort = 'sort_order'\n _, recommend_hospitals = ItemService.get_paged_recommend_hospitals(_sort_dir=_sort_dir, _sort=_sort)\n fetch_hospital_refs(recommend_hospitals)\n recommend_hospitals = recommend_hospitals[:3]\n for tutorial in 
tutorials:\n tutorial['create_time'] = get_time_str_from_dt(tutorial['create_time'], '%-m.%-d')\n context = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'tutorials' : tutorials,\n 'recommend_sub_cats' : recommend_sub_cats,\n 'activity_items' : activity_items,\n 'recommend_items' : recommend_items,\n 'activity' : current_activity,\n 'banners' : banners,\n 'tutorial_tags' : tutorial_tags,\n 'recommend_hospitals' : recommend_hospitals,\n 'city' : city\n }\n\n js_sdk_context = get_jssdk_context()\n if request.args.get('json'):\n return jsonify_response(context)\n return render_template('user/meifenfen_new.html', **context)\n\n\n\n\n@wechat_loggin_dec(required=False, need_openid=False)\ndef api_doc():\n ''' 接口文档 '''\n return send_from_directory('static', 'doc.html')\n\n\n\nmei_tutorials_validator = Inputs(\n {\n 'cat' : Optional(IntChoiceField(choices=[1,2,3], msg='攻略类型')), #1最新 2最早 3最热\n 'offset' : Optional(TextField(min_length=0, max_length=1000, msg='分页参数'))\n }\n )\n@wechat_loggin_dec(required=False, validator=mei_tutorials_validator)\ndef mei_tutorials():\n ''' 美攻略 '''\n cat = request.valid_data.get('cat')\n offset = request.valid_data.get('offset')\n\n offset_id = None\n _sort = 'id'\n _sort_dir = 'DESC'\n filters = [BeautyEntry.status==1]\n if cat==1:\n if offset: filters.append(BeautyEntry.id<offset)\n if cat==2:\n _sort_dir = 'ASC'\n if offset: filters.append(BeautyEntry.id>offset)\n elif cat==3:\n _sort = 'view_count'\n _sort_dir = 'DESC'\n if offset and len((offset or '').split('_')):\n view_count, offset_id = offset.split('_')\n where = or_(\n and_(\n BeautyEntry.view_count==view_count,\n BeautyEntry.id<offset_id\n ),\n and_(\n BeautyEntry.view_count<view_count,\n )\n )\n filters.append(where)\n where = and_(*filters)\n has_more, infos = TutorialService.get_paged_tutorial_entries(\n where=where,\n _sort=_sort, _sort_dir=_sort_dir)\n offset = ''\n if infos:\n if cat!=3:\n offset = str(infos[-1][_sort])\n else:\n offset = 
'{}_{}'.format(infos[-1]['view_count'], infos[-1]['id'])\n for info in infos:\n info['create_time'] = get_time_str_from_dt(info['create_time'], '%-m-%-d')\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_more' : has_more,\n 'cat' : cat,\n 'infos' : infos,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\n\ntutorial_detail_validator = Inputs(\n {\n 'tutorial_id': IdField(msg='攻略id')\n }\n )\n@wechat_loggin_dec(required=False, validator=tutorial_detail_validator)\ndef tutorial_detail():\n ''' 美攻略 '''\n tutorial_id = request.valid_data.get('tutorial_id')\n\n tutorial = TutorialService.get_tutorial(tutorial_id)\n assert tutorial, '美攻略不存在'\n item_ids = tutorial['item_id_list']\n items = ItemService.get_items_by_ids(item_ids)\n fetch_min_period_info(items)\n fetch_hospital_refs(items, fields=['id','name'])\n TutorialService.incr_tutorial_view_count(tutorial_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'tutorial' : tutorial,\n 'infos' : items,\n }\n return jsonify_response(result)\n\n\n\ndaily_coupons_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=1000, msg='分页参数'))\n }\n )\n@wechat_loggin_dec(required=False, validator=daily_coupons_validator)\ndef daily_coupons():\n ''' 每日优惠券 '''\n offset = request.valid_data.get('offset')\n now = dt_obj.now()\n where = and_(\n DailyCoupon.start_time<now,\n DailyCoupon.end_time>now\n )\n limit = 1000\n _sort = 'start_time'\n _sort_dir = 'DESC'\n has_more, coupons = TutorialService.get_paged_daily_coupons(\n limit=1000, where=where, offset=offset, _sort=_sort, _sort_dir=_sort_dir\n )\n from collections import defaultdict\n datas = defaultdict(list)\n\n fetch_coupon_refs(coupons)\n set_coupon_use_time(coupons)\n for coupon in coupons:\n coupon['create_time_str'] = format_dt(coupon['start_time'])\n for coupon in coupons:\n datas[coupon['create_time_str']].append(coupon)\n daily_ids = [i['id'] for i in coupons]\n daily_received_map = 
TutorialService.get_user_daily_by_ids(request.user_id, daily_ids)\n for i in coupons:\n i['has_received'] = bool(daily_received_map.get(i['id']))\n\n offset = ''\n if coupons:\n offset = str(coupons[-1][_sort])\n\n infos_by_day = []\n for k,v in datas.items():\n tmp = {\n 'title': k,\n 'infos': v,\n #'note': '每日10点,惊喜不断!'\n }\n if tmp['infos'][0]['title']:\n tmp['note'] = tmp['infos'][0]['title']\n else:\n tmp['note'] = ''\n infos_by_day.append(tmp)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos_by_day,\n 'has_more' : has_more,\n 'offset' : offset \n }\n\n return jsonify_response(result)\n\n\n\nreceive_coupon_validator = Inputs(\n {\n 'daily_id' : IdField(msg='请选择活动')\n }\n )\n@wechat_loggin_dec(required=True, validator=receive_coupon_validator)\ndef receive_coupon():\n ''' 领取每日优惠券 '''\n daily_id = request.valid_data.get('daily_id')\n\n daily = TutorialService.get_user_daily(request.user_id, daily_id)\n\n assert not daily, '您已领取过'\n\n daily_coupon = TutorialService.get_daily_coupon(daily_id)\n assert daily_coupon, '活动不存在'\n assert daily_coupon['total']>daily_coupon['sent'], '已领取完'\n\n count = TutorialService.incr_daily_coupon_received(daily_id)\n assert count, '领取完了'\n\n count = TutorialService.send_daily_coupon(request.user_id, daily_id)\n if count:\n CouponService.send_user_coupon(request.user_id, daily_coupon['coupon_id'])\n daily_coupon = TutorialService.get_daily_coupon(daily_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '领取成功',\n 'count' : daily_coupon['remain']\n }\n\n return jsonify_response(result)\n\n\nresend_user_coupon_validator = Inputs(\n {\n 'user_coupon_ids' : TextField(min_length=1, max_length=100, msg='逗号分隔的优惠券id字符串'),\n 'phone' : MobileField(msg='用户手机号'),\n }\n )\n@wechat_loggin_dec(required=True, validator=resend_user_coupon_validator, app=True)\ndef resend_user_coupon():\n phone = request.valid_data.get('phone')\n user_coupon_ids = request.valid_data.get('user_coupon_ids')\n user_coupon_ids = 
str_to_int_list(user_coupon_ids)\n user = UserService.get_user_by_phone(phone)\n assert user, '手机号对应用户不存在'\n assert user.id!=request.user_id, '不能转赠给自己'\n\n for user_coupon_id in user_coupon_ids:\n CouponService.resend_user_coupon(request.user_id, user.id, user_coupon_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '转赠成功'\n }\n return jsonify_response(result)\n\n\n\n\n@wechat_loggin_dec(required=False)\ndef set_open_id():\n result = {}\n response= jsonify_response(result, with_response=True)\n set_cookie(response, 'open_id', 'o56qvw-ThtwfthGGlZ-XbH-3fjRc', 86400*30)\n return response\n\n\n@wechat_loggin_dec(required=False, need_openid=True)\ndef login_link():\n print 'login'\n return send_from_directory('static', 'user/login.html')\n\n\n@wechat_loggin_dec(required=False, need_openid=True)\ndef wechat_room_link():\n print 'wechat_room_link'\n return send_from_directory('static', 'user/Activities/home.html')\n\n\n\n" }, { "alpha_fraction": 0.6344262361526489, "alphanum_fraction": 0.6754098534584045, "avg_line_length": 22.461538314819336, "blob_id": "288d09343bc569e8dbf4d36e548ee26f4537fbe0", "content_id": "2957317294d8443ee21c270f1b1ec56cdb053341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 89, "num_lines": 26, "path": "/migrations/versions/10f3ed6c72ed_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 10f3ed6c72ed\nRevises: 75f96105f81\nCreate Date: 2015-11-27 15:05:27.624606\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '10f3ed6c72ed'\ndown_revision = '75f96105f81'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('repayment', sa.Column('data', sa.String(length=10000), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('repayment', 'data')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5885894298553467, "alphanum_fraction": 0.59796142578125, "avg_line_length": 33.45283126831055, "blob_id": "4a8ba88268034d7c7afdfd99758b2f8d6f8f3aa1", "content_id": "dc05c886ccc7b382ca67d36c1cf886c333417685", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15278, "license_type": "no_license", "max_line_length": 129, "num_lines": 424, "path": "/user/room_design.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import send_from_directory\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom models import db\nfrom models import School\nfrom models import RoomDesignDetail\nfrom util.utils import jsonify_response\nfrom util.utils import random_str\nfrom util.utils import str_to_int_list\nfrom util.utils import comma_str_to_list\nfrom util.decorators import wechat_loggin_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.validators import IntChoiceField\nfrom util.sign import sign_user\nfrom util.sign import set_cookie\nfrom util.sign import del_cookie\nfrom ops.bulks import fetch_user_refs\nfrom ops.item import ItemService\nfrom ops.data import DataService\nfrom ops.user import UserService\nfrom ops.redpack import RedpackService\nfrom ops.promote import PromoteService\nfrom ops.cache import RoomDesignVoteCounter\nfrom ops.room_design import RoomDesignService\nfrom constants import ResponseCode\nfrom 
thirdparty.sms import send_sms\nfrom thirdparty.sms import gen_vcode\nfrom thirdparty.wechat import exchange_code_for_token\nfrom settings import MAX_TODAY_PASSWD_ATTEMPT\nfrom settings import MAX_TODAY_VCODE_ATTEMPT\nfrom settings import CONTACT\nfrom constants import VOTE_COUNT_SOURCE_MAP\n\n\n\n\ndef set_tip_msg(rank, is_myself=True):\n ''' 票数文案 '''\n rank_50 = RoomDesignVoteCounter.get_vote_by_rank(50) or 0\n vote = RoomDesignVoteCounter.get_vote_by_rank(rank) or 0\n dif = rank_50 - vote\n if not is_myself:\n dif = (RoomDesignVoteCounter.get_vote_by_rank(rank-1) - vote) if rank >1 else 0\n if rank==1:\n return '第一名'\n else:\n return '距离上一名还差{}票'.format(dif)\n if rank>50:\n if dif < 500:\n return '您只差{}票就可以获得入围大礼包了哦,加油!'.format(dif)\n if dif > 500:\n vote_firends = dif/50\n return '您距离入围大礼包只差{}-{}个好友来帮忙咯'.format(vote_firends, vote_firends*2)\n elif 21<rank<50:\n dif = RoomDesignVoteCounter.get_vote_by_rank(rank-1) - vote\n if dif < 500:\n return '距您上一名还差{}票'.format(dif)\n if dif > 500:\n vote_firends = dif/50\n return '您距离入围大礼包只差{}-{}个好友来帮忙咯'.format(vote_firends, vote_firends*2)\n else:\n dif = RoomDesignVoteCounter.get_vote_by_rank(1) - vote\n if dif < 500:\n return '您距离2000元红包只差{}票了哦'.format(dif)\n if dif > 500:\n vote_firends = dif/50\n return '您距离2000元红包只差{}-{}个好友来帮忙咯'.format(vote_firends, vote_firends*2)\n \n \n \n\nroom_detail_validator = Inputs(\n {\n 'room_id' : IdField(msg='请输入寝室id'),\n }\n)\n@wechat_loggin_dec(required=False, validator=room_detail_validator, app=True)\ndef get_room_detail():\n ''' 获取寝室详情 '''\n room_id = request.valid_data.get('room_id')\n user = RedpackService.get_qruser_by_openid(request.open_id)\n has_followed= bool(user and user.nickname)\n privileges = None\n if request.user_id:\n privileges = RoomDesignService.get_user_vote_privilede(request.user_id)\n\n room = RoomDesignService.get_room_dict_by_id(room_id)\n assert room, '寝室不存在'\n is_myself = room['user_id'] == request.user_id\n vote_count = RoomDesignVoteCounter.incr(room['id'], 
0)\n rank = RoomDesignVoteCounter.rank(room['id'])\n pre_diff = RoomDesignVoteCounter.get_vote_by_rank(rank-1)-vote_count if rank>1 else 0\n room['rank']= rank\n where = RoomDesignDetail.user_id==request.user_id\n has_attend = bool(RoomDesignService.get_room(where))\n\n note = set_tip_msg(rank, is_myself) if rank else ''\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '寝室详情',\n 'has_followed': has_followed,\n 'room' : room,\n 'note' : note,\n 'vote_count': vote_count,\n 'privileges': privileges,\n 'pre_diff' : pre_diff,\n 'has_attend': has_attend,\n 'is_myself' : is_myself\n }\n return jsonify_response(result)\n\n\napply_room_validator = Inputs(\n {\n 'school_id' : IdField(msg='请选择学校'),\n 'phone' : MobileField(min_length=1, max_length=100, msg='请输入手机号'),\n 'room_name' : TextField(min_length=1, max_length=100, msg='请给您的寝室取一个独一无二的名字'),\n 'applyer_name' : TextField(min_length=1, max_length=100, msg='请输入参赛者的名字'),\n 'addr' : TextField(min_length=1, max_length=100, msg='请输入地址'),\n }\n)\n@wechat_loggin_dec(required=True, validator=apply_room_validator, app=True)\ndef apply_room():\n phone = request.valid_data.get('phone')\n school_id = request.valid_data.get('school_id')\n room_name = request.valid_data.get('room_name')\n applyer_name= request.valid_data.get('applyer_name')\n addr = request.valid_data.get('addr')\n\n apply_no = RoomDesignVoteCounter.incr_apply_no()\n pics = None\n\n where = RoomDesignDetail.user_id==request.user_id\n my_room = RoomDesignService.get_room(where)\n \n has_attend = bool(my_room)\n assert not has_attend, '您已参与过了'\n\n room_id = RoomDesignService.create_room(request.user_id, room_name, applyer_name, apply_no, phone, addr, school_id, pics)\n RoomDesignVoteCounter.add_score(0)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'room_id' : room_id\n }\n return jsonify_response(result)\n\n\nroom_list_validator = Inputs(\n {\n 'school_id': Optional(IdField(msg='请选择学校')),\n 'cat' : IntChoiceField(choices=[1,2], msg='列表类型'), #1最新参与 
2全部排名\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='请输入分页参数')),\n })\n@wechat_loggin_dec(required=False, validator=room_list_validator, app=True)\ndef room_list():\n cat = request.valid_data.get('cat')\n offset = request.valid_data.get('offset')\n school_id = request.valid_data.get('school_id')\n\n where = None\n filters = []\n if cat==1:\n _sort = 'id'\n if offset: filters.append(RoomDesignDetail.id<offset)\n elif cat==2:\n _sort = 'vote_count'\n if offset and len((offset or '').split('_'))==2: #挺烦的分页\n vote_count, offset_id = (offset or '').split('_')\n query = or_(\n and_(\n RoomDesignDetail.vote_count==vote_count,\n RoomDesignDetail.id<offset_id\n ),\n and_(\n RoomDesignDetail.vote_count<vote_count\n )\n )\n filters.append(query)\n if school_id:\n filters.append(RoomDesignDetail.school_id==school_id)\n filters.append(RoomDesignDetail.pics_count>0)\n if filters: where = and_(*filters)\n has_more, rooms = RoomDesignService.get_paged_rooms(where=where, _sort=_sort) \n for room in rooms:\n room['rank'] = RoomDesignVoteCounter.rank(room['id'])\n rank = room['rank']\n vote_count = RoomDesignVoteCounter.incr(room['id'], 0)\n pre_diff = RoomDesignVoteCounter.get_vote_by_rank(rank-1)-vote_count if rank>1 else 0\n room['note'] = set_tip_msg(rank, is_myself=False)\n room['pre_diff']= pre_diff\n\n offset = ''\n if rooms:\n if cat==1:\n offset = str(rooms[-1]['id'])\n else:\n offset = '{}_{}'.format(str(rooms[-1]['vote_count']), str(rooms[-1]['id']))\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : rooms,\n 'has_more' : has_more,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\nadd_room_pics_validators = Inputs(\n {\n 'room_id' : IdField(msg='请选择寝室'),\n 'pics' : TextField(min_length=1, max_length=100, msg='逗号分隔的图片链接字符串'), \n })\n@wechat_loggin_dec(required=True, validator=add_room_pics_validators)\ndef add_room_pics():\n ''' 添加寝室图片 '''\n pics = request.valid_data.get('pics')\n room_id = 
request.valid_data.get('room_id')\n where = RoomDesignDetail.id==room_id\n\n room = RoomDesignService.get_room_dict_by_id(room_id)\n assert room, '寝室不存在'\n\n pic_list = comma_str_to_list(pics)\n assert len(pic_list), '请上传图片'\n #assert len(pic_list)==4, '必须上传4张图'\n pics_count=len(filter(bool, pic_list))\n count = RoomDesignService.update_room(where, pics=pics, pics_count=pics_count)\n\n RoomDesignService.set_school_pics_count(room['school_id'])\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '添加成功'\n }\n return jsonify_response(result)\n\n\n\nschool_rooms_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='分页参数')), \n })\n@wechat_loggin_dec(required=False, validator=school_rooms_validator, app=True)\ndef school_rooms():\n ''' 学校风采 '''\n offset = request.valid_data.get('offset')\n where = and_(\n School.city_name=='上海',\n School.pics_count>0\n )\n\n limit = 100\n fields = ['id', 'name']\n has_more, schools = DataService.get_paged_schools(where=where, fields=fields, limit=limit)\n\n for i in schools:\n i['count'] = RoomDesignService.count_school_pics(i['id'])\n offset = ''\n if schools:\n offset = str(schools[-1]['id'])\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : schools,\n 'has_more' : has_more,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\nvote_room_validator = Inputs(\n {\n 'room_id' : IdField(msg='请选择寝室'),\n 'source' : IntChoiceField(choices=[1,2,3], msg='投票类型'), #1申请额度通过 2成功完成一单 3普通投票\n })\n@wechat_loggin_dec(required=True, validator=vote_room_validator, app=True)\ndef vote_room():\n ''' 投票 '''\n room_id = request.valid_data.get('room_id')\n source = request.valid_data.get('source')\n\n privileges = RoomDesignService.get_user_vote_privilede(request.user_id)\n\n privilege_map = {i['id']:i['status'] for i in privileges}\n\n assert privilege_map[source]!=1, '您已投过了'\n assert privilege_map[source]!=-1, '您没有投票机会,快去申请额度或下单吧'\n\n current_score = 
RoomDesignVoteCounter.incr(room_id, 0)\n count = 1\n if source!=3:\n count = RoomDesignService.update_vote_privilege_status(request.user_id, source)\n vote_count = VOTE_COUNT_SOURCE_MAP[source]\n\n if count:\n RoomDesignService.incr_room_vote(room_id, vote_count)\n RoomDesignVoteCounter.incr(room_id, vote_count)\n RoomDesignService.add_vote_log(room_id, request.user_id, source)\n\n if not RoomDesignVoteCounter.exists_score(current_score):\n if current_score>0: RoomDesignVoteCounter.remove_score(current_score)\n current_score = RoomDesignVoteCounter.incr(room_id, 0)\n RoomDesignVoteCounter.add_score(current_score)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '投票成功'\n }\n return jsonify_response(result)\n\n\n@wechat_loggin_dec(required=False)\ndef room_index():\n ''' 活动首页 '''\n limit = 2\n first = []\n second = []\n third = []\n\n user = RedpackService.get_qruser_by_openid(request.open_id)\n has_followed= bool(user and user.nickname)\n\n where = RoomDesignDetail.user_id==request.user_id\n my_room = RoomDesignService.get_room(where)\n \n has_attend = bool(my_room)\n if my_room: my_room = my_room.as_dict()\n _sort = 'id'\n where = RoomDesignDetail.pics_count>0\n has_more, first = RoomDesignService.get_paged_rooms(_sort=_sort, limit=limit, where=where)\n _sort = 'vote_count'\n has_more, second = RoomDesignService.get_paged_rooms(_sort=_sort, limit=limit, where=where) \n\n where = and_(\n School.city_name=='上海',\n School.pics_count>0\n )\n _, schools = DataService.get_paged_schools(where=where, limit=4, fields=['id', 'name'])\n for i in schools:\n i['count'] = RoomDesignService.count_school_pics(i['id'])\n\n for room in first+second:\n room['rank'] = RoomDesignVoteCounter.rank(room['id'])\n rank = room['rank']\n vote_count = RoomDesignVoteCounter.incr(room['id'], 0)\n pre_diff = RoomDesignVoteCounter.get_vote_by_rank(rank-1)-vote_count if rank>1 else 0\n room['note'] = set_tip_msg(rank, is_myself=False)\n room['pre_diff']= pre_diff\n result = {\n 'code' : 
ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_followed': has_followed,\n 'my_room' : my_room,\n 'first' : first,\n 'second' : second,\n 'third' : schools,\n 'has_attend': has_attend\n }\n return jsonify_response(result)\n\n\n\nroom_search_validator = Inputs(\n {\n 'keyword' : TextField(min_length=1, max_length=100, msg='请输入关键字'),\n })\n@wechat_loggin_dec(required=False, validator=room_search_validator, app=True)\ndef room_search():\n ''' 搜索寝室 '''\n keyword = request.valid_data.get('keyword')\n\n where = or_(\n RoomDesignDetail.phone==keyword,\n RoomDesignDetail.room_name==keyword,\n RoomDesignDetail.apply_no==keyword\n )\n room = RoomDesignService.get_room(where)\n room_id = None\n if room: room_id = room.id\n room_exist = bool(room)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'room_exist': room_exist,\n 'room_id' : room_id\n }\n return jsonify_response(result)\n\n\n@wechat_loggin_dec(required=True)\ndef get_vote_priviledges():\n ''' 用户投票机会详情 '''\n user = RedpackService.get_qruser_by_openid(request.open_id)\n has_followed= bool(user and user.nickname)\n\n privileges = RoomDesignService.get_user_vote_privilede(request.user_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_followed': has_followed,\n 'privileges': privileges,\n }\n return jsonify_response(result)\n\n\n\n\n\n@wechat_loggin_dec(required=False, need_openid=True)\ndef room_about():\n print 'room_about'\n return send_from_directory('static', 'user/Activities/about.html')\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.626427412033081, "alphanum_fraction": 0.6786296963691711, "avg_line_length": 22.576923370361328, "blob_id": "96c04c8f4d38a1c75e7f1408f10486e7e2bb5196", "content_id": "c6dc32742ceb23b920ec52709aa77819274b3007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 87, "num_lines": 26, "path": "/migrations/versions/4f4ce8bff86a_.py", "repo_name": 
"qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4f4ce8bff86a\nRevises: 11da3b568bd2\nCreate Date: 2016-01-09 17:18:22.196431\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4f4ce8bff86a'\ndown_revision = '11da3b568bd2'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('beauty_entry', sa.Column('view_count', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('beauty_entry', 'view_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5833730697631836, "alphanum_fraction": 0.5844857692718506, "avg_line_length": 30.752525329589844, "blob_id": "333254ba0d18f934b2d0d29df12a3c2a7b5fafd2", "content_id": "b9597e6babc7d1dbb2b5c66036aae8544c6c1dd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6429, "license_type": "no_license", "max_line_length": 136, "num_lines": 198, "path": "/ops/trial.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import func\nfrom util.sqlerr import SQL_DUPLICATE\nfrom util.sqlerr import SQL_DUPLICATE_PHONE\nfrom util.utils import dt_obj\n\nfrom models import db\nfrom models import Trial\nfrom models import TrialApply\nfrom models import TrialComment\nfrom ops.utils import get_items\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom thirdparty.wechat import wechat\nfrom thirdparty.wechat import create_qrcode\nfrom thirdparty.qn import upload_img\nfrom settings import celery\n\n\n\n\nclass TrialService(object):\n\n @staticmethod\n def get_trial(trial_id):\n ''' '''\n trial = Trial.query.filter(Trial.id==trial_id).first()\n if trial: return trial.as_dict()\n\n @staticmethod\n def get_user_apply(user_id, 
trial_id):\n ''' '''\n query = and_(\n TrialApply.user_id==user_id,\n TrialApply.trial_id==trial_id)\n apply = TrialApply.query.filter(query).first()\n if apply: return apply.as_dict()\n\n @staticmethod\n def get_trial_applies_by_user_ids(trial_id, user_ids):\n ''' 使用申请 '''\n query = and_(\n TrialApply.trial_id==trial_id,\n TrialApply.user_id.in_(user_ids)\n )\n applies = TrialApply.query.filter(query).all()\n return [ i.as_dict() for i in applies]\n\n @staticmethod\n def create_trial(title, image, cat, total, start_time, end_time, rules, process, coupon_id=None):\n ''' 创建试用 '''\n trial = Trial(\n process=process,\n title=title, image=image, cat=cat, total=total, start_time=start_time, end_time=end_time, rules=rules, coupon_id=coupon_id\n )\n db.session.add(trial)\n db.session.commit()\n return trial.id\n\n @staticmethod\n def update_trial(item_id, **kw):\n ''' 更新试用 '''\n count = Trial.query.filter(Trial.id==item_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def add_apply(user_id, name, phone, school, trial_id, content, sex, addr):\n try:\n trial = Trial.query.filter(Trial.id==trial_id).first()\n assert trial, '试用商品不存在'\n cat = trial.cat\n apply = TrialApply(\n addr=addr, cat=cat, user_id=user_id, name=name, phone=phone, school=school, trial_id=trial_id, content=content, sex=sex)\n db.session.add(apply)\n db.session.commit()\n return apply.id\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n if SQL_DUPLICATE.search(str(e)):\n assert 0, '您已提交过申请'\n assert 0, '申请失败'\n\n @staticmethod\n def comment(trial_id, user_id, content, photos):\n ''' 试用体会 '''\n comment = TrialComment(trial_id=trial_id, user_id=user_id, content=content, photos=photos)\n db.session.add(comment)\n db.session.commit()\n return comment.id\n\n @staticmethod\n def get_paged_trial_comments(**kw):\n return get_page(TrialComment, {}, **kw)\n\n @staticmethod\n def get_paged_trials(**kw):\n ''' 试用列表 '''\n return get_page(Trial, {}, 
**kw)\n\n @staticmethod\n def count_trial(where=None):\n ''' '''\n return count_items(Trial, where)\n\n @staticmethod\n def count_apply(where=None):\n return count_items(TrialApply, where)\n\n @staticmethod\n def incr_trial_apply_count(trial_id):\n ''' 试用人气加1 '''\n count = Trial.query.filter(Trial.id==trial_id).update({'apply_count':Trial.apply_count+1})\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_apply_user_list(**kw):\n ''' 申请用户列表 '''\n return get_page(TrialApply, {}, **kw)\n\n @staticmethod\n def update_apply_status(where, to_status):\n ''' 申请状态 '''\n count = TrialApply.query.filter(where).update({'status':to_status})\n db.session.commit()\n return count\n\n @staticmethod\n def update_apply(where, **kw):\n ''' '''\n count = TrialApply.query.filter(where).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def incr_trial_sent_count(trial_id):\n ''' 试用发放数加1 '''\n count = Trial.query.filter(Trial.id==trial_id).update({'sent':Trial.sent+1})\n db.session.commit()\n return count\n\n @staticmethod\n def get_apply(apply_id):\n apply = TrialApply.query.filter(TrialApply.id==apply_id).first()\n if apply: return apply.as_dict()\n\n @staticmethod\n def get_trial_apply(user_id, trial_id):\n query = and_(\n TrialApply.user_id==user_id,\n TrialApply.trial_id==trial_id\n )\n apply = TrialApply.query.filter(query).first()\n if apply: return apply.as_dict()\n\n @staticmethod\n def get_trial_comment(trial_id, user_id):\n ''' '''\n query = and_(\n TrialComment.trial_id==trial_id,\n TrialComment.user_id==user_id\n )\n return TrialComment.query.filter(query).first()\n\n @staticmethod\n def count_user_apply(user_ids, status=None):\n query = and_()\n query.append(TrialApply.user_id.in_(user_ids))\n if status:\n query.append(TrialApply.status==status)\n rows = db.session.query(TrialApply.user_id, func.count(TrialApply.id)).filter(query).group_by(TrialApply.user_id).all()\n print rows\n return dict(rows)\n\n @staticmethod\n def 
check_exist_order(sort_order):\n query = and_(\n Trial.sort_order==sort_order,\n Trial.end_time>=dt_obj.now()\n )\n return db.session.query(Trial).filter(query).first()\n\n @staticmethod\n def get_latest_apply(user_id):\n ''' 获取用户最近一次申请 '''\n apply = TrialApply.query.filter(TrialApply.user_id==user_id).order_by(TrialApply.id.desc()).first()\n if apply: return apply.as_dict()\n\n @staticmethod\n def get_trial_apply_by_user_ids(user_ids):\n ''' '''\n applys = TrialApply.query.filter(TrialApply.user_id.in_(user_ids)).all()\n return [i.as_dict() for i in applys]\n\n\n\n\n" }, { "alpha_fraction": 0.6364477276802063, "alphanum_fraction": 0.6771507859230042, "avg_line_length": 29.885713577270508, "blob_id": "6977d5eacab22d052295ec7dc67de2aeda82ad44", "content_id": "f0ed94a1c5049f34f080d1bf2c3db749d30dbf58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 113, "num_lines": 35, "path": "/migrations/versions/c2bb73ecf64_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: c2bb73ecf64\nRevises: 3c990682c3f0\nCreate Date: 2016-01-04 19:47:55.690716\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c2bb73ecf64'\ndown_revision = '3c990682c3f0'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('alipay_order_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('order_no', sa.String(length=100), nullable=True),\n sa.Column('buyer_email', sa.String(length=100), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('order_no')\n )\n op.create_index(op.f('ix_alipay_order_user_buyer_email'), 'alipay_order_user', ['buyer_email'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_alipay_order_user_buyer_email'), table_name='alipay_order_user')\n op.drop_table('alipay_order_user')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6338461637496948, "alphanum_fraction": 0.692307710647583, "avg_line_length": 24, "blob_id": "47028423f142fa8b16167e57cf1fd96a091791ad", "content_id": "c56d31034dfb7e01fc7239af8a048eab573c0796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/migrations/versions/53a9d06e37ce_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 53a9d06e37ce\nRevises: 2eed88b994ed\nCreate Date: 2015-10-31 14:18:39.072291\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '53a9d06e37ce'\ndown_revision = '2eed88b994ed'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('item', 'item_no')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('item', sa.Column('item_no', mysql.VARCHAR(length=50), nullable=True))\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6237457990646362, "alphanum_fraction": 0.670568585395813, "avg_line_length": 22, "blob_id": "bf613c6e36ee39866a6028037d7341f16eff6513", "content_id": "27ec9124b342068eda18749da85e3fe2184ddbc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 79, "num_lines": 26, "path": "/migrations/versions/38dd6746c99b_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 38dd6746c99b\nRevises: 42d4367e28b2\nCreate Date: 2015-12-10 17:50:20.145840\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '38dd6746c99b'\ndown_revision = '42d4367e28b2'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('coupon', sa.Column('is_trial', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('coupon', 'is_trial')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5581498742103577, "alphanum_fraction": 0.5656623840332031, "avg_line_length": 33.847877502441406, "blob_id": "41b89da1658b5c91e34f38b6aff7bf6654a74713", "content_id": "7e5bf6afbfddcb53da2e90951aedca9fc0ec1171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103926, "license_type": "no_license", "max_line_length": 143, "num_lines": 2899, "path": "/admin/views.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport json\nimport time\nimport math\nimport pickle\nfrom base64 import b64decode\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom sqlalchemy import not_\nfrom flask import request\nfrom flask import redirect\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import make_response\n\nfrom util.utils import date_to_datetime\nfrom util.utils import jsonify_response\nfrom util.utils import template_response\nfrom util.utils import prefix_img_domain\nfrom util.utils import abbreviated_pages\nfrom util.utils import get_due_time\nfrom util.utils import format_price\nfrom util.utils import get_current_period\nfrom util.utils import cacl_punish_fee\nfrom util.utils import gen_item_no\nfrom util.utils import trans_list\nfrom util.utils import dt_obj\nfrom util.sign import gen_token\nfrom util.sign import del_cookie\nfrom util.sign import set_cookie\nfrom util.sign import get_cookie\nfrom util.decorators import admin_json_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.validators import IntChoiceField\nfrom util.validators import FloatField\nfrom util.validators import IntChoicesField\nfrom util.validators import BoolChoiceField\nfrom util.validators import 
BoolIntChoiceField\nfrom util.validators import IntField\nfrom util.validators import REGField\nfrom util.utils import set_coupon_use_time\n\nfrom models import db\nfrom models import Activity\nfrom models import AdminUser\nfrom models import Item\nfrom models import School\nfrom models import Order\nfrom models import User\nfrom models import Promoter\nfrom models import Trial\nfrom models import DailyUser\nfrom models import ItemSubCat\nfrom models import ActivityItem\nfrom models import CreditApply\nfrom models import RecommendItem\nfrom models import Hospital\nfrom models import RecommendSubcat\nfrom models import PeriodPayLog\nfrom models import TrialApply\nfrom models import RecommendHospital\nfrom models import RedpackUserQuestion\nfrom models import RoomDesignDetail\nfrom ops.cache import ChsiCache\nfrom ops.cache import AdminInvalidUserPasswdCache\nfrom ops.cache import RoomDesignVoteCounter\nfrom ops.common import pay_success_action\nfrom ops.beauty_tutorial import TutorialService\nfrom ops.admin import AdminService\nfrom ops.redpack import RedpackService\nfrom ops.data import DataService\nfrom ops.promote import PromoteService\nfrom ops.item import ItemService\nfrom ops.user import UserService\nfrom ops.comment import CommentService\nfrom ops.credit import CreditService\nfrom ops.trial import TrialService\nfrom ops.coupon import CouponService\nfrom ops.bulks import fetch_item_cat_refs\nfrom ops.bulks import fetch_user_refs\nfrom ops.bulks import fetch_item_refs\nfrom ops.bulks import fetch_order_refs\nfrom ops.bulks import fetch_item_subcat_refs\nfrom ops.bulks import fetch_hospital_refs\nfrom ops.bulks import fetch_question_refs\nfrom ops.bulks import fetch_qrcodeuser_refs\nfrom ops.bulks import fetch_wechatinfo_refs\nfrom ops.bulks import fetch_servicecode_refrence\nfrom ops.bulks import fetch_coupon_refs\nfrom ops.bulks import fetch_school_refs\nfrom ops.order import OrderService\nfrom ops.order import set_order_status\nfrom ops.hospital import 
HospitalService\nfrom ops.activity import ActivityService\nfrom ops.room_design import RoomDesignService\nfrom thirdparty.qn import gen_qn_token\nfrom thirdparty.qn import upload_img\nfrom thirdparty.sms import send_sms_apply_success\nfrom thirdparty.sms import send_sms_apply_reject\nfrom thirdparty.sms import send_sms_refund\nfrom thirdparty.chsi import login_xuexin\nfrom thirdparty.chsi import refresh_chsi_captcha\nfrom thirdparty.chsi import get_chsi_info\nfrom thirdparty.wx_pay import refund_order\nfrom thirdparty.wx_pay import refund_repayment\nfrom thirdparty.wx_app_pay import refund_order as refund_app_order\nfrom thirdparty.wx_app_pay import refund_repayment as wxapp_refund_repayment\nfrom constants import ResponseCode\nfrom constants import APPLY_STATUS\nfrom constants import ORDER_ADMIN_STATUS\nfrom constants import ORDER_STATUS\nfrom constants import ORDER_STATUS_LABEL\nfrom constants import ADMIN_ORDER_STATUS_CHOICES\nfrom constants import ORDER_ADMIN_STATUS_MAP\nfrom constants import CREDIT_STATUS\nfrom constants import PAY_METHOD\nfrom constants import BODY_LABEL\n\ndef index():\n ''' http://flask.pocoo.org/docs/0.10/blueprints/#templates '''\n return render_template('admin/index.html')\n\n\nlogin_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='用户名'),\n 'passwd' : TextField(min_length=1, max_length=100, msg='密码')\n }\n )\n@admin_json_dec(required=False, validator=login_validator)\ndef login():\n name = request.valid_data.get('name')\n passwd = request.valid_data.get('passwd')\n admin = AdminService.get_admin(name)\n count = AdminInvalidUserPasswdCache.incr(name)\n assert count<10, '今日密码错误次数超限'\n if admin and admin.passwd==passwd:\n response = jsonify_response({'code':ResponseCode.SUCCESS}, with_response=True)\n token = gen_token(name)\n set_cookie(response, 'name', name, 86400*30)\n set_cookie(response, 'token', token, 86400*30)\n set_cookie(response, 'cat', str(admin.cat or 0), 86400*30)\n 
AdminInvalidUserPasswdCache.incr(name, -1)\n return response\n assert 0, '用户名或密码错误'\n\n\n@admin_json_dec()\ndef logout():\n response = make_response(redirect('/admin'))\n del_cookie(response, 'name')\n del_cookie(response, 'token')\n return response\n\n\n@admin_json_dec(roles=[0, 1, 2, 5])\ndef get_city_list(required=True, validator=None):\n has_more, cities = DataService.get_paged_cities()\n\n result = {\n 'infos':cities\n }\n return jsonify_response(result)\n\n\nnew_city_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='城市名'),\n }\n )\n@admin_json_dec(required=True, validator=new_city_validator)\ndef new_city():\n name = request.valid_data.get('name')\n print name\n city_id = DataService.create_city(name)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''}\n return jsonify_response(result)\n\n\nitem_list_validator = Inputs(\n {\n 'keyword' : Optional(TextField(min_length=1, max_length=100, msg='搜索关键字')),\n 'sub_cat_id' : Optional(IdField(msg='子分类id')),\n 'sub_cat_id' : Optional(IdField(msg='子分类id')),\n 'hospital_id' : Optional(IdField(msg='医院id')),\n 'activity_id' : Optional(IdField(msg='活动id')),\n 'page' : Optional(IdField(msg='页数')),\n 'is_recommend' : Optional(IntChoiceField(choices=[0,1], msg='是否推荐'))\n }\n )\n@admin_json_dec(required=True, validator=item_list_validator)\ndef get_item_list():\n\n keyword = request.valid_data.get('keyword')\n sub_cat_id = request.valid_data.get('sub_cat_id')\n activity_id = request.valid_data.get('activity_id')\n hospital_id = request.valid_data.get('hospital_id')\n page = request.valid_data.get('page') or 1\n is_recommend = request.valid_data.get('is_recommend') or None\n\n limit = 10\n start = (page-1)*limit\n filters = []\n order_by = None\n join = None\n if keyword: filters.append(Item.title.like('%{}%'.format(keyword)))\n if sub_cat_id:\n query = or_(\n Item.sub_cat_ids==sub_cat_id,\n Item.sub_cat_ids.like('%,{}'.format(sub_cat_id)),\n 
Item.sub_cat_ids.like('%,{},%'.format(sub_cat_id)),\n Item.sub_cat_ids.like('{},%'.format(sub_cat_id))\n )\n filters.append(query)\n if hospital_id: filters.append(Item.hospital_id==hospital_id)\n if activity_id:\n subquery = db.session.query(ActivityItem.item_id).filter(ActivityItem.activity_id==activity_id).subquery()\n filters.append(Item.id.in_(subquery))\n if is_recommend:\n subquery = db.session.query(RecommendItem.item_id).subquery()\n filters.append(Item.id.in_(subquery))\n order_by = RecommendItem.sort_order.asc()\n join = RecommendItem\n where = None\n if filters:\n where = and_(*filters)\n if request.admin.city_id:\n city_item_suq = db.session.query(Hospital.id).filter(Hospital.city_id==request.admin.city_id).subquery()\n item_query = Item.hospital_id.in_(city_item_suq)\n if where is not None:\n where = and_(\n where,\n item_query\n )\n else:\n where = item_query\n total = ItemService.count_items(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = ItemService.get_the_paged_items(\n limit=limit, start=start, where=where, join=join, order_by=order_by)\n\n item_ids = [ i['id'] for i in item_list]\n exists_recommend = ItemService.exists_recommend_item_ids(item_ids)\n for i in item_list:\n i['is_recommend'] = i['id'] in exists_recommend\n i['sort_order'] = exists_recommend.get(i['id'], 0)\n\n fetch_item_subcat_refs(item_list)\n sub_cats = [i['sub_cat'] for i in item_list]\n print sub_cats\n fetch_item_cat_refs(sub_cats)\n result = {\n 'infos' : item_list,\n 'page_info' : page_info\n }\n\n return jsonify_response(result)\n\n\nitem_edit_validator = Inputs(\n {\n 'title' : TextField(min_length=1, max_length=100, msg='商品名'),\n 'item_no' : Optional(TextField(min_length=0, max_length=100, msg='项目编号')),\n 'image' : TextField(min_length=1, max_length=1000, msg='商品小图'),\n 'photos' : Optional(TextField(min_length=0, max_length=1000, msg='图片列表')),\n 'surgery_desc' : TextField(min_length=1, max_length=1000000, 
msg='项目介绍'),\n 'doctor_desc' : TextField(min_length=1, max_length=1000000, msg='医生介绍'),\n 'note' : TextField(min_length=1, max_length=10000, msg='特别提醒'),\n 'use_time' : TextField(min_length=1, max_length=10000, msg='使用时间'),\n 'has_fee' : BoolIntChoiceField(msg='是否免息'),\n 'direct_buy' : BoolIntChoiceField(msg='是否直购'),\n #'sub_cat_id' : Optional(IdField(msg='子分类id')),\n 'sub_cat_ids' : TextField(min_length=1, max_length=1000, msg='请选择分类'),\n 'hospital_id' : IdField(msg='医院id'),\n 'price' : FloatField(msg='价格'),\n 'orig_price' : FloatField(msg='原价'),\n 'support_choice_list': IntChoicesField(choices=[1,2,3,4,5,6], msg='支持哪些分期选项'),\n }\n )\n@admin_json_dec(required=True, validator=item_edit_validator, roles=[0,1,5])\ndef item_edit(item_id=None):\n title = request.valid_data.get('title')\n sub_cat_id = request.valid_data.get('sub_cat_id')\n sub_cat_ids = request.valid_data.get('sub_cat_ids')\n price = request.valid_data.get('price')\n orig_price = request.valid_data.get('orig_price')\n hospital_id = request.valid_data.get('hospital_id')\n item_no = request.valid_data.get('item_no')\n image = request.valid_data.get('image')\n has_fee = request.valid_data.get('has_fee')\n direct_buy = request.valid_data.get('direct_buy')\n photos = request.valid_data.get('photos') or ''\n doctor_desc = request.valid_data.get('doctor_desc')\n use_time = request.valid_data.get('use_time')\n note = request.valid_data.get('note')\n surgery_desc = request.valid_data.get('surgery_desc')\n support_choice_list = request.valid_data.get('support_choice_list')\n support_choices = ','.join(map(str, support_choice_list))\n sub_cat_id = 1\n if item_id:\n assert item_no, '请输入商品编号'\n ItemService.update_item(item_id,\n title=title,\n sub_cat_id=sub_cat_id,\n sub_cat_ids=sub_cat_ids,\n price=price,\n orig_price=orig_price,\n support_choices=support_choices,\n hospital_id=hospital_id, item_no=item_no,\n photos=photos, surgery_desc=surgery_desc,\n doctor_desc=doctor_desc, image=image, direct_buy=direct_buy,\n 
has_fee=has_fee, use_time=use_time, note=note\n )\n else:\n item_no = item_no or gen_item_no()\n item_id = ItemService.create_item(\n title, hospital_id, sub_cat_id, sub_cat_ids, price, orig_price, item_no, support_choices, photos, surgery_desc, doctor_desc, image,\n has_fee, direct_buy, use_time, note)\n\n\n return jsonify_response({'item_id': item_id})\n\n\nedit_itemcat_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='分类名'),\n }\n )\n@admin_json_dec(required=True, validator=edit_itemcat_validator)\ndef edit_itemcat(cat_id=None):\n name = request.valid_data.get('name')\n if cat_id:\n msg = '修改成功'\n ItemService.update_cat(cat_id, **request.valid_data)\n else:\n msg = '添加成功'\n ItemService.create_cat(name)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\nedit_itemsubcat_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='小类名'),\n 'icon' : TextField(min_length=1, max_length=1000, msg='图标'),\n 'cat_ids' : TextField(min_length=1, max_length=1000, msg='逗号分隔的父分类id'),\n 'cat_id' : Optional(IdField(msg='分类id')),\n }\n )\n@admin_json_dec(required=True, validator=edit_itemsubcat_validator)\ndef edit_itemsubcat(sub_cat_id=None):\n ''' 编辑/添加小类 '''\n name = request.valid_data.get('name')\n icon = request.valid_data.get('icon')\n cat_id = request.valid_data.get('cat_id')\n cat_ids = request.valid_data.get('cat_ids')\n if sub_cat_id:\n msg = '修改成功'\n ItemService.update_subcat(sub_cat_id=sub_cat_id, **request.valid_data)\n else:\n msg = '添加成功'\n desc = ''\n ItemService.create_sub_cat(cat_id, name, icon, desc, cat_ids)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\nnew_period_pay_choice_validator = Inputs(\n {\n 'period_count' : IntField(msg='分期数'),\n 'period_fee' : FloatField(msg='分期费率')\n }\n )\n@admin_json_dec(validator=new_period_pay_choice_validator)\ndef new_period_pay_choice():\n period_count = 
request.valid_data.get('period_count')\n period_fee = request.valid_data.get('period_fee')\n\n pay_id = CreditService.create_period_choice(period_count=period_count, period_fee=period_fee)\n result = {\n 'pay_id': pay_id\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_item():\n item_id = request.args.get('item_id')\n item = ItemService.get_item_dict_by_id(item_id)\n\n item['has_fee'] = 1 if item['has_fee'] else 0\n item['direct_buy'] = 1 if item['direct_buy'] else 0\n result = {\n 'data' : item\n }\n response = jsonify_response(result)\n return response\n\n\n@admin_json_dec(required=True)\ndef get_cat():\n item_id = request.args.get('cat_id')\n item = ItemService.get_cat_dict_by_id(item_id)\n\n result = {\n 'data' : item\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_subcat():\n item_id = request.args.get('sub_cat_id')\n item = ItemService.get_subcat_dict_by_id(item_id)\n\n result = {\n 'data' : item\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_school_list():\n\n limit = 100\n page = int(request.args.get('page', 1))\n city_name = request.args.get('city_name')\n start = (page-1)*limit\n where = None\n if city_name: where = School.city_name==city_name\n total = DataService.count_schools(where=where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = DataService.get_paged_schools(limit=limit, start=start, where=where, _sort='city_name')\n\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_cat_list():\n has_more, cat_list = ItemService.get_paged_cats(limit=1000, _sort='sort_order', _sort_dir='ASC')\n result = {\n 'infos' : cat_list,\n 'page_info' : None,\n }\n\n return jsonify_response(result)\n\n\ncat_list_validator = Inputs(\n {\n 'cat_id' : Optional(IdField(msg='分类id')),\n 'is_recommend' : 
Optional(IntChoiceField(choices=[0,1], msg='是否推荐'))\n }\n )\n@admin_json_dec(required=True, validator=cat_list_validator)\ndef get_subcat_list():\n cat_id = request.valid_data.get('cat_id')\n is_recommend = request.valid_data.get('is_recommend')\n\n filters = []\n order_by = None\n join = None\n where = None\n if is_recommend:\n subquery = db.session.query(RecommendSubcat.sub_cat_id).subquery()\n filters.append(ItemSubCat.id.in_(subquery))\n order_by = RecommendSubcat.sort_order.asc()\n join = RecommendSubcat\n if cat_id:\n or_query= or_(\n ItemSubCat.cat_ids==cat_id,\n ItemSubCat.cat_ids.like('%,{}'.format(cat_id)),\n ItemSubCat.cat_ids.like('%,{},%'.format(cat_id)),\n ItemSubCat.cat_ids.like('{},%'.format(cat_id))\n )\n filters.append(or_query)\n\n if filters: where = and_(*filters)\n has_more, subcat_list = ItemService.get_paged_sub_cats(where=where, order_by=order_by, join=join, limit=100)\n fetch_item_cat_refs(subcat_list)\n\n all_cats = ItemService.get_item_cats()\n cat_id_obj = {i.id:i.as_dict() for i in all_cats}\n sub_cat_ids = [i['id'] for i in subcat_list]\n exists_recommend = ItemService.exists_recommend_subcat_map(sub_cat_ids)\n for i in subcat_list:\n i['is_recommend'] = i['id'] in exists_recommend\n i['sort_order'] = exists_recommend.get(i['id'], 0)\n i['cat_list'] = [cat_id_obj.get(k) for k in i['cat_id_list']]\n result = {\n 'infos' : subcat_list,\n 'page_info' : None,\n }\n\n return jsonify_response(result)\n\n\nhospital_list_validator = Inputs(\n {\n 'keyword' : Optional(TextField(min_length=1, max_length=100, msg='搜索关键字')),\n 'is_recommend' : Optional(IntChoiceField(choices=[0,1], msg='是否推荐')),\n 'page' : Optional(IdField(msg='页数')),\n }\n )\n@admin_json_dec(required=True, validator=hospital_list_validator)\ndef get_hospital_list():\n is_recommend = request.valid_data.get('is_recommend')\n keyword = request.valid_data.get('keyword')\n page = request.valid_data.get('page') or 1\n\n\n limit = 10\n start = (page-1)*limit\n filters = []\n where = 
None\n join = None\n order_by = None\n if is_recommend:\n subquery = db.session.query(RecommendHospital.hospital_id).subquery()\n filters.append(Hospital.id.in_(subquery))\n order_by = RecommendHospital.sort_order.asc()\n join = RecommendHospital\n if keyword: filters.append(Hospital.name.like('%{}%'.format(keyword)))\n if filters: where = and_(*filters)\n total = ItemService.count_hospitals(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n if request.admin.city_id:\n if where is not None:\n where = and_(\n where,\n Hospital.city_id==request.admin.city_id\n )\n else:\n where = Hospital.city_id==request.admin.city_id\n has_more, hospital_list = ItemService.get_paged_hospitals(\n join=join, order_by=order_by, where=where, start=start)\n\n item_ids = [ i['id'] for i in hospital_list]\n exists_recommend = ItemService.exists_recommend_hospital_ids(item_ids)\n for i in hospital_list:\n i['is_recommend'] = i['id'] in exists_recommend\n i['sort_order'] = exists_recommend.get(i['id'], 0)\n\n result = {\n 'infos' : hospital_list,\n 'page_info' : page_info,\n }\n\n return jsonify_response(result)\n\n\n\nsubcat_status_validator = Inputs(\n {\n 'subcat_id' : IdField(msg='子分类id'),\n 'status' : IntChoiceField(choices=[0,1], msg='状态')\n }\n )\n@admin_json_dec(required=True, validator=subcat_status_validator)\ndef set_subcat_status():\n subcat_id = request.valid_data.get('subcat_id')\n status = request.valid_data.get('status')\n\n ItemService.set_subcat_status(subcat_id, status)\n if status==0:\n ItemService.offline_subcat(subcat_id)\n result = {}\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_period_choice_list():\n _sort = 'period_count'\n _sort_dir = 'ASC'\n _, choice_list= CreditService.get_paged_period_choices(_sort=_sort, _sort_dir=_sort_dir)\n\n result = {\n 'infos' : choice_list,\n 'page_info' : None,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef refresh_qntoken():\n response 
= jsonify_response({}, with_response=True)\n if 1:#not get_cookie('qntoken'):\n qntoken = gen_qn_token()\n set_cookie(response, 'qntoken', qntoken, 86400*30)#cookie存token一小时\n\n return response\n\n\n@admin_json_dec()\ndef get_apply_list():\n ''' 额度申请列表 '''\n limit = 10\n page = int(request.args.get('page', 1))\n apply_status = int(request.args.get('apply_status') or 0)\n where = None\n if apply_status==1:\n where = CreditApply.status==APPLY_STATUS.VERIFIED\n elif apply_status==2:\n where = CreditApply.status==APPLY_STATUS.REJECTED\n elif apply_status==3:\n where = CreditApply.status==APPLY_STATUS.FIRST_STEP\n elif apply_status==4:\n where = CreditApply.status==APPLY_STATUS.SECOND_STEP\n start = (page-1)*limit\n total = CreditService.count_apply(where=where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = CreditService.get_paged_apply_list(limit=limit, start=start, where=where, _sort='update_time')\n\n fetch_user_refs(item_list)\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_apply_detail():\n ''' 额度申请详情 '''\n apply_id = request.args.get('apply_id')\n apply = CreditService.get_apply_dict_by_id(apply_id)\n credit = CreditService.get_user_credit(apply['user_id'])\n if credit: credit = credit.as_dict()\n fetch_user_refs((apply,))\n body_choice_ids = apply['body_choice_ids']\n body_choice_text = apply['body_choice_text']\n body_choices = []\n for i in body_choice_ids.split(',') if body_choice_ids else []:\n body_choices.append(BODY_LABEL.get(int(i)) or '')\n\n apply['body_choices'] = ','.join(body_choices)\n apply['body_choice_text'] = body_choice_text\n \n result = {\n 'apply' : apply,\n 'credit' : credit\n }\n return jsonify_response(result)\n\n\napply_approve_validator = Inputs(\n {\n 'apply_id' : IdField(msg='申请id'),\n 'total' : IntField(msg='额度'),\n }\n )\n@admin_json_dec(validator=apply_approve_validator, 
roles=[0,1,5])\ndef apply_approve():\n ''' 申请通过 '''\n apply_id = request.valid_data.get('apply_id')\n total = request.valid_data.get('total')\n where = and_(\n CreditApply.id==apply_id,\n )\n data = {\n 'status':APPLY_STATUS.VERIFIED\n }\n CreditService.update_apply(where, **data)\n apply = CreditService.get_apply_dict_by_id(apply_id)\n credit = CreditService.get_user_credit(apply['user_id'])\n if not credit:\n CreditService.init_credit(apply['user_id'])\n credit = CreditService.get_user_credit(apply['user_id'])\n used = credit.used\n err_msg = '审批额度不能低于当前已使用额度{}'.format(used)\n assert total>=used, err_msg\n\n CreditService.set_user_credit_total(apply['user_id'], total)\n count = CreditService.update_user_credit_status(apply['user_id'], CREDIT_STATUS.VERIFIED)\n if count:\n where = and_(\n Order.user_id==apply['user_id'],\n Order.credit_verified!=1\n )\n orders = OrderService.get_orders(where=where)\n for order in orders:\n where = and_(\n Order.id==order.id,\n Order.credit_verified!=1,\n )\n count = OrderService.update_order(where, credit_verified=1)\n if count and order.status==ORDER_STATUS.PAY_SUCCESS:\n pay_success_action(order, send_verified=True)\n where = and_(\n Order.credit_verified!=1,\n Order.user_id==apply['user_id']\n )\n OrderService.update_order(where, credit_verified=1)\n user = UserService.get_user_by_id(apply['user_id'])\n send_sms_apply_success.delay(user.phone, total)\n RoomDesignService.add_user_vote_privilege(apply['user_id'], 1)\n #PromoteService.add_rd_draw_count(apply['user_id'], 1)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_hospital():\n ''' 医院详情 '''\n hospital_id = request.args.get('item_id')\n hospital = ItemService.get_hospital_dict_by_id(hospital_id)\n\n result = {\n 'data' : hospital\n }\n return jsonify_response(result)\n\n\napply_reject_validator = Inputs(\n {\n 'reason' : TextField(min_length=1, max_length=1000, msg='被拒原因'),\n 'apply_id' : 
IdField(msg='申请id'),\n }\n )\n@admin_json_dec(validator=apply_reject_validator, roles=[0,1,5])\ndef apply_reject():\n ''' 申请拒绝 '''\n apply_id = request.valid_data.get('apply_id')\n reason = request.valid_data.get('reason')\n\n apply = CreditService.get_apply_dict_by_id(apply_id)\n assert apply, '申请不存在'\n where = and_(\n CreditApply.id==apply_id\n )\n data = {\n 'status':APPLY_STATUS.REJECTED,\n 'reason':reason,\n }\n CreditService.update_apply(where, **data)\n CreditService.update_user_credit_status(apply['user_id'], CREDIT_STATUS.REJECTED)\n user = UserService.get_user_by_id(apply['user_id'])\n where = and_(\n Order.user_id==apply['user_id'],\n Order.credit_verified==0\n )\n orders = OrderService.get_orders(where=where)\n for order in orders:\n where = and_(\n Order.id==order.id,\n Order.credit_verified==0,\n )\n count = OrderService.update_order(where, credit_verified=2)\n send_sms_apply_reject.delay(user.phone)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\nhospital_edit_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='医院名'),\n 'tags' : TextField(min_length=1, max_length=100, msg='标签'),\n 'addr' : TextField(min_length=1, max_length=100, msg='地址'),\n 'phone' : TextField(min_length=1, max_length=100, msg='电话'),\n 'photos' : Optional(TextField(min_length=0, max_length=1000, msg='图片列表')),\n 'image' : TextField(min_length=1, max_length=100, msg='医院头像'),\n 'desc' : TextField(min_length=1, max_length=10000, msg='描述'),\n 'working_time' : TextField(min_length=1, max_length=100, msg='工作时间'),\n 'city_id' : IdField(msg='城市id'),\n 'lng' : FloatField(msg='经度'),\n 'lat' : FloatField(msg='纬度'),\n }\n )\n@admin_json_dec(required=True, validator=hospital_edit_validator)\ndef hospital_edit(item_id=None):\n name = request.valid_data.get('name')\n phone = request.valid_data.get('phone')\n image = request.valid_data.get('image')\n tags = request.valid_data.get('tags')\n city_id = 
request.valid_data.get('city_id')\n lng = request.valid_data.pop('lng')\n lat = request.valid_data.pop('lat')\n desc = request.valid_data.get('desc')\n working_time= request.valid_data.get('working_time')\n long_lat = '{},{}'.format(lng, lat)\n request.valid_data['long_lat'] = long_lat\n if item_id:\n print name\n ItemService.update_hospital(item_id, **request.valid_data)\n else:\n item_id = ItemService.create_hospital(**request.valid_data)\n\n return jsonify_response({'item_id': item_id})\n\n\n\nrecommend_item_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n 'recommend' : BoolChoiceField(msg='是否推荐'),\n }\n )\n@admin_json_dec(required=True, validator=recommend_item_validator)\ndef recommend_item():\n item_id = request.valid_data.get('item_id')\n recommend = request.valid_data.get('recommend')\n print item_id, recommend\n\n if recommend:\n ItemService.add_recommend_item(item_id)\n else:\n ItemService.rm_recommend_item(item_id)\n msg = '推荐成功' if recommend else '取消推荐成功'\n result = {\n 'msg' : msg\n }\n\n return jsonify_response(result)\n\n\nrecommend_hospital_validator = Inputs(\n {\n 'item_id' : IdField(msg='医院id'),\n 'recommend' : BoolChoiceField(msg='是否推荐'),\n }\n )\n@admin_json_dec(required=True, validator=recommend_hospital_validator)\ndef recommend_hospital():\n ''' 取消推荐医院 '''\n item_id = request.valid_data.get('item_id')\n recommend = request.valid_data.get('recommend')\n\n ItemService.rm_recommend_hospital(item_id)\n msg = '取消推荐成功'\n result = {\n 'msg' : msg,\n 'code' : ResponseCode.SUCCESS\n }\n\n return jsonify_response(result)\n\n\n\nset_item_status_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n 'status' : IntChoiceField(choices=[0,1], msg='商品状态'),\n }\n )\n@admin_json_dec(required=True, validator=set_item_status_validator)\ndef set_item_status():\n item_id = request.valid_data.get('item_id')\n status = request.valid_data.get('status')\n print item_id, status\n data = {\n 'status': status\n }\n ItemService.update_item(item_id, 
**data)\n msg = '上线成功' if status==1 else '下线成功'\n if status==0:\n ItemService.offline_item(item_id)\n result = {\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\n\nset_question_status_validator = Inputs(\n {\n 'item_id' : IdField(msg='问题id'),\n 'status' : IntChoiceField(choices=[0,1], msg='问题状态'),\n }\n )\n@admin_json_dec(required=True, validator=set_question_status_validator)\ndef set_question_status():\n item_id = request.valid_data.get('item_id')\n status = request.valid_data.get('status')\n print item_id, status\n data = {\n 'status': status\n }\n RedpackService.update_redpack_status(item_id, status)\n msg = '上线成功' if status==1 else '下线成功'\n result = {\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\nuser_list_validator = Inputs(\n {\n 'keyword' : Optional(TextField(min_length=1, max_length=100, msg='搜索关键字')),\n 'page' : Optional(IdField(msg='页数')),\n 'promoter_id' : Optional(IdField(msg='推广员id')),\n 'same_user_id' : Optional(IdField(msg='相同用户注册id')),\n }\n )\n@admin_json_dec(required=True, validator=user_list_validator, roles=[0,2,5])\ndef get_user_list():\n ''' 获取用户列表 '''\n keyword = request.valid_data.get('keyword')\n promoter_id = request.valid_data.get('promoter_id')\n page = request.valid_data.get('page') or 1\n same_user_id = request.valid_data.get('same_user_id')\n limit = 10\n start = (page-1)*limit\n where = None\n filters = []\n if keyword:\n filters.append(\n or_(\n User.name==keyword,\n User.phone==keyword\n )\n )\n if promoter_id:\n sub_q = PromoteService.get_promoter_user_id_suq(promoter_id)\n filters.append(User.id.in_(sub_q))\n if filters: where = and_(*filters)\n if same_user_id:\n open_id = None\n qrcode_user = PromoteService.get_qrcode_user_by_user_id(same_user_id)\n if qrcode_user: open_id = qrcode_user.open_id\n suq = PromoteService.open_id_user_ids_suq(open_id)\n where = and_(\n User.id.in_(suq),\n User.id!=same_user_id\n )\n total = UserService.count_user(where)\n page_info = 
abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = UserService.get_paged_user(limit=limit, start=start, where=where)\n fetch_wechatinfo_refs(item_list, id_='id', dest_key='wechat_info', keep_id=True)\n\n user_ids = [i['id'] for i in item_list]\n open_ids = PromoteService.get_open_ids_by_user_ids(user_ids)\n count_map = PromoteService.count_open_id_user_count(open_ids)\n user_id_open_id_map = PromoteService.get_user_id_open_id_map(open_ids)\n open_id_promoter_id_map= PromoteService.get_qrcodeusers_by_open_ids(open_ids)\n print user_id_open_id_map\n for info in item_list:\n open_id = user_id_open_id_map.get(info['id'])\n if open_id:\n info['same_user_count'] = (count_map.get(open_id) or 1) -1\n info['open_id'] = open_id\n if open_id_promoter_id_map.get(open_id):\n info['promoter'] = open_id_promoter_id_map.get(open_id)['promoter']\n if open_id_promoter_id_map.get(open_id):\n info['parent'] = open_id_promoter_id_map.get(open_id)['parent']\n else:\n info['same_user_count'] = 0\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_user_detail():\n ''' 用户详情 '''\n item_id = request.args.get('item_id')\n user = UserService.get_user_by_id(item_id)\n assert user, '用户不存在'\n qruser = PromoteService.get_qrcode_user_by_user_id(item_id)\n open_id = None\n wechat_info = None\n location = None\n if qruser:\n open_id = qruser.open_id\n if not qruser.nickname:\n try:\n PromoteService.set_user_sex(open_id)\n qruser = PromoteService.get_qrcode_user_by_user_id(item_id)\n except Exception as e:\n import traceback\n traceback.print_exc()\n if qruser and qruser.nickname:\n wechat_info = {}\n wechat_info['nickname'] = qruser.nickname\n wechat_info['sex'] = qruser.sex\n wechat_info['city'] = qruser.city\n wechat_info['headimgurl'] = qruser.headimgurl\n if open_id:\n try:\n from thirdparty.wechat import wechat\n location = 
PromoteService.get_first_location(open_id)\n from util.utils import translate_location\n if location:\n latlng = '{},{}'.format(location['lat'], location['lng'])\n result = translate_location(latlng)\n location = result.json()['result'][0]\n except Exception as e:\n import traceback\n traceback.print_exc()\n user = user.as_dict()\n apply = CreditService.get_apply_dict_by_userid(item_id)\n result = {\n 'data' : user,\n 'apply' : apply,\n 'location' : location,\n 'wechat_info': wechat_info\n }\n return jsonify_response(result)\n\n\n\n@admin_json_dec()\ndef get_school_city_list():\n ''' 学校省市列表 '''\n datas = DataService.get_school_city_names()\n result = {\n 'infos': datas\n }\n return jsonify_response(result)\n\n\norder_list_validator = Inputs(\n {\n 'hospital_id' : Optional(IdField(msg='医院id')),\n 'sub_cat_id' : Optional(IdField(msg='子分类id')),\n 'keyword' : Optional(TextField(min_length=1, max_length=1000, msg='搜索订单号或用户手机号')),\n 'order_status' : Optional(IntChoiceField(choices=ORDER_ADMIN_STATUS_MAP.keys(), msg='订单筛选状态')),\n }\n )\n@admin_json_dec(validator=order_list_validator)\ndef get_order_list():\n ''' 订单列表 '''\n page = int(request.args.get('page', 1))\n hospital_id = request.valid_data.get('hospital_id')\n sub_cat_id = request.valid_data.get('sub_cat_id')\n keyword = request.valid_data.get('keyword')\n order_status = request.valid_data.get('order_status')\n limit = 10\n start = (page-1)*limit\n\n where = None\n order_by = None\n conditions = []\n if hospital_id: conditions.append(Order.hospital_id==hospital_id)\n if keyword and len(keyword)==11:\n sub_query = db.session.query(User.id).filter(User.phone==keyword).subquery()\n conditions.append(Order.user_id.in_(sub_query))\n elif keyword:\n conditions.append(Order.order_no==keyword)\n if order_status:\n if order_status==ORDER_ADMIN_STATUS.TO_PAY:\n conditions.append(Order.status.in_([ORDER_STATUS.NEW_ORDER, ORDER_STATUS.TO_PAY]))\n elif order_status==ORDER_ADMIN_STATUS.FINISH:\n 
conditions.append(Order.status==ORDER_STATUS.FINISH)\n elif order_status==ORDER_ADMIN_STATUS.TO_SERVE: #待服务\n conditions.append(and_(\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.credit_verified==1\n )\n )\n elif order_status==ORDER_ADMIN_STATUS.CREDIT_VERIFY: #额度待审核\n conditions.append(Order.credit_verified==0)\n elif order_status==ORDER_ADMIN_STATUS.CANCELD:\n conditions.append(Order.status.in_([ORDER_STATUS.CANCEL_BEFORE_PAY, ORDER_STATUS.CANCELED]))\n elif order_status==ORDER_ADMIN_STATUS.TO_REFUND:\n order_by = [Order.refund.asc(), Order.id.desc()]\n sub_filter = and_(\n PeriodPayLog.status==1\n )\n sub_q = db.session.query(PeriodPayLog.order_id).filter(sub_filter).subquery()\n conditions.append(and_(\n Order.status==ORDER_STATUS.CANCELED,\n or_(\n Order.price>0,\n Order.id.in_(sub_q)\n ),\n Order.refund==0, #分两部退款的可能某一次退款不成功\n )\n )\n elif order_status==ORDER_ADMIN_STATUS.REFUNDED:\n conditions.append(\n and_(\n Order.refund==1,\n Order.status==ORDER_STATUS.CANCELED\n )\n )\n else:\n conditions.append(Order.status==None)\n if sub_cat_id:\n sub_query = db.session.query(Item.id).filter(Item.sub_cat_id==sub_cat_id).subquery()\n conditions.append(Order.item_id.in_(sub_query))\n if conditions:\n where = and_(*conditions)\n\n total = OrderService.count_order(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = OrderService.get_paged_orders(\n order_by=order_by, limit=limit, start=start, where=where)\n\n fetch_servicecode_refrence(item_list, 'id', dest_key='service_code_dict', keep_id=True)\n\n for order in item_list:\n comment = None\n set_order_status(order, comment, order['service_code_dict'])\n trans_list(item_list, 'status', 'status_label', ORDER_STATUS_LABEL, pop=False)\n\n order_status_choices = ADMIN_ORDER_STATUS_CHOICES\n\n fetch_user_refs(item_list)\n fetch_item_refs(item_list)\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n 'order_status_choices': 
order_status_choices\n }\n\n return jsonify_response(result)\n\n\nupload_image_validator = Inputs(\n {\n 'image' : TextField(min_length=1, max_length=10000000, msg='图片'),\n 'prefix' : Optional(TextField(min_length=0, max_length=10000000, msg='前缀'))\n }\n )\n@admin_json_dec(validator=upload_image_validator)\ndef upload_image():\n try:\n img_str = request.valid_data.pop('image')\n prefix = request.valid_data.pop('prefix') or ''\n if prefix: prefix = '{}'.format(prefix)\n code = 0\n msg = '上传成功'\n print 'uploading...', len(img_str)\n content = b64decode(img_str.split(',')[1])\n key = (prefix or 'subcaticon') + '/' + str(time.time()) + '.jpg'\n upload_img(key, content)\n return jsonify_response({'image': key, 'fullpath': prefix_img_domain(key)})\n except Exception as e:\n import traceback\n traceback.print_exc()\n return jsonify_response({'msg':'服务器异常'})\n\n\n\n@admin_json_dec()\ndef verify_chsi():\n ''' 验证chsi学信网 '''\n user_id = request.args.get('user_id')\n apply = CreditService.get_apply_dict_by_userid(user_id)\n chsi_name = apply['chsi_name']\n chsi_passwd = apply['chsi_passwd']\n data, success, return_captcha, session = login_xuexin(chsi_name, chsi_passwd)\n\n ChsiCache.set(user_id, pickle.dumps(session))\n if return_captcha:\n data = prefix_img_domain(data)\n msg = '抓取成功' if success else '抓取失败'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg,\n 'success' : success,\n 'return_captcha': return_captcha,\n 'data' : data,\n }\n return jsonify_response(result)\n\n\n\nset_chsi_captcha_validator = Inputs(\n {\n 'captcha' : TextField(min_length=1, max_length=10000000, msg='验证码'),\n 'apply_id' : IdField(msg='申请id')\n }\n )\n@admin_json_dec(validator=set_chsi_captcha_validator)\ndef set_chsi_captcha():\n ''' 输入验证码成功 '''\n captcha = request.valid_data.get('captcha')\n apply_id = request.valid_data.get('apply_id')\n\n apply = CreditService.get_apply_dict_by_id(apply_id)\n chsi_name = apply['chsi_name']\n chsi_passwd = apply['chsi_passwd']\n user_id = 
apply['user_id']\n\n session_pickle = ChsiCache.get(user_id)\n session = pickle.loads(session_pickle)\n\n msg = ''\n data = get_chsi_info(chsi_name, chsi_passwd, captcha, session)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg,\n 'success' : True,\n 'data' : data,\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(validator=None)\ndef refresh_captcha():\n ''' 刷新验证码 '''\n apply_id = request.args.get('apply_id')\n apply = CreditService.get_apply_dict_by_id(apply_id)\n session_pickle = ChsiCache.get(apply['user_id'])\n session = pickle.loads(session_pickle)\n\n print session\n print apply_id\n key = refresh_chsi_captcha(session)\n print key\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'data' : prefix_img_domain(key),\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_advice_list():\n ''' 反馈列表 '''\n limit = 10\n page = int(request.args.get('page', 1))\n start = (page-1)*limit\n total = UserService.count_advices()\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = UserService.get_paged_user_advices(limit=limit, start=start)\n\n fetch_user_refs(item_list)\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_advice_detail():\n ''' 反馈详情 '''\n advice_id = request.args.get('advice_id')\n advice = UserService.get_advice_dict_by_id(advice_id)\n fetch_user_refs((advice,))\n result = {\n 'data': advice\n }\n return jsonify_response(result)\n\n\n\nrefund_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id')\n }\n )\n@admin_json_dec(validator=refund_validator, roles=[0,1,5])\ndef admin_refund_order():\n ''' 退款 '''\n order_id = request.valid_data.get('order_id')\n\n order = OrderService.get_order_by_id(order_id)\n\n assert order and order.status==ORDER_STATUS.CANCELED, '订单不能退款'\n\n where = and_(\n Order.id==order_id,\n Order.status==ORDER_STATUS.CANCELED\n )\n count = 
OrderService.update_order(where, commit=True, refund=True)\n has_alipay = False\n if order.pay_method==PAY_METHOD.ALIPAY:\n has_alipay = True\n \n order_repayments= OrderService.get_order_repayment_logs_amount(order_id)\n repayment_amount= sum([format_price(i['price']) for i in order_repayments.values()] or [0])\n assert order.price or repayment_amount, '订单未曾支付过金额'\n sms_msg = '首付金额{}'.format(format_price(order.price))\n if repayment_amount:\n sms_msg = sms_msg + '和已还款金额{}'.format(repayment_amount)\n\n refund_data = {}\n link = ''\n for order_no in order_repayments:\n info = order_repayments[order_no]\n if info['pay_method'] == PAY_METHOD.ALIPAY:\n has_alipay = True\n\n #微信支付 支付宝支付退款需要分两部进行\n if not has_alipay: assert count, '订单不能退款'\n\n msg = ''\n if not has_alipay:\n for order_no in order_repayments:\n info = order_repayments[order_no]\n pay_method = info['pay_method']\n amount = info['price']\n total_fee = info['total']\n transaction_id = info['transaction_id']\n if pay_method==PAY_METHOD.WECHAT_APP:\n result = wxapp_refund_repayment(amount, total_fee, order_no, transaction_id)\n else:\n resullt = refund_repayment(amount, total_fee, order_no, transaction_id)\n if order.price:\n if order.pay_method==PAY_METHOD.WECHAT_APP:\n result = refund_app_order(order)\n else:\n result = refund_order(order)\n print result\n if result['result_code'] == 'SUCCESS':\n msg = '退款成功'\n code = ResponseCode.SUCCESS\n user = UserService.get_user_by_id(order.user_id)\n send_sms_refund.delay(user.phone, order.order_no, sms_msg, '14个工作日')\n else:\n code = ResponseCode.SERVER_ERROR\n msg = '退款失败'\n else: #支付宝 微信混杂退款\n for order_no in order_repayments:\n info = order_repayments[order_no]\n pay_method = info['pay_method']\n amount = info['price']\n total_fee = info['total']\n transaction_id = info['transaction_id']\n if pay_method==PAY_METHOD.ALIPAY:\n refund_data[transaction_id] = amount\n elif pay_method==PAY_METHOD.WECHAT_APP:\n wxapp_refund_repayment(amount, total_fee, order_no, 
transaction_id)\n else:\n refund_repayment(amount, total_fee, order_no, transaction_id)\n if order.pay_method==PAY_METHOD.WECHAT_WEB:\n result = refund_order(order)\n elif order.pay_method==PAY_METHOD.WECHAT_APP:\n result = refund_app_order(order)\n else:\n if order.price:\n refund_data[order.transaction_id] = format_price(order.price)\n from thirdparty.alipay import alipay\n link = alipay.refund_order(refund_data, '美分分订单退款')\n msg = '跳转到支付宝商户后台退款'\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg,\n 'refund_data': refund_data,\n 'link' : link,\n 'has_alipay': has_alipay\n }\n return jsonify_response(result)\n\n\n\ndel_item_activity_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id')\n }\n )\n@admin_json_dec(validator=del_item_activity_validator)\ndef del_item_activity():\n item_id = request.valid_data.get('item_id')\n ActivityService.del_item_activitys(item_id, None)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '从活动移除商品成功'\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_activity_list():\n ''' 活动列表 ''' \n limit = 10\n page = int(request.args.get('page', 1))\n start = (page-1)*limit\n total = ActivityService.count_activitys()\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n where = None\n if request.admin.city_id:\n where = Activity.city_id==request.admin.city_id\n has_more, item_list = ActivityService.get_paged_activitys(where=where, limit=limit, start=start)\n\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_activity_items():\n ''' 活动商品列表 '''\n limit = 1000\n activity_id = int(request.args.get('activity_id', 1))\n where = ActivityItem.activity_id==activity_id\n has_more, infos = ActivityService.get_paged_activity_items(where=where, limit=limit)\n\n fields = ['id', 'title']\n has_more, items = ItemService.get_paged_items(limit=limit, fields=fields)\n\n selected = set(i['item_id'] for i 
in infos)\n for i in items:\n i['selected'] = i['id'] in selected\n i['label'] = i['title']\n\n result = {\n 'infos' : items,\n }\n\n return jsonify_response(result)\n\n\nset_activity_items_validator = Inputs(\n {\n 'activity_id' : IdField(msg='活动id'),\n 'ids' : IntChoicesField(all=True, msg='商品id列表')\n }\n )\n@admin_json_dec(validator=set_activity_items_validator)\ndef set_activity_items():\n ''' 设置活动商品列表 '''\n item_ids = request.valid_data.get('ids')\n activity_id = request.valid_data.get('activity_id')\n\n print item_ids, activity_id\n\n ActivityService.set_activity_items(activity_id, item_ids)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '编辑成功'\n }\n return jsonify_response(result)\n\n\n\nactivity_edit_validator = Inputs(\n {\n 'title' : TextField(min_length=1, max_length=100, msg='活动标题'),\n 'desc' : TextField(min_length=1, max_length=10000, msg='活动描述'),\n 'start_time' : TextField(min_length=1, max_length=100, msg='开始时间'),\n 'end_time' : TextField(min_length=1, max_length=100, msg='结束时间'),\n 'city_id' : IdField(msg='城市id'),\n }\n )\n@admin_json_dec(required=True, validator=activity_edit_validator)\ndef activity_edit(item_id=None):\n title = request.valid_data.get('title')\n city_id = request.valid_data.get('city_id')\n desc = request.valid_data.get('desc')\n start_time = request.valid_data.get('start_time')\n end_time = request.valid_data.get('end_time')\n if not item_id:\n assert not ActivityService.exists_activity_time(start_time, end_time, city_id), '时间范围已存在'\n item_id = ActivityService.create_activity(title, desc, start_time, end_time, city_id)\n msg = '添加成功'\n else:\n assert not ActivityService.exists_activity_time(start_time, end_time, city_id, item_id), '时间范围已存在'\n ActivityService.update_activity(item_id, **request.valid_data)\n msg = '修改成功'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'item_id' : item_id,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\ndef get_activity():\n item_id = request.args.get('item_id')\n activity = 
ActivityService.get_activity_dict_by_id(item_id)\n\n result = {\n 'data':activity\n }\n return jsonify_response(result)\n\n\ntop_recommend_item_validator = Inputs(\n {\n 'item_id' : IdField(msg='商品id'),\n }\n )\n@admin_json_dec(validator=top_recommend_item_validator)\ndef top_recommend_item():\n ''' 置顶推荐 '''\n item_id = request.valid_data.get('item_id')\n ItemService.top_recommend_item(item_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '置顶成功'\n }\n return jsonify_response(result)\n\n\n\ntop_recommend_subcat_validator = Inputs(\n {\n 'sub_cat_id' : IdField(msg='子分类id'),\n }\n )\n@admin_json_dec(validator=top_recommend_subcat_validator)\ndef top_recommend_subcat():\n ''' 子分类 '''\n sub_cat_id = request.valid_data.get('sub_cat_id')\n ItemService.top_recommend_subcat(sub_cat_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '置顶成功'\n }\n return jsonify_response(result)\n\n\n\nrecommend_subcat_validator = Inputs(\n {\n 'item_id' : IdField(msg='子分类id'),\n 'recommend' : BoolChoiceField(msg='是否推荐'),\n }\n )\n@admin_json_dec(required=True, validator=recommend_subcat_validator)\ndef recommend_subcat():\n sub_cat_id = request.valid_data.get('item_id')\n recommend = request.valid_data.get('recommend')\n print sub_cat_id, recommend\n\n if recommend:\n ItemService.add_recommend_subcat(sub_cat_id)\n else:\n ItemService.rm_recommend_subcat(sub_cat_id)\n msg = '推荐成功' if recommend else '取消推荐成功'\n result = {\n 'msg' : msg\n }\n\n\n@admin_json_dec()\ndef get_item_recommend():\n item_id = int(request.args.get('item_id'))\n\n data = ItemService.get_item_recommend(item_id) or dict(item_id=item_id)\n\n if data: fetch_item_refs((data,))\n result = {\n 'data' : data\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_hospital_recommend():\n hospital_id = int(request.args.get('hospital_id'))\n\n data = ItemService.get_hospital_recommend(hospital_id) or dict(hospital_id=hospital_id)\n\n if data: fetch_hospital_refs((data,))\n result = {\n 'data' : 
data\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_item_activity():\n item_id = int(request.args.get('item_id'))\n\n data = ItemService.get_item_activity(item_id) or dict(item_id=item_id)\n\n if data: fetch_item_refs((data,))\n result = {\n 'data' : data\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_subcat_recommend():\n sub_cat_id = int(request.args.get('sub_cat_id'))\n data = ItemService.get_subcat_recommend(sub_cat_id) or dict(sub_cat_id=sub_cat_id)\n if data: fetch_item_subcat_refs((data, ))\n result = {\n 'data' : data\n }\n return jsonify_response(result)\n\n\n\nitem_activity_edit_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'activity_id' : IdField(msg='活动id'),\n 'image' : TextField(min_length=1, max_length=1000, msg='图片'),\n 'price' : FloatField(msg='活动价格'),\n }\n )\n@admin_json_dec(validator=item_activity_edit_validator)\ndef item_activity_edit(item_id=None):\n sort_order = request.valid_data.get('sort_order')\n activity_id = request.valid_data.get('activity_id')\n price = request.valid_data.get('price')\n image = request.valid_data.get('image')\n\n item_activity = ItemService.get_item_activity(item_id)\n if not item_activity:\n ItemService.add_activity_item(item_id, sort_order, activity_id, price, image)\n else:\n ActivityService.del_item_activitys(item_id, item_activity['activity_id'])\n ItemService.update_activity_item(item_id, **request.valid_data)\n\n msg = '编辑成功'\n result = {\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\nitem_recommend_edit_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'image' : TextField(min_length=1, max_length=1000, msg='图片'),\n 'desc' : TextField(min_length=1, max_length=1000, msg='描述')\n }\n )\n@admin_json_dec(validator=item_recommend_edit_validator)\ndef item_recommend_edit(item_id=None):\n image = request.valid_data.get('image')\n desc = request.valid_data.get('desc')\n sort_order = request.valid_data.get('sort_order')\n\n recommend 
= ItemService.get_item_recommend(item_id)\n DataService.set_img_size.delay(image)\n if not recommend:\n ItemService.add_recommend_item(item_id, sort_order, image, desc)\n else:\n ItemService.update_recommend_item(item_id, **request.valid_data)\n\n msg = '编辑成功'\n result = {\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\nsubcat_recommend_edit_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'icon' : TextField(min_length=1, max_length=1000, msg='图片'),\n }\n )\n@admin_json_dec(validator=subcat_recommend_edit_validator)\ndef subcat_recommend_edit(item_id=None):\n icon = request.valid_data.get('icon')\n sort_order = request.valid_data.get('sort_order')\n\n recommend = ItemService.get_subcat_recommend(item_id)\n if not recommend:\n ItemService.add_recommend_subcat(item_id, sort_order, icon)\n else:\n ItemService.update_recommend_subcat(item_id, **request.valid_data)\n\n msg = '编辑成功'\n result = {\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\nhospital_recommend_edit_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'color' : TextField(min_length=1, max_length=1000, msg='颜色'),\n 'tag' : TextField(min_length=1, max_length=1000, msg='标签'),\n }\n )\n@admin_json_dec(validator=hospital_recommend_edit_validator)\ndef hospital_recommend_edit(item_id=None):\n tag = request.valid_data.get('tag')\n color = request.valid_data.get('color')\n sort_order = request.valid_data.get('sort_order')\n\n recommend = ItemService.get_hospital_recommend(item_id)\n if not recommend:\n ItemService.add_recommend_hospital(item_id, sort_order, color, tag)\n else:\n ItemService.update_recommend_hospital(item_id, **request.valid_data)\n\n msg = '编辑成功'\n result = {\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\nset_recommend_order_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'item_id' : IdField(msg='商品id'),\n }\n )\n@admin_json_dec(validator=set_recommend_order_validator)\ndef set_recommend_order():\n item_id = 
request.valid_data.get('item_id')\n sort_order = request.valid_data.get('sort_order')\n\n exist = ItemService.check_exist_order(sort_order)\n assert not exist, '排序值已存在'\n ItemService.update_recommend_item(item_id, sort_order=sort_order)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '修改成功'\n }\n return jsonify_response(result)\n\n\nset_trial_order_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'item_id' : IdField(msg='商品id'),\n }\n )\n@admin_json_dec(validator=set_trial_order_validator)\ndef set_trial_order():\n item_id = request.valid_data.get('item_id')\n sort_order = request.valid_data.get('sort_order')\n\n exist = TrialService.check_exist_order(sort_order)\n assert not exist, '排序值已存在'\n TrialService.update_trial(item_id, sort_order=sort_order)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '修改成功'\n }\n return jsonify_response(result)\n\n\n\n\nset_recommend_subcat_order_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'item_id' : IdField(msg='子分类id'),\n }\n )\n@admin_json_dec(validator=set_recommend_subcat_order_validator)\ndef set_recommend_subcat_order():\n item_id = request.valid_data.get('item_id')\n sort_order = request.valid_data.get('sort_order')\n\n exist = ItemService.check_exist_subcat_order(sort_order)\n assert not exist, '排序值已存在'\n ItemService.update_recommend_subcat(item_id, sort_order=sort_order)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '修改成功'\n }\n return jsonify_response(result)\n\n\nset_recommend_hospital_order_validator = Inputs(\n {\n 'sort_order' : IntField(msg='排序'),\n 'item_id' : IdField(msg='医院id'),\n }\n )\n@admin_json_dec(validator=set_recommend_hospital_order_validator)\ndef set_recommend_hospital_order():\n item_id = request.valid_data.get('item_id')\n sort_order = request.valid_data.get('sort_order')\n\n exist = ItemService.check_exist_hospital_order(sort_order)\n assert not exist, '排序值已存在'\n ItemService.update_recommend_hospital(item_id, sort_order=sort_order)\n\n 
result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '修改成功'\n }\n return jsonify_response(result)\n\n\ndef get_period_pay_log_list():\n ''' 逾期分期帐单列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n keyword = request.args.get('keyword', '')\n is_delayed = request.args.get('is_delayed')=='true'\n start = (page-1)*limit\n\n deadline = get_due_time(0)\n where = and_()\n if is_delayed:\n where.append(and_(#逾期为还的\n PeriodPayLog.repayment_time==None,\n PeriodPayLog.deadline<deadline,\n PeriodPayLog.status==0\n ))\n\n if keyword:\n user = UserService.get_user_by_phone(keyword)\n user_id = None\n if user: user_id = user.id\n where.append(PeriodPayLog.user_id==user_id)\n has_more, infos = CreditService.get_paged_period_pay_logs(where=where, limit=limit, start=start)\n fetch_order_refs(infos)\n for log in infos:\n log['item_id'] = log['order']['item_id']\n fetch_item_refs(infos, fields=['id', 'title'])\n fetch_user_refs(infos)\n for i in infos:\n cacl_punish_fee(i)\n\n total = CreditService.count_logs(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n\n result = {\n 'msg' : '',\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_refund_detail():\n order_id = request.args.get('order_id')\n\n order = OrderService.get_order_by_id(order_id)\n\n assert order and order.status==ORDER_STATUS.CANCELED, '订单不能退款'\n\n order_repayments= OrderService.get_order_repayment_logs_amount(order_id)\n repayment_amount= sum([format_price(i['price']) for i in order_repayments.values()] or [0])\n\n refund_data = {}\n wechat_web = {}\n wechat_app = {}\n link = ''\n has_alipay = False\n for order_no in order_repayments:\n info = order_repayments[order_no]\n if info['pay_method'] == PAY_METHOD.ALIPAY:\n has_alipay = True\n if order.pay_method==PAY_METHOD.ALIPAY: has_alipay=True\n if has_alipay:\n for order_no in 
order_repayments:\n info = order_repayments[order_no]\n pay_method = info['pay_method']\n amount = info['price']\n total_fee = info['total']\n transaction_id = info['transaction_id']\n if pay_method==PAY_METHOD.ALIPAY:\n refund_data[transaction_id] = amount\n elif pay_method==PAY_METHOD.WECHAT_APP:\n wechat_app[transaction_id] = amount\n elif pay_method==PAY_METHOD.WECHAT_WEB:\n wechat_web[transaction_id] = amount\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_alipay' : has_alipay,\n 'order_repayments' : order_repayments,\n 'wechat_app' : wechat_app,\n 'wechat_web' : wechat_web,\n 'repayment_amount' : repayment_amount,\n 'price' : format_price(order.price)\n }\n return jsonify_response(result)\n\n\n\n@admin_json_dec(required=True)\ndef get_coupon_list():\n ''' '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n has_more, infos = CouponService.get_paged_coupons(start=start, limit=limit)\n total = CouponService.count(None)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n\n fetch_item_subcat_refs(infos)\n fetch_item_refs(infos, fields=['id', 'title'])\n fetch_item_cat_refs(infos)\n\n result = {\n 'msg' : '',\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info\n }\n\n return jsonify_response(result)\n\n\ncoupon_edit_validator = Inputs(\n {\n 'title' : TextField(min_length=1, max_length=100, msg='商品名'),\n 'cat_id' : Optional(IdField(msg='分类id')),\n 'is_trial' : IntChoiceField(choices=[0, 1], msg='是否试用'),\n 'sub_cat_id' : Optional(IdField(msg='子分类id')),\n 'item_id' : Optional(IdField(msg='商品id')),\n 'coupon_cat' : IdField(msg='优惠券类型'),\n 'price' : Optional(FloatField(msg='优惠金额')),\n 'need' : Optional(IntField(msg='满多少使用')),\n 'effective' : IntField(msg='有效期')\n }\n )\n@admin_json_dec(required=True, validator=coupon_edit_validator, roles=[0,1])\ndef coupon_edit(item_id=None):\n title = request.valid_data.get('title')\n cat_id = 
request.valid_data.get('cat_id')\n sub_cat_id = request.valid_data.get('sub_cat_id')\n mff_item_id = request.valid_data.get('item_id')\n price = request.valid_data.get('price') or 0\n need = request.valid_data.get('need') or 0\n effective = request.valid_data.get('effective')\n effective = effective * 86400\n request.valid_data['effective'] = effective\n coupon_cat = request.valid_data.get('coupon_cat')\n is_trial = request.valid_data.get('is_trial')\n request.valid_data['price'] = price\n if not is_trial:\n assert price, '请输入优惠金额'\n\n if coupon_cat==1: assert cat_id, '请选择商品分类'\n if coupon_cat==2: assert sub_cat_id, '请选择商品子分类'\n if coupon_cat==3:\n assert mff_item_id, '请输入商品id'\n item = ItemService.get_item_dict_by_id(mff_item_id)\n assert item, '商品不存在'\n if not item_id:\n coupon_id = CouponService.create_coupon(\n coupon_cat, cat_id, title, price, effective,\n mff_item_id, sub_cat_id, is_trial, need=need)\n else:\n CouponService.update_coupon(item_id, **request.valid_data)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\ncity_edit_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='城市名'),\n 'city_code' : TextField(min_length=1, max_length=100, msg='百度城市编码'),\n 'amap_code' : TextField(min_length=1, max_length=100, msg='高德城市编码'),\n }\n )\n@admin_json_dec(required=True, validator=city_edit_validator, roles=[0,1])\ndef city_edit(item_id=None):\n name = request.valid_data.get('name')\n city_code = request.valid_data.get('city_code')\n amap_code = request.valid_data.get('amap_code')\n\n if not item_id:\n DataService.create_city(name, city_code, amap_code)\n else:\n DataService.update_city(item_id, **request.valid_data)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\ndef get_city():\n ''' 获取城市 '''\n city_id = request.args.get('item_id')\n city = DataService.get_city_dict_by_id(city_id)\n assert city, '城市不存在'\n result = {\n 'data': 
city}\n return jsonify_response(result)\n\n\ndef get_coupon():\n ''' '''\n item_id = request.args.get('item_id')\n coupon = CouponService.get_coupon(item_id)\n result = {\n 'data': coupon}\n return jsonify_response(result)\n\n\n\n\ntrial_edit_validator = Inputs(\n {\n 'title' : TextField(min_length=1, max_length=100, msg='商品名'),\n 'image' : TextField(min_length=1, max_length=1000, msg='商品小图'),\n 'rules' : TextField(min_length=1, max_length=1000000, msg='攻略'),\n 'process' : TextField(min_length=1, max_length=1000000, msg='流程'),\n 'start_time' : TextField(min_length=1, max_length=10000, msg='开始时间'),\n 'end_time' : TextField(min_length=1, max_length=10000, msg='结束时间'),\n 'cat' : IntChoiceField(choices=[0,1], msg='是否免息'),\n 'total' : IntField(msg='总共'),\n 'coupon_id' : Optional(IdField(msg='子分类id')),\n }\n )\n@admin_json_dec(required=True, validator=trial_edit_validator)\ndef trial_edit(item_id=None):\n title = request.valid_data.get('title')\n image = request.valid_data.get('image')\n rules = request.valid_data.get('rules')\n cat = request.valid_data.get('cat')\n need = request.valid_data.get('need') or 0\n total = request.valid_data.get('total')\n coupon_id = request.valid_data.get('coupon_id')\n total = request.valid_data.get('total')\n end_time = request.valid_data.get('end_time')\n start_time = request.valid_data.get('start_time')\n process = request.valid_data.get('process')\n\n print start_time, end_time\n print start_time<end_time\n assert start_time[:16]<end_time[:16], '开始时间必须前于结束时间'\n if cat==1:\n assert coupon_id, '请选择优惠券'\n coupon = CouponService.get_coupon(coupon_id)\n assert coupon, '优惠券不存在'\n assert coupon['item_id'], '优惠券类型必须为指定商品'\n\n if not item_id:\n trial_id = TrialService.create_trial(\n title, image, cat, total, start_time,\n end_time, rules, process, coupon_id=coupon_id,\n need=need\n )\n else:\n trial = TrialService.get_trial(item_id)\n assert trial, '申请不存在'\n assert total>trial['sent'], '不能低于已发放数'\n TrialService.update_trial(item_id, 
**request.valid_data)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_trial_list():\n ''' '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n _sort = 'end_time'\n _sort_dir = 'DESC'\n has_more, infos = TrialService.get_paged_trials(start=start, limit=limit, _sort=_sort, _sort_dir=_sort_dir)\n\n total = TrialService.count_trial(where=None)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n current_time = dt_obj.now()\n\n result = {\n 'msg' : '',\n 'now' : str(current_time),\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_trial():\n ''' '''\n trial_id = request.args.get('item_id')\n trial = TrialService.get_trial(trial_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'data' : trial,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\n@admin_json_dec()\ndef trial_applyer_list():\n ''' 申请用户列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n item_id = request.args.get('item_id')\n trial = TrialService.get_trial(item_id)\n\n where = TrialApply.trial_id==item_id\n has_more, infos = TrialService.get_paged_apply_user_list(where=where, start=start)\n user_ids = [i['user_id'] for i in infos]\n apply_count_map = TrialService.count_user_apply(user_ids)\n apply_received_count_map = TrialService.count_user_apply(user_ids, 1)\n for info in infos:\n apply_count = apply_count_map.get(info['user_id'], 0)\n apply_received_count = apply_received_count_map.get(info['user_id'], 0)\n info['apply_count'] = apply_count\n info['apply_received_count'] = apply_received_count\n total = TrialService.count_apply(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'item' : trial,\n 'infos' 
: infos,\n 'total' : total,\n 'page_info' : page_info,\n 'has_more' : has_more,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\n@admin_json_dec()\ndef daily_applyer_list():\n ''' 每日领取优惠券用户列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n item_id = request.args.get('item_id')\n item = TutorialService.get_daily_coupon(item_id)\n\n where = DailyUser.daily_id==item_id\n has_more, infos = TutorialService.get_daily_user_ids(where=where, start=start)\n total = TutorialService.count_daily_users(where)\n\n fetch_user_refs(infos)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'item' : item,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info,\n 'has_more' : has_more,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\nsend_trial_validator = Inputs(\n {\n 'apply_id' : IdField(msg='申请id'),\n 'item_id' : IdField(msg='试用id'),\n }\n )\n@admin_json_dec(validator=send_trial_validator, roles=[0,1,3])\ndef send_trial():\n ''' 赠送申请 '''\n apply_id = request.valid_data.get('apply_id')\n item_id = request.valid_data.get('item_id')\n\n trial = TrialService.get_trial(item_id)\n assert trial, '试用不存在'\n apply = TrialService.get_apply(apply_id)\n assert apply, '申请不存在'\n\n where = and_(\n TrialApply.id==apply_id,\n TrialApply.status==0\n )\n to_status = 1\n count = TrialService.update_apply_status(where, to_status)\n if count:\n TrialService.incr_trial_sent_count(item_id)\n if trial['cat']==1:\n user_coupon_id = CouponService.send_user_coupon(apply['user_id'], trial['coupon_id'])\n TrialService.update_apply(TrialApply.id==apply_id, coupon_id=user_coupon_id)\n msg = '发放成功'\n else:\n apply = TrialService.get_apply(apply_id)\n if apply['status'] in {1,2,3}:\n msg = '已发放给该用户'\n else:\n msg = '发放失败'\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_promoter_list():\n ''' 
推广管理员列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n where = Promoter.status==1\n has_more, infos = PromoteService.get_paged_promoters(where=where, start=start, limit=limit)\n promoter_ids = [ i['id'] for i in infos ]\n count = PromoteService.count_promoter_admin_reg(promoter_ids)\n follow_count_map = {i[0]:int(i[1]) for i in count}\n reg_count_map = {i[0]:int(i[2]) for i in count}\n unfollow_count_map = {i[0]:int(i[3] or 0) for i in count}\n for info in infos:\n info['follow_count_total'] = follow_count_map.get(info['id'])\n info['reg_count_total'] = reg_count_map.get(info['id'])\n info['unfollow_count_total'] = unfollow_count_map.get(info['id'])\n if info['id']==1:\n promoter = PromoteService.get_promoter_by_phone('18801794295')\n if promoter:\n info['follow_count_total'] = promoter.follow_count\n info['reg_count_total'] = promoter.reg_count\n total = PromoteService.count_promoters(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info,\n 'has_more' : has_more,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\nadd_promoter_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='请输入用户名'),\n 'phone' : MobileField(min_length=1, max_length=100, msg='请输入手机号'),\n 'passwd' : TextField(min_length=1, max_length=100, msg='请输入密码'),\n }\n )\n@admin_json_dec(validator=add_promoter_validator)\ndef add_promoter():\n ''' 添加推广员 '''\n name = request.valid_data.get('name')\n phone = request.valid_data.get('phone')\n passwd = request.valid_data.get('passwd')\n PromoteService.create_promoter(phone, passwd, name)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\n@admin_json_dec()\ndef get_hospital_user_list():\n ''' 医院管理员列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n where = 
None\n has_more, infos = HospitalService.get_paged_hospital_admin_users(where=where, start=start)\n\n fetch_hospital_refs(infos)\n total = HospitalService.count_admin(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info,\n 'has_more' : has_more,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\nadd_hospital_admin_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='请输入用户名'),\n 'hospital_id' : IdField(msg='医院id'),\n 'passwd' : TextField(min_length=1, max_length=100, msg='请输入密码'),\n }\n )\n@admin_json_dec(validator=add_hospital_admin_validator, roles=[0,1])\ndef add_hospital_admin():\n ''' 添加医院管理员 '''\n name = request.valid_data.get('name')\n passwd = request.valid_data.get('passwd')\n hospital_id = request.valid_data.get('hospital_id')\n\n HospitalService.create_user(name, passwd, hospital_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\nto_supply_validator = Inputs(\n {\n 'apply_id' : IdField(msg='申请id'),\n }\n )\n@admin_json_dec(validator=to_supply_validator, roles=[0,1])\ndef to_supply():\n ''' 补充资料 '''\n apply_id = request.valid_data.get('apply_id')\n\n where = and_(\n CreditApply.id==apply_id,\n )\n data = {\n 'status':APPLY_STATUS.SECOND_STEP\n }\n CreditService.update_apply(where, **data)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n\n\nsupply_apply_validator = Inputs(\n {\n 'apply_id' : IdField(msg='申请id'),\n 'id_no' : TextField(min_length=0, max_length=100, msg='身份证号'),\n 'school' : TextField(min_length=0, max_length=100, msg='学校'),\n 'enrollment_time' : TextField(min_length=0, max_length=100, msg='入学时间'),\n 'graduate_time' : TextField(min_length=0, max_length=100, msg='毕业时间'),\n 'name' : TextField(min_length=0, max_length=100, msg='真实姓名'),\n 'major' : 
TextField(min_length=0, max_length=100, msg='专业'),\n 'stu_no' : TextField(min_length=0, max_length=100, msg='学号'),\n 'stu_years' : FloatField(msg='学制请输入浮点数如:4'),\n 'stu_education' : TextField(min_length=0, max_length=100, msg='学历'),\n }\n )\n@admin_json_dec(validator=supply_apply_validator)\ndef supply_apply():\n apply_id = request.valid_data.pop('apply_id')\n try:\n request.valid_data['enrollment_time'] = '{} 00:00:00'.format(request.valid_data['enrollment_time'])\n request.valid_data['graduate_time'] = '{} 00:00:00'.format(request.valid_data['graduate_time'])\n format = '%Y-%m-%d %H:%M:%S'\n request.valid_data['graduate_time'] = date_to_datetime(request.valid_data['graduate_time'], format)\n request.valid_data['enrollment_time'] = date_to_datetime(request.valid_data['enrollment_time'], format)\n except Exception as e:\n assert 0, '入学时间,毕业时间输入有误,请按格式2015-09-01输入'\n assert len(request.valid_data['id_no'])==18, '身份证号码长度有误'\n where = and_(\n CreditApply.id==apply_id,\n )\n request.valid_data['update_time'] = dt_obj.now()\n request.valid_data['has_supply'] = True\n count = CreditService.update_apply(where, **request.valid_data)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '补充成功' if count else '申请不存在'\n }\n return jsonify_response(result)\n\n\nset_hospital_status_validator = Inputs(\n {\n 'item_id' : IdField(msg='医院id'),\n 'status' : IntChoiceField(choices=[0,1], msg='医院状态'),\n }\n )\n@admin_json_dec(required=True, validator=set_hospital_status_validator)\ndef set_hospital_status():\n item_id = request.valid_data.get('item_id')\n status = request.valid_data.get('status')\n print item_id, status\n data = {\n 'status': status\n }\n msg = '上线成功' if status==1 else '下线成功'\n count = ItemService.set_hospital_status(item_id, status)\n assert count, '医院不存在'\n if status==1:\n where = and_(\n Item.hospital_id==item_id,\n Item.status==2\n )\n ItemService.set_hospital_item_status(where, 1)\n else:\n where = and_(\n Item.hospital_id==item_id,\n Item.status==1\n )\n 
ItemService.set_hospital_item_status(where, 2)\n result = {\n 'msg' : msg,\n 'code' : ResponseCode.SUCCESS\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_daily_coupon_list():\n ''' 每日领取优惠券列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n _sort = 'start_time'\n has_more, infos = TutorialService.get_paged_daily_coupons(_sort=_sort, start=start, limit=limit)\n total = TutorialService.count_daily_coupons(None)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n fetch_coupon_refs(infos)\n set_coupon_use_time(infos)\n result = {\n 'msg' : '',\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_tutorial_list():\n ''' 美攻略列表 '''\n limit = 10\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n has_more, infos = TutorialService.get_paged_tutorial_entries(start=start, limit=limit)\n total = TutorialService.count_tutorials(None)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n result = {\n 'msg' : '',\n 'code' : ResponseCode.SUCCESS,\n 'infos' : infos,\n 'total' : total,\n 'page_info' : page_info\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_tutorial():\n ''' '''\n item_id = request.args.get('item_id')\n tutorial = TutorialService.get_tutorial(item_id)\n assert tutorial, '攻略不存在'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'data' : tutorial\n }\n return jsonify_response(result)\n\n\ndaily_coupon_edit_validator = Inputs(\n {\n 'start_time' : TextField(min_length=1, max_length=100, msg='开始时间'),\n 'end_time' : TextField(min_length=1, max_length=100, msg='结束时间'),\n 'use_condition' : TextField(min_length=0, max_length=100, msg='使用条件'),\n 'total' : IntField(msg='领取总数量'),\n 'title' : Optional(TextField(min_length=0, max_length=100, msg='每日优惠标题')),\n 
'coupon_id' : IdField(msg='优惠券id')\n }\n )\n@admin_json_dec(required=True, validator=daily_coupon_edit_validator, roles=[0,1])\ndef daily_coupon_edit(item_id=None):\n start_time = request.valid_data.get('start_time')\n end_time = request.valid_data.get('end_time')\n total = request.valid_data.get('total')\n title = request.valid_data.get('title')\n coupon_id = request.valid_data.get('coupon_id')\n use_condition = request.valid_data.get('use_condition')\n\n assert start_time<end_time, '开始时间不能晚于结束时间'\n if item_id:\n daily = TutorialService.get_daily_coupon(item_id)\n assert daily, '领取不存在'\n assert total>=daily['sent'], '总数不能低于已领取数'\n count = TutorialService.update_daily_coupon(item_id, **request.valid_data)\n else:\n use_time = ''\n TutorialService.create_daily_coupon(title, coupon_id, start_time, end_time, total, use_time, use_condition)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : ''\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_daily_coupon():\n ''' '''\n item_id = request.args.get('item_id')\n daily = TutorialService.get_daily_coupon(item_id)\n assert daily, '领取不存在'\n\n result = {\n 'data': daily,\n 'code': ResponseCode.SUCCESS,\n 'msg': '' \n }\n return jsonify_response(result)\n\n\nset_tutorial_status_validator = Inputs(\n {\n 'item_id' : IdField(msg='攻略id'),\n 'status' : IntChoiceField(choices=[0,1], msg='攻略状态'),\n }\n )\n@admin_json_dec(required=True, validator=set_tutorial_status_validator)\ndef set_tutorial_status():\n item_id = request.valid_data.get('item_id')\n status = request.valid_data.get('status')\n\n data = {\n 'status': status\n }\n TutorialService.set_tutorial_status(item_id, status)\n msg = '上线成功' if status==1 else '下线成功'\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\nreset_user_vcode_validator = Inputs(\n {\n 'phone': MobileField(msg='请输入手机号')\n }\n )\n@admin_json_dec(validator=reset_user_vcode_validator, roles=[0,4])\ndef 
reset_user_vcode_sent():\n ''' 重置验证码次数 '''\n from ops.cache import InvalidUserPasswdCache\n from ops.cache import InvalidUserResetVcodeCache\n from ops.cache import InvalidUserSignupVcodeCache\n from ops.cache import SmsCache\n phone = request.valid_data.get('phone')\n InvalidUserPasswdCache.clear_today_counter(phone)\n InvalidUserResetVcodeCache.clear_today_counter(phone)\n InvalidUserSignupVcodeCache.clear_today_counter(phone)\n SmsCache.clear_sent_count(phone)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '重置成功'\n }\n return jsonify_response(result)\n\n\nget_user_vcode_validator = Inputs(\n {\n 'phone': MobileField(msg='请输入手机号'),\n 'cat' : IntChoiceField(choices=[1,2], msg='请选择类型')\n }\n )\n@admin_json_dec(validator=get_user_vcode_validator, roles=[0,4])\ndef get_user_vcode():\n ''' 获取用户验证码 '''\n from ops.cache import SmsCache\n phone = request.valid_data.get('phone')\n cat = request.valid_data.get('cat')\n user = UserService.get_user_by_phone(phone)\n if cat==1:\n assert not user, '用户已存在'\n else:\n assert user, '用户不存在'\n vcode = SmsCache.get_vcode(phone)\n sent = SmsCache.get_sent_count(phone)\n assert vcode, '验证码不存在, 请获取验证码'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'vcode' : vcode,\n 'count' : sent\n }\n return jsonify_response(result)\n\n\ntutorial_edit_validator = Inputs(\n {\n 'title' : TextField(min_length=1, max_length=100, msg='攻略标题'),\n 'image' : TextField(min_length=1, max_length=100, msg='攻略首页推荐图'),\n 'icon' : TextField(min_length=1, max_length=100, msg='攻略列表icon'),\n 'photo' : TextField(min_length=1, max_length=100, msg='攻略详情大图'),\n 'items' : REGField(pattern='^(\\d+,?)+$', msg='请输入项目id,逗号隔开')\n }\n )\n@admin_json_dec(required=True, validator=tutorial_edit_validator, roles=[0,1])\ndef tutorial_edit(item_id=None):\n title = request.valid_data.get('title')\n image = request.valid_data.get('image')\n photo = request.valid_data.get('photo')\n items = request.valid_data.get('items')\n icon = request.valid_data.get('icon')\n 
for the_item_id in items.split(','):\n item = ItemService.get_item_dict_by_id(the_item_id)\n assert item, 'ID为{}的项目不存在'.format(the_item_id)\n if item_id:\n TutorialService.update_tutorial_entry(item_id, **request.valid_data)\n else:\n item_id = TutorialService.create_tutorial_entry(title, icon, image, photo, items)\n return jsonify_response({'item_id': item_id})\n\n\n\nsend_user_coupon_validator = Inputs(\n {\n 'phone' : MobileField(min_length=1, max_length=100, msg='请输入用户手机号码'),\n 'coupon_id' : IdField(msg='请选择优惠券')\n }\n )\n@admin_json_dec(required=True, validator=send_user_coupon_validator, roles=[0,1,4])\ndef send_user_coupon():\n phone = request.valid_data.get('phone')\n coupon_id = request.valid_data.get('coupon_id')\n\n user = UserService.get_user_by_phone(phone)\n assert user, '用户不存在'\n coupon = CouponService.get_coupon(coupon_id)\n assert coupon, '优惠券不存在'\n CouponService.send_user_coupon(user.id, coupon_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '发放成功'\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True, validator=None, roles=[0,1,4])\ndef set_cats_order():\n data = json.loads(request.data)\n for index, i in enumerate(data):\n ItemService.set_item_cat_order(i, index)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '排序成功'\n }\n return jsonify_response(result)\n\n\nset_city_validator = Inputs(\n {\n 'city_id' : Optional(IdField(msg='请选择城市'))\n }\n )\n@admin_json_dec(required=True, validator=None)\ndef set_city():\n city_id = json.loads(request.data).get('city_id')\n print city_id, 'city_id'\n where = AdminUser.name==request.name\n AdminService.update(where, city_id=city_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '设置成功'\n }\n response = jsonify_response(result)\n set_cookie(response, 'city_id', str(city_id or ''), 86400*300)\n\n return response\n\n\n@admin_json_dec(required=True)\ndef get_question_list():\n limit = 100\n page = int(request.args.get('page') or 1)\n start = (page-1)*limit\n\n 
has_more, infos = RedpackService.get_paged_redpack_questions(limit=limit, start=start)\n total = RedpackService.count_redpack_question()\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n\n result = {\n 'infos' : infos,\n 'page_info' : page_info,\n 'total' : total,\n }\n return jsonify_response(result)\n\n\n\nnew_question_validator = Inputs(\n {\n 'content' : TextField(msg='问题内容')\n }\n )\n@admin_json_dec(required=True, validator=new_question_validator)\ndef new_question():\n content = request.valid_data.get('content')\n\n question_id = RedpackService.create_question(content)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'question_id' : question_id,\n 'msg' : '创建成功',\n }\n return jsonify_response(result)\n\n\n\n@admin_json_dec(required=True)\ndef get_user_question_list():\n limit = 10\n page = int(request.args.get('page') or 1)\n _sort = request.args.get('_sort') or 'view_count'\n is_random = request.args.get('is_random')\n start = (page-1)*limit\n where = None\n if is_random:\n where = RedpackUserQuestion.is_random==is_random \n has_more, infos = RedpackService.get_paged_user_question(where=where, limit=limit, _sort=_sort, start=start)\n total = RedpackService.count_redpack_user_question(where)\n question_ids = [i['id'] for i in infos]\n\n total_money = RedpackService.total_money()\n total_redpack = RedpackService.count_redpack()\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n question_infos = filter(lambda i:i['question_id'], infos)\n fetch_question_refs(question_infos, dest_key='the_question')\n fetch_qrcodeuser_refs(infos)\n result = {\n 'infos' : infos,\n 'page_info' : page_info,\n 'total' : total,\n 'total_money' : total_money,\n 'total_redpack' : total_redpack\n }\n return jsonify_response(result)\n\n\n@admin_json_dec(required=True)\ndef get_room_list():\n ''' 寝室列表 '''\n limit = 10\n page = int(request.args.get('page', 1))\n start = (page-1)*limit\n total = RoomDesignService.count_rooms()\n page_info = 
abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n _sort = request.args.get('_sort') or 'vote_count'\n room_ids = RoomDesignVoteCounter.get_paged_rank_room_ids(start, start+limit-1)\n where = RoomDesignDetail.id.in_(room_ids)\n sort_map = {i:index for index, i in enumerate(room_ids)}\n if _sort=='vote_count':\n has_more, item_list = RoomDesignService.get_paged_rooms(where=where)\n item_list.sort(key=lambda i:(sort_map[i['id']]))\n else:\n where = None\n has_more, item_list = RoomDesignService.get_paged_rooms(where=where, _sort=_sort, start=start, limit=limit)\n for item in item_list:\n item['rank'] = RoomDesignVoteCounter.rank(item['id'])\n\n fetch_user_refs(item_list)\n fetch_school_refs(item_list)\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n }\n\n return jsonify_response(result)\n\n\n@admin_json_dec()\ndef get_room_detail():\n ''' 寝室详情 '''\n room_id = request.args.get('room_id')\n room = RoomDesignService.get_room_dict_by_id(room_id)\n fetch_user_refs((room,))\n pics = []\n for index, i in enumerate(room['pic_list']):\n if not i:\n pics.append('http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a4u9raim1rpcckf59vq7q1gth1LG11IG]7F5G5%7D861P1IUW[T.jpg'+'?'+str(index))\n else:\n pics.append(i)\n\n room['pic_list'] = pics\n room['rank'] = RoomDesignVoteCounter.rank(room_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'room' : room\n }\n return jsonify_response(result)\n\n\n\nremark_order_validator = Inputs(\n {\n 'remark' : TextField(msg='备注内容'),\n 'order_id' : IdField(msg='订单id'),\n }\n )\n@admin_json_dec(validator=remark_order_validator)\ndef remark_order():\n remark = request.valid_data.get('remark')\n order_id = request.valid_data.get('order_id')\n where = Order.id==order_id\n count = OrderService.update_order(where, commit=True, remark=remark)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '备注成功'\n }\n return jsonify_response(result)\n\n\nremark_advice_validator = Inputs(\n {\n 'remark' : 
TextField(msg='备注内容'),\n 'advice_id' : IdField(msg='反馈id'),\n }\n )\n@admin_json_dec(validator=remark_advice_validator)\ndef remark_useradvice():\n remark = request.valid_data.get('remark')\n advice_id = request.valid_data.get('advice_id')\n count = AdminService.remark_useradvice(advice_id, remark=remark)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '备注成功'\n }\n return jsonify_response(result)\n\n\n\n\n\n\n\nremark_apply_validator = Inputs(\n {\n 'remark' : TextField(msg='备注内容'),\n 'remark_img' : TextField(msg='备注图片'),\n 'apply_id' : IdField(msg='申请id'),\n }\n )\n@admin_json_dec(validator=remark_apply_validator)\ndef remark_apply():\n remark = request.valid_data.get('remark')\n remark_img = request.valid_data.get('remark_img')\n apply_id = request.valid_data.get('apply_id')\n where = CreditApply.id==apply_id\n count = CreditService.update_apply(where, remark=remark, remark_img=remark_img)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '备注成功'\n }\n return jsonify_response(result)\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6191117167472839, "alphanum_fraction": 0.6796770095825195, "avg_line_length": 25.535715103149414, "blob_id": "d4abc52757d7bdc3299e81d3f27226384dc64765", "content_id": "b03c8425038b055d0b7f493cb23ce0b6990bcc85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 743, "license_type": "no_license", "max_line_length": 90, "num_lines": 28, "path": "/migrations/versions/42d4367e28b2_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 42d4367e28b2\nRevises: 569e3d7f70ab\nCreate Date: 2015-12-10 10:58:36.611750\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '42d4367e28b2'\ndown_revision = '569e3d7f70ab'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('coupon', sa.Column('title', sa.String(length=300), nullable=True))\n op.add_column('user_coupon', sa.Column('title', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user_coupon', 'title')\n op.drop_column('coupon', 'title')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5788283348083496, "alphanum_fraction": 0.581768810749054, "avg_line_length": 28.64429473876953, "blob_id": "a1fc909e2074b5433ecaea3daafef9f792a7b5a1", "content_id": "c1c9dd546a9d06b430d2ad1f8ba2bf1d754712c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4443, "license_type": "no_license", "max_line_length": 77, "num_lines": 149, "path": "/ops/data.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport requests\n\nfrom util.sqlerr import SQL_DUPLICATE_NAME\n\nfrom models import db\nfrom models import School\nfrom models import City\nfrom models import HelpCat\nfrom models import HelpEntry\nfrom models import ImageSize\nfrom ops.utils import get_items\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom util.utils import prefix_img_domain\nfrom settings import celery\n\n\nclass DataService(object):\n\n @staticmethod\n def create_school(name, link, city_name):\n try:\n school = School(name=name, link=link, city_name=city_name)\n db.session.add(school)\n db.session.commit()\n return school.id\n except Exception as e:\n db.session.rollback()\n import traceback\n traceback.print_exc()\n\n @staticmethod\n def get_paged_schools(**kw):\n return get_page(School, {}, **kw)\n\n @staticmethod\n def get_schools():\n return School.query.all()\n\n @staticmethod\n def get_paged_cities(**kw):\n return get_page(City, {}, **kw)\n\n @staticmethod\n def create_city(name, city_code, amap_code):\n city = City(name=name, amap_code=amap_code, 
city_code=city_code)\n db.session.add(city)\n db.session.commit()\n return city.id\n\n @staticmethod\n def update_city(city_id, **kw):\n count = City.query.filter(City.id==city_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def get_city_by_baidu_city_code(city_code):\n return City.query.filter(City.city_code==city_code).first()\n\n @staticmethod\n def count_schools(where=None):\n return count_items(School, where)\n\n @staticmethod\n def get_school_city_names():\n rows = db.session.query(School.city_name).distinct().all()\n return [i.city_name for i in rows]\n\n @staticmethod\n def create_help_cat(id, name):\n try:\n cat = HelpCat(id=id, name=name)\n db.session.add(cat)\n db.session.commit()\n return cat.id\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE_NAME.search(str(e)):\n print 'duplicate entry name'\n\n @staticmethod\n def create_help_entry(cat_id, title, content):\n entry = HelpEntry(cat_id=cat_id, title=title, content=content)\n db.session.add(entry)\n db.session.commit()\n\n @staticmethod\n def get_paged_helpcats(**kw):\n _sort_dir = 'ASC'\n return get_page(HelpCat, {}, limit=1000, _sort_dir=_sort_dir, **kw)\n\n @staticmethod\n def get_paged_helpentries(**kw):\n _sort_dir = 'ASC'\n return get_page(HelpEntry, {}, limit=1000, _sort_dir=_sort_dir, **kw)\n\n @staticmethod\n def get_helpentry_by_id(entry_id):\n entry = HelpEntry.query.filter(HelpEntry.id==entry_id).first()\n if entry: return entry.as_dict()\n\n @staticmethod\n def get_paged_city_list(**kw):\n return get_page(City, {}, **kw)\n\n @staticmethod\n def get_city_dict_by_id(city_id):\n city = City.query.filter(City.id==city_id).first()\n if city: return city.as_dict()\n\n @staticmethod\n @celery.task\n def set_img_size(key):\n ''' 设置图片宽高 '''\n full_url = prefix_img_domain(key)\n print full_url\n result = requests.get('{}?imageInfo'.format(full_url))\n if result.status_code!=200:\n assert 0, '图片不存在'\n data = result.json()\n width = data['width']\n height = 
data['height']\n try:\n img = ImageSize(key=key, width=width, height=height)\n db.session.add(img)\n db.session.commit()\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n\n @staticmethod\n def get_imgs_size_by_keys(keys):\n if isinstance(keys, (tuple, list)):\n query = ImageSize.key.in_(keys)\n else:\n query = ImageSize.key==keys\n sizes = ImageSize.query.filter(query).all()\n return [ i.as_dict() for i in sizes]\n\n @staticmethod\n def get_schools_dict_by_ids(school_ids):\n ''' '''\n where = School.id.in_(school_ids)\n results = School.query.filter(where).all()\n return [i.as_dict() for i in results]\n\n\n\n\n" }, { "alpha_fraction": 0.6113902926445007, "alphanum_fraction": 0.6750418543815613, "avg_line_length": 21.961538314819336, "blob_id": "a3482c0459c8d86189339ab43b409011bd30f124", "content_id": "71731862dd3b5294f7f75ffc9fff53a5897e07f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 85, "num_lines": 26, "path": "/migrations/versions/4c9fdc97c246_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4c9fdc97c246\nRevises: 51187e1e4dbc\nCreate Date: 2015-11-16 11:05:15.297821\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4c9fdc97c246'\ndown_revision = '51187e1e4dbc'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('avatar', sa.String(length=1000), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user', 'avatar')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6270358562469482, "alphanum_fraction": 0.6775244474411011, "avg_line_length": 22.615385055541992, "blob_id": "92b2a5442cb0cc153e938a08b0b267311b09323b", "content_id": "458c033e150ebc170e808a4ea378d6833dff84a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 614, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/migrations/versions/c855246d7e8_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: c855246d7e8\nRevises: 1bef4d9bd99b\nCreate Date: 2015-11-03 10:38:03.675764\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c855246d7e8'\ndown_revision = '1bef4d9bd99b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_apply', sa.Column('reason', sa.String(length=500), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('credit_apply', 'reason')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6082148551940918, "alphanum_fraction": 0.6935229301452637, "avg_line_length": 23.346153259277344, "blob_id": "a0d13a8ba23b35bf3193d21ec3c03a3ea04691e4", "content_id": "66bc7521f231a41b459e73a7f06b87e39ec16c7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/migrations/versions/2ab4005efb6c_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2ab4005efb6c\nRevises: 2a01c5929823\nCreate Date: 2016-01-27 16:01:27.623336\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2ab4005efb6c'\ndown_revision = '2a01c5929823'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_detail', sa.Column('addr', sa.String(length=30), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('room_design_detail', 'addr')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6742081642150879, "avg_line_length": 28.46666717529297, "blob_id": "3478ffc1cda2970246aaa3956e1cdfcb2cd97670", "content_id": "80198954d9a0e7536ddce9a11f399e50dad78c51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 884, "license_type": "no_license", "max_line_length": 85, "num_lines": 30, "path": "/migrations/versions/5784ac6510c3_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 5784ac6510c3\nRevises: 345ee23bca8\nCreate Date: 2015-12-09 18:03:16.566805\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5784ac6510c3'\ndown_revision = '345ee23bca8'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('trial_apply', sa.Column('cat', sa.Integer(), nullable=True))\n op.add_column('trial_apply', sa.Column('coupon_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'trial_apply', 'coupon', ['coupon_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'trial_apply', type_='foreignkey')\n op.drop_column('trial_apply', 'coupon_id')\n op.drop_column('trial_apply', 'cat')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5618622303009033, "alphanum_fraction": 0.5645408034324646, "avg_line_length": 31.774059295654297, "blob_id": "d6351c7f72b4b64f7aa4e1f6a38ea1a7b8c31237", "content_id": "837ef578d3f26fb33b2989db164329ba19c335db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7968, "license_type": "no_license", "max_line_length": 131, "num_lines": 239, "path": "/ops/credit.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom decimal import Decimal\nfrom sqlalchemy import and_\n\nfrom models import db\nfrom models import CreditUseLog\nfrom models import Order\nfrom models import UserCredit\nfrom models import PeriodPayLog\nfrom models import PeriodPayChoice\nfrom models import CreditApply\nfrom util.sqlerr import SQL_DUPLICATE\nfrom util.utils import get_due_time\nfrom util.utils import dt_obj\nfrom ops.utils import count_items\nfrom ops.utils import get_page\nfrom settings import DEFAULT_CREDIT\nfrom constants import CREDIT_STATUS\n\n\nclass CreditService(object):\n ''' '''\n\n @staticmethod\n def create_default_credit(user_id, total=DEFAULT_CREDIT, status=0):\n ''' 创建额度 '''\n try:\n credit = UserCredit(user_id=user_id, total=total, status=status)\n db.session.add(credit)\n db.session.commit()\n return True\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE.search(str(e)):\n return False\n else:\n raise(e)\n\n @staticmethod\n def get_user_credit(user_id):\n credit = UserCredit.query.filter(UserCredit.user_id==user_id).first()\n return credit\n\n @staticmethod\n def set_user_credit_total(user_id, total):\n ''' 审核通过设置用户额度 '''\n credit = CreditService.get_user_credit(user_id)\n if not credit:\n CreditService.create_default_credit(user_id, total, status=1)\n else:\n 
UserCredit.query.filter(UserCredit.user_id==user_id).update({'total':total, 'status':CREDIT_STATUS.VERIFIED})\n db.session.commit()\n\n @staticmethod\n def update_user_credit_status(user_id, status):\n count = UserCredit.query.filter(UserCredit.user_id==user_id).update({'status':status})\n db.session.commit()\n return count\n\n @staticmethod\n def init_credit(user_id):\n credit = CreditService.get_user_credit(user_id)\n if not credit:\n CreditService.create_default_credit(user_id)\n credit = CreditService.get_user_credit(user_id)\n return credit\n\n @staticmethod\n def modify_credit(user_id, amount):\n '''\n 变更信用额度\n 1变更成功\n 2变更成功 但额度是虚假的 未通过审核\n 0额度不足\n '''\n amount = Decimal(str(amount))\n verified_query = and_(\n UserCredit.user_id==user_id,\n UserCredit.status==2,\n UserCredit.used+amount<=UserCredit.total,\n UserCredit.used+amount>=0,\n )\n unverified_query = and_(\n UserCredit.user_id==user_id,\n UserCredit.status==1,\n UserCredit.used+amount<=UserCredit.total,\n UserCredit.used+amount>=0,\n )\n update_data = {\n 'used':UserCredit.used+amount\n }\n count = UserCredit.query.filter(verified_query).update(update_data)\n if count:\n log = CreditUseLog(user_id=user_id, amount=amount, status=1)\n db.session.add(log)\n db.session.commit()\n return 1\n count = UserCredit.query.filter(unverified_query).update(update_data)\n if count:\n log = CreditUseLog(user_id=user_id, amount=amount, status=2)\n db.session.add(log)\n db.session.commit()\n return 2\n\n db.session.commit()\n return 0\n \n @staticmethod\n def get_period_choices():\n return PeriodPayChoice.query.all()\n\n @staticmethod\n def get_period_choice(choice_id):\n choice = PeriodPayChoice.query.filter(PeriodPayChoice.id==choice_id).first()\n return choice\n\n @staticmethod\n def gen_order_period_logs(order_id):\n order = Order.query.filter(Order.id==order_id).first()\n assert order, '订单不存在'\n choice = PeriodPayChoice.query.filter(PeriodPayChoice.id==order.credit_choice_id).first()\n total_amount= 
order.credit_amount - order.total_fee\n period_amount = total_amount/choice.period_count\n period_fee = float(period_amount)*choice.period_fee\n for i in range(1, 1+choice.period_count):\n due_time = get_due_time(i)\n log = PeriodPayLog(\n order_id = order_id,\n period_count = choice.period_count, \n user_id = order.user_id,\n period_pay_index = i,\n amount = period_amount,\n fee = period_fee,\n deadline = due_time\n )\n db.session.add(log)\n db.session.commit()\n\n @staticmethod\n def get_period_pay_logs(user_id, where=None):\n query = and_()\n query.append(PeriodPayLog.user_id==user_id)\n if where is not None: query.append(where)\n logs = PeriodPayLog.query.filter(query).all()\n return logs\n\n @staticmethod\n def get_paged_pay_logs(**kw):\n return get_page(PeriodPayLog, {}, **kw)\n\n @staticmethod\n def add_apply(user_id, **kw):\n ''' 提交第一步申请资料 '''\n try:\n kw['create_time'] = dt_obj.now()\n apply = CreditApply(user_id=user_id, **kw)\n db.session.add(apply)\n db.session.commit()\n return apply.id\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE.search(str(e)):\n pass\n else:\n raise(e)\n\n @staticmethod\n def update_apply(where, **kw):\n ''' 更新申请 '''\n kw.setdefault('update_time', dt_obj.now())\n count = CreditApply.query.filter(where).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def create_period_choice(**kw):\n try:\n choice = PeriodPayChoice(**kw)\n db.session.add(choice)\n db.session.commit()\n return choice.id\n except Exception as e:\n db.session.rollback()\n\n @staticmethod\n def get_paged_apply_list(**kw):\n return get_page(CreditApply, {}, **kw)\n\n @staticmethod\n def count_apply(where=None):\n return count_items(CreditApply, where=where)\n\n @staticmethod\n def get_apply_dict_by_id(apply_id):\n apply = CreditApply.query.filter(CreditApply.id==apply_id).first()\n if apply: return apply.as_dict()\n\n @staticmethod\n def get_apply_dict_by_userid(user_id):\n apply = 
CreditApply.query.filter(CreditApply.user_id==user_id).first()\n if apply: return apply.as_dict()\n\n @staticmethod\n def update_pay_log(log_ids):\n ''' 还款 '''\n query = and_(\n PeriodPayLog.id.in_(log_ids),\n PeriodPayLog.status==0\n )\n repayment_time = dt_obj.now()\n count = PeriodPayLog.query.filter(query).update({'status':1, 'repayment_time':repayment_time}, synchronize_session=False)\n db.session.commit()\n if count==len(log_ids):\n return True\n else:\n db.session.rollback()\n return False\n\n @staticmethod\n def cancel_pay_logs(order_id):\n query = and_(\n PeriodPayLog.order_id==order_id,\n PeriodPayLog.status==0\n )\n count = PeriodPayLog.query.filter(query).update({'status':2})\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_period_choices(**kw):\n return get_page(PeriodPayChoice, {}, **kw)\n\n @staticmethod\n def get_paged_period_pay_logs(**kw):\n return get_page(PeriodPayLog, {}, **kw)\n\n @staticmethod\n def count_logs(where=None):\n return count_items(PeriodPayLog, where=where)\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6641631126403809, "alphanum_fraction": 0.6917024254798889, "avg_line_length": 48.92856979370117, "blob_id": "1a6c885d29570dba39940dcb4a4904839e348631", "content_id": "c99676d308985f18ca4e6c6a114c8051c416c1f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2796, "license_type": "no_license", "max_line_length": 130, "num_lines": 56, "path": "/migrations/versions/3ae16db9c83e_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3ae16db9c83e\nRevises: 3d0882a6044\nCreate Date: 2016-01-27 15:03:50.881072\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3ae16db9c83e'\ndown_revision = '3d0882a6044'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ''' '''\n #foreign key constriant del order @xianpeng\n 
op.drop_constraint(u'room_design_detail_ibfk_1', 'room_design_detail', type_='foreignkey')\n op.drop_constraint(u'room_design_vote_log_ibfk_1', 'room_design_vote_log', type_='foreignkey')\n op.drop_column('room_design_detail', 'room_id')\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('room_design_apply')\n op.add_column('room_design_detail', sa.Column('applyer_name', sa.String(length=30), nullable=True))\n op.add_column('room_design_detail', sa.Column('room_name', sa.String(length=30), nullable=True))\n op.create_unique_constraint(None, 'room_design_detail', ['room_name'])\n op.create_foreign_key(None, 'room_design_vote_log', 'room_design_detail', ['room_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'room_design_vote_log', type_='foreignkey')\n op.create_foreign_key(u'room_design_vote_log_ibfk_1', 'room_design_vote_log', 'room_design_apply', ['room_id'], ['id'])\n op.add_column('room_design_detail', sa.Column('room_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.create_foreign_key(u'room_design_detail_ibfk_1', 'room_design_detail', 'room_design_apply', ['room_id'], ['id'])\n op.drop_constraint(None, 'room_design_detail', type_='unique')\n op.drop_column('room_design_detail', 'room_name')\n op.drop_column('room_design_detail', 'applyer_name')\n op.create_table('room_design_apply',\n sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n sa.Column('school_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('room_name', mysql.VARCHAR(length=30), nullable=True),\n sa.Column('applyer_name', mysql.VARCHAR(length=30), nullable=True),\n sa.Column('phone', mysql.VARCHAR(length=30), nullable=True),\n sa.Column('addr', mysql.VARCHAR(length=30), nullable=True),\n 
sa.Column('create_time', mysql.DATETIME(), nullable=True),\n sa.ForeignKeyConstraint(['school_id'], [u'school.id'], name=u'room_design_apply_ibfk_1'),\n sa.ForeignKeyConstraint(['user_id'], [u'user.id'], name=u'room_design_apply_ibfk_2'),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset=u'utf8',\n mysql_engine=u'InnoDB'\n )\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.4229559600353241, "alphanum_fraction": 0.46069180965423584, "avg_line_length": 26.69565200805664, "blob_id": "2f252bee96d29bf9ed68fd8c17cf74d6a2233cb0", "content_id": "6f5fdb9fca8e026c8280deab2a4bec56bada108d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 636, "license_type": "permissive", "max_line_length": 65, "num_lines": 23, "path": "/static/mobile/js/base.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n \n var oHtml = document.documentElement;\n getFont();\n \n window.onresize = function(){\n getFont();\n \n\n } \n function getFont(){\n var screenWidth = oHtml.clientWidth;\n /*if (screenWidth<=320) {\n oHtml.style.fontSize ='17.0667px';\n }else if (screenWidth>=750) {\n oHtml.style.fontSize ='40px';\n }else{\n oHtml.style.fontSize = screenWidth/(750/40)+'px';\n }*/\n oHtml.style.fontSize = screenWidth/(750/40)+'px';\n \n }\n});" }, { "alpha_fraction": 0.6523456573486328, "alphanum_fraction": 0.6528394818305969, "avg_line_length": 37.94230651855469, "blob_id": "c5340b2fb2892b99ac6e3dcaeeef4181082d7e8a", "content_id": "7dd88f9689210dfe187305670ec0f84139ef7c46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2179, "license_type": "no_license", "max_line_length": 94, "num_lines": 52, "path": "/ops/common.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom models import db\nfrom models import Order\nfrom ops.user import 
UserService\nfrom ops.order import OrderService\nfrom ops.credit import CreditService\nfrom ops.activity import ActivityService\nfrom ops.room_design import RoomDesignService\nfrom ops.item import ItemService\nfrom constants import ORDER_STATUS\nfrom thirdparty.sms import send_sms_new_order\n\n\n\ndef pay_success_action(order, send_verified=False, need_pay=True, **kw):\n ''' 支付成功 处理函数\n send_verified #需要额外微信或支付宝付钱并且额度待审核订单 审核前已支付成功现金部分 没发送短信就return了 审核通过后不管count多少 需要发送短信 \n '''\n new_status = ORDER_STATUS.PAY_SUCCESS\n where = and_(\n Order.id==order.id,\n Order.status.in_([ORDER_STATUS.TO_PAY, ORDER_STATUS.NEW_ORDER])\n )\n kw['status'] = new_status\n count = OrderService.update_order(where, **kw)\n if not order.credit_verified: return\n user = UserService.get_user_by_id(order.user_id)\n phone = user.phone\n hospital = ItemService.get_hospital_dict_by_id(order.hospital_id)\n hospital_name = hospital['name']\n hospital_addr = hospital['addr']\n hospital_phone = hospital['phone']\n item = ItemService.get_item_dict_by_id(order.item_id)\n item_name = item['title']\n desc = '{},{},{}'.format(hospital_name, hospital_addr, hospital_phone)\n print 'desc', desc\n if count or send_verified or not need_pay:\n service_code= OrderService.create_servicecode(order.id)\n if order.credit_amount:\n CreditService.gen_order_period_logs(order.id)\n #给用户发送通知,确认购买成功\n send_sms_new_order.delay(phone, item_name, desc, service_code)\n\n\ndef get_item_activity_price(item):\n activity = ActivityService.get_current_activity()\n if activity:\n activity_item = ItemService.get_item_activity(item['id'], activity['id'])\n if activity_item:\n item['price'] = activity_item['price']\n" }, { "alpha_fraction": 0.43155309557914734, "alphanum_fraction": 0.45518723130226135, "avg_line_length": 23.066667556762695, "blob_id": "836938eada9aaadf6582e0c416e553df14594f09", "content_id": "0ceebe8daa8698380503527272d693a6dcffe543", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 3752, "license_type": "no_license", "max_line_length": 126, "num_lines": 135, "path": "/constants.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport time\n\n\nclass ORDER_STATUS:\n NEW_ORDER = 0\n TO_PAY = 1\n PAY_SUCCESS = 2\n PAY_ERROR = 3\n BOOKED = 4 #已预约 不存在这个状态 须读表service_code\n FINISH = 5\n TO_COMMENT = 6 #待评论 表中status不存这个状态,根据另一个字段来判断\n CANCELED = 7\n VERIFYING = 8\n CONFIRMED = 9 #服务码已确认\n CANCEL_BEFORE_PAY = 10 #支付前取消\n REJECTED = 11 #额度申请被拒绝\n PAY_TIMEOUT = 12 #支付超时关闭\n\n\nclass REPAYMENT_STATUS:\n NEW = 0 #准备还款\n TO_PAY = 1 #还款中\n PAY_SUCCESS = 2 #还款成功\n PAY_ERROR = 3\n\n\n\nclass REDPACK_PAY_STATUS:\n NEW = 0 #准备还款\n TO_PAY = 1 #还款中\n PAY_SUCCESS = 2 #还款成功\n PAY_ERROR = 3\n\n\n\nclass PAY_METHOD:\n WECHAT_WEB = 1 #公众号\n WECHAT_APP = 2 #微信app\n ALIPAY = 3 #支付宝\n\n\nORDER_STATUS_LABEL = {\n ORDER_STATUS.NEW_ORDER : '待支付',\n ORDER_STATUS.TO_PAY : '待支付',\n ORDER_STATUS.PAY_SUCCESS : '待服务',\n ORDER_STATUS.BOOKED : '已预约',\n ORDER_STATUS.PAY_ERROR : '支付异常',\n ORDER_STATUS.TO_COMMENT : '待评价',\n ORDER_STATUS.FINISH : '服务完成',\n ORDER_STATUS.CANCELED : '已取消',\n ORDER_STATUS.VERIFYING : '额度审核中',\n ORDER_STATUS.CONFIRMED : '服务码已确认',\n ORDER_STATUS.CANCEL_BEFORE_PAY : '支付前取消',\n ORDER_STATUS.REJECTED : '额度申请被拒绝',\n ORDER_STATUS.PAY_TIMEOUT : '支付超时关闭',\n }\n\nVOTE_COUNT_SOURCE_MAP = {\n 1: 20,\n 2: 200,\n 3: 1\n }\n\n\nBODY_LABEL = {\n 1 : '眼部',\n 2 : '鼻部',\n 3 : '祛痣',\n 4 : '美白',\n 5 : '脱毛',\n 6 : '牙齿',\n 7 : '嘴唇',\n 8 : '脸型',\n 9 : '胸部',\n 10 : '其他',\n 11 : '整体都满意',\n }\n\n\nclass ORDER_ADMIN_STATUS:\n TO_PAY = 1\n TO_SERVE = 2\n FINISH = 3\n CANCELD = 4\n TO_REFUND = 5\n CREDIT_VERIFY = 6\n REFUNDED = 7\n\nORDER_ADMIN_STATUS_MAP = {\n ORDER_ADMIN_STATUS.TO_PAY : '支付中',\n ORDER_ADMIN_STATUS.FINISH : '已完成',\n ORDER_ADMIN_STATUS.TO_REFUND : '待退款',\n ORDER_ADMIN_STATUS.CANCELD : '已取消',\n ORDER_ADMIN_STATUS.CREDIT_VERIFY : '额度待审核',\n ORDER_ADMIN_STATUS.TO_SERVE : '待服务',\n 
ORDER_ADMIN_STATUS.REFUNDED : '已退款'\n }\n\n\nADMIN_ORDER_STATUS_CHOICES = sorted([ {\"id\":k, \"title\":v} for k,v in ORDER_ADMIN_STATUS_MAP.items()], key=lambda i:i['id'])\n\n\nclass CREDIT_STATUS:\n ''' 额度状态 '''\n DEFAULT = 0\n VERIFYING = 1\n VERIFIED = 2\n REJECTED = 3\n\n\nclass APPLY_STATUS:\n ''' 额度申请状态 '''\n FIRST_STEP = 1 #第一步\n SECOND_STEP = 2 #第二步\n VERIFIED = 3 #通过审核\n REJECTED = 4 #被拒绝\n\n\nclass SERVICE_STATUS:\n ''' 服务码状态 '''\n STANDBY = 0 #待服务\n BOOKED = 1 #已预约\n VERIFYED = 2 #已验证\n\n\nclass ResponseCode:\n ''' 请求返回码 '''\n\n SUCCESS = 0\n NEED_LOGIN = 1\n INVALID_VCODE = 2 #验证码错误\n INVALID_USERNAME_OR_PASSWD = 3 #用户名或密码错误\n SERVER_ERROR = 10000\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.633431077003479, "alphanum_fraction": 0.7023460268974304, "avg_line_length": 25.230770111083984, "blob_id": "0e85d0cc3666eabe1acbf5782d7250dd438815d3", "content_id": "946e53fe786da15af1f9f1352789fbc6bd3d42e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 110, "num_lines": 26, "path": "/migrations/versions/29bbb2cfc971_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 29bbb2cfc971\nRevises: 29347d4f2522\nCreate Date: 2016-01-28 11:21:45.024170\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '29bbb2cfc971'\ndown_revision = '29347d4f2522'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_vote_log', sa.Column('source', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('room_design_vote_log', 'source')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5446872115135193, "alphanum_fraction": 0.5541211366653442, "avg_line_length": 22.68235206604004, "blob_id": "cd99a6dcb974f291d65d8d6dfca907598e59016a", "content_id": "755ad8e5f97e687f915e7610a1689359c5d5d46f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2166, "license_type": "permissive", "max_line_length": 71, "num_lines": 85, "path": "/static/user/js/new_applyer-infor.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\n// 同意并接受\nvar oAgree=document.getElementById('agree');\nvar onOff=true;\n$('#agree').click(function(){\n if(onOff){\n $('#agree').html('&#xe61a;');\n $('#agree').css('color','#FF4200')\n $('#agree').addClass('active')\n onOff=false;\n }else{\n $('#agree').html('&#xe607;');\n $('#agree').css('color','#9B9B9B')\n $('#agree').removeClass('active')\n onOff=true;\n }\n});\n\n\n$('#nextStep').on('click',function(){\n\tvar chsi_name=$('#account').val();\n\tvar chsi_passwd=$('#passwd').val();\n var parent_contact=$('#eldersPhone').val();\n var graduate_time=$('.graduation').text();\n var body_choice_ids = []\n var active_entrys = $('.choice-entry.active');\n for(var i=0;i<active_entrys.length;i++) {\n body_choice_ids.push($(active_entrys[i]).attr('data'));\n }\n var body_choice_text = $('#input-other-text').val();\n\n\tif(!chsi_name){\n\t\talert('请输入学信网账号');\n\t\treturn;\n\t}\n\tif(!chsi_passwd){\n\t\talert('请输入学信网密码');\n\t\treturn;\n\t}\n\n\t\n if(graduate_time.trim()=='请选择你毕业时间'){\n alert('请选择毕业时间');\n return;\n }\n if(parent_contact=='' || isNaN(parent_contact)){\n alert('请输入紧急联系人方式')\n return;\n }\n if(!body_choice_ids.length>0) {\n alert('你希望自己哪个部位变得更美');\n return\n }\n if(body_choice_ids.indexOf('10')!=-1&&!body_choice_text.length>0) {\n alert('请输入其他内容');\n return\n }\n \n if(!$('#agree').hasClass('active')) {\n return alert('请同意美分分授信协议');\n 
}\n\t$.ajax({\n\t\txhrFields: {withCredentials: true},\n\t\ttype:\"post\",\n\t\turl:\"http://\"+getHostName()+\"/user/apply_credit_post/\",\n\t\tdataType:'json',\n\t\tdata:{\n\t\t\tchsi_name:chsi_name,\n\t\t\tchsi_passwd:chsi_passwd,\n\t\t\tparent_contact: parent_contact,\n\t\t\tgraduate_time: graduate_time,\n\t\t\tbody_choice_ids: body_choice_ids.join(','),\n\t\t\tbody_choice_text: body_choice_text\n\t\t},\n\t\tsuccess:function(data){\n\t\t\tif(data.code==0){\n\t\t\t\tlocation.href='/user/menu_credit_apply/';\n\t\t\t} else {\n\t\t\t alert(data.msg)\n\t\t\t}\n\t\t},\n\t\terror:function(){\n\t\t\t\n\t\t}\n\t});\n});\n" }, { "alpha_fraction": 0.6366013288497925, "alphanum_fraction": 0.6810457706451416, "avg_line_length": 26.321428298950195, "blob_id": "a7aafd21dbac9166f70760b67e2976c8622e92c7", "content_id": "26bdecab8d58f1f0511b4966bd78aad45a734e57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "no_license", "max_line_length": 96, "num_lines": 28, "path": "/migrations/versions/13a5889df13_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 13a5889df13\nRevises: 9969fdf8fc9\nCreate Date: 2016-03-07 16:57:10.806493\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '13a5889df13'\ndown_revision = '9969fdf8fc9'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_apply', sa.Column('remark', sa.String(length=500), nullable=True))\n op.add_column('credit_apply', sa.Column('remark_img', sa.String(length=500), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('credit_apply', 'remark_img')\n op.drop_column('credit_apply', 'remark')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6447733640670776, "alphanum_fraction": 0.6799259781837463, "avg_line_length": 32.78125, "blob_id": "550a205747a0c6a66f5382cee57a170310828861", "content_id": "d747bc57e8ca8066f35592fe29e74e2d292727a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 97, "num_lines": 32, "path": "/migrations/versions/282274e764ca_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 282274e764ca\nRevises: 2b4b8933851\nCreate Date: 2015-12-05 16:05:41.867941\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '282274e764ca'\ndown_revision = '2b4b8933851'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('promoter', sa.Column('follow_count', sa.Integer(), nullable=True))\n op.add_column('promoter', sa.Column('reg_count', sa.Integer(), nullable=True))\n op.create_index(op.f('ix_promoter_follow_count'), 'promoter', ['follow_count'], unique=False)\n op.create_index(op.f('ix_promoter_reg_count'), 'promoter', ['reg_count'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_promoter_reg_count'), table_name='promoter')\n op.drop_index(op.f('ix_promoter_follow_count'), table_name='promoter')\n op.drop_column('promoter', 'reg_count')\n op.drop_column('promoter', 'follow_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7163570523262024, "alphanum_fraction": 0.7172976136207581, "avg_line_length": 47.6870231628418, "blob_id": "542819bb23376b292d78f52baaccfe9af711119a", "content_id": "060da3915585a0c601f89ee24bf41829207ec915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12915, "license_type": "no_license", "max_line_length": 139, "num_lines": 262, "path": "/user/api_urls.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\n\nfrom user.api_views import user_index\nfrom user.api_views import item_detail\nfrom user.api_views import item_list\nfrom user.api_views import item_filters\nfrom user.api_views import item_comment_list\nfrom user.api_views import user_fav_item\nfrom user.api_views import user_advice\nfrom user.api_views import user_order_list\nfrom user.api_views import order_preview\nfrom user.api_views import confirm_order\nfrom user.api_views import order_pay\nfrom user.api_views import repayment_pay\nfrom user.api_views import wxapp_pay_callback\nfrom user.api_views import wxapp_repayment_callback\nfrom user.api_views import uploads\nfrom user.api_views import order_detail\nfrom user.api_views import comment_post\nfrom user.api_views import my_period_bill\nfrom user.api_views import user_home\nfrom user.api_views import my_repayments\nfrom user.api_views import item_cats\nfrom user.api_views import my_favs\nfrom user.api_views import my_coupons\nfrom user.api_views import my_apply\nfrom user.api_views import help\nfrom user.api_views import repayment\nfrom user.api_views 
import get_help_entry\nfrom user.api_views import apply_credit_page\nfrom user.api_views import project_doctor_description\nfrom user.api_views import get_jssdk_js\nfrom user.api_views import get_school_list\nfrom user.api_views import hospital_detail\nfrom user.api_views import get_city_list\nfrom user.api_views import upload_image\nfrom user.api_views import apply_credit_post\nfrom user.api_views import apply_credit_photo\nfrom user.api_views import edit_name\nfrom user.api_views import my_item_comment_list\nfrom user.api_views import my_order_bill\nfrom user.api_views import hospital_item_list\nfrom user.api_views import order_pay_success\nfrom user.api_views import repayment_pay_success\nfrom user.api_views import cancel_order\nfrom user.api_views import finish_order\nfrom user.api_views import hospital_location\nfrom user.api_views import meifenfen_index\nfrom user.api_views import meifenfen_city\nfrom user.api_views import help_html\nfrom user.api_views import alipay_order_pay_action\nfrom user.api_views import wx_order_pay_action\nfrom user.api_views import my_apply_result\nfrom user.api_views import alipay_notify\nfrom user.api_views import alipay_repayment_notify\nfrom user.api_views import apply_credit\nfrom user.api_views import alipay_repayment_pay_action\nfrom user.api_views import wx_repayment_pay_action\nfrom user.api_views import notification_list\nfrom user.api_views import mark_read\nfrom user.api_views import test_wx_app_pay\nfrom user.api_views import test_alipay\nfrom user.api_views import recommend_item_list\nfrom user.api_views import check_update\n\nfrom user.auth import get_reg_vcode\nfrom user.auth import signup_post\nfrom user.auth import get_vcode\nfrom user.auth import reset_passwd\nfrom user.auth import signup\nfrom user.auth import user_login\nfrom user.auth import user_login_post\nfrom user.auth import auth_wechat\nfrom user.auth import logout\n\n\nuser_api = Blueprint('user_app_api', __name__,\n 
template_folder='templates')\n\n\nuser_api.add_url_rule('/', 'user', user_index)\n\nuser_api.add_url_rule('/recommend_item_list/', 'recommend_item_list', recommend_item_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/index/', 'meifenfen', meifenfen_index)\nuser_api.add_url_rule('/item_cats/', 'item_cats', item_cats)\nuser_api.add_url_rule('/item_list/', 'item_list', item_list, methods=['POST', 'GET'])\n#user_api.add_url_rule('/hospital_item_list/', 'the_hospital_item_list', item_list, methods=['POST', 'GET'])\nuser_api.add_url_rule('/item_detail/', 'item_detail', item_detail, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/login_post/', 'user_login_post', user_login_post, methods=['POST'])\nuser_api.add_url_rule('/get_vcode/', 'get_vcode', get_vcode, methods=['POST', 'GET'])\nuser_api.add_url_rule('/get_reg_vcode/', 'get_reg_vcode', get_reg_vcode, methods=['POST', 'GET'])\nuser_api.add_url_rule('/signup_post/', 'signup_post', signup_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/reset_passwd/', 'reset_passwd', reset_passwd, methods=['POST', 'GET'])\nuser_api.add_url_rule('/logout/', 'logout', logout, methods=['POST', 'GET'])\nuser_api.add_url_rule('/item_filters/', 'item_filters', item_filters, methods=['POST', 'GET'])\nuser_api.add_url_rule('/home/', 'user_home', user_home, methods=['POST', 'GET'])\n\n\nuser_api.add_url_rule('/upload_image/', 'upload_image', upload_image, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/edit_name/', 'edit_name', edit_name, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/meifenfen_city/', 'meifenfen_city', meifenfen_city)\nuser_api.add_url_rule('/comment_list/', 'item_comment_list', item_comment_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/hospital_detail/', 'hospital_detail', hospital_detail, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/order_preview/', 'order_preview', order_preview, methods=['POST', 'GET'])\nuser_api.add_url_rule('/confirm_order/', 'confirm_order', confirm_order, 
methods=['POST', 'GET'])\nuser_api.add_url_rule('/order_pay/', 'order_pay', order_pay, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/comment_post/', 'item_comment_post', comment_post, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/order_detail/', 'order_detail', order_detail, methods=['POST', 'GET']) #\n\n\n#0000000000\nuser_api.add_url_rule('/hospital_item_list/', 'hospital_item_list', hospital_item_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/help.html', 'help_html', help_html)\n\n\nuser_api.add_url_rule('/hospital_location/', 'hospital_location', hospital_location, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/comment_list/', 'item_comment_list', item_comment_list, methods=['POST', 'GET'])\nuser_api.add_url_rule('/my_item_comment_list/', 'my_item_comment_list', my_item_comment_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/my_favs/', 'my_favs', my_favs, methods=['POST', 'GET']) #我的心愿单\nuser_api.add_url_rule('/fav_item/', 'fav_item', user_fav_item, methods=['POST', 'GET']) #我的心愿单\n\n\n#申请额度\nuser_api.add_url_rule('/apply_credit/', 'apply_credit', apply_credit, methods=['POST', 'GET']) #申请第一步\nuser_api.add_url_rule('/apply_credit_post/', 'apply_credit_post', apply_credit_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/apply_photo/', 'apply_photo', user_login_post, methods=['POST', 'GET']) #申请第二步\n#user_api.add_url_rule('/apply_photo_post/', 'apply_photo_post', apply_credit_photo, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/repayment_pay/', 'repayment_pay', repayment_pay, methods=['POST', 'GET']) #选择支付方式 还款\nuser_api.add_url_rule('/order_pay_post/', 'order_pay_post', user_login_post, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/wx_order_pay_action/', 'wx_order_pay_action', wx_order_pay_action, methods=['POST', 'GET'])\nuser_api.add_url_rule('/wx_repayment_pay_action/', 'wx_repayment_pay_action', wx_repayment_pay_action, methods=['POST', 'GET'])\nuser_api.add_url_rule('/alipay_order_pay_action/', 
'alipay_order_pay_action', alipay_order_pay_action, methods=['POST', 'GET'])\nuser_api.add_url_rule('/alipay_repayment_pay_action/', 'alipay_repayment_pay_action', alipay_repayment_pay_action, methods=['POST', 'GET'])\nuser_api.add_url_rule('/wxapp_pay_callback/', 'wxapp_pay_callback', wxapp_pay_callback, methods=['POST', 'GET']) #微信支付回调\nuser_api.add_url_rule('/wxapp_repayment_callback/', 'wxapp_repayment_callback', wxapp_repayment_callback, methods=['POST', 'GET']) #微信还款回调\n\nuser_api.add_url_rule('/alipay_notify/', 'alipay_notify', alipay_notify, methods=['POST', 'GET'])\nuser_api.add_url_rule('/alipay_repayment_notify/', 'alipay_repayment_notify', alipay_repayment_notify, methods=['POST', 'GET'])\n\n#上传图片\nuser_api.add_url_rule('/uploads/', 'uploads', uploads, methods=['POST', 'GET']) #上传图片 \n\n#个人接口\nuser_api.add_url_rule('/edit_profile/', 'edit_profile', user_login_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/my_period_bill/', 'my_period_bill', my_period_bill, methods=['POST', 'GET']) #当期账单\nuser_api.add_url_rule('/my_repayments/', 'my_repayments', my_repayments, methods=['POST', 'GET']) #还款历史\nuser_api.add_url_rule('/my_order_bill/', 'my_order_bill', my_order_bill, methods=['POST', 'GET']) #订单每期账单\n\nuser_api.add_url_rule('/my_apply/', 'my_apply', my_apply, methods=['POST', 'GET']) #审核进度\nuser_api.add_url_rule('/my_orders/', 'my_orders', user_order_list, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/my_coupons/', 'my_coupons', my_coupons, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/cancel_order/', 'cancel_order', cancel_order, methods=['POST', 'GET']) #\n\n#帮助\nuser_api.add_url_rule('/help/', 'help', help, methods=['POST', 'GET'])\nuser_api.add_url_rule('/get_help_entry/', 'get_help_entry', get_help_entry, methods=['POST', 'GET'])\nuser_api.add_url_rule('/advice/', 'advice', user_advice, methods=['POST', 'GET'])\n\n\nuser_api.add_url_rule('/apply_credit_page/', 'apply_credit_page', apply_credit_page, methods=['POST', 
'GET'])\n\nuser_api.add_url_rule('/project_doctor_description/', 'project_doctor_description', project_doctor_description, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_school_list/', 'get_school_list', get_school_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/repayment/', 'repayment', repayment, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_city_list/', 'get_city_list', get_city_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/my_apply_result/', 'my_apply_result', my_apply_result, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/order_pay_success/', 'order_pay_success', order_pay_success)\nuser_api.add_url_rule('/repayment_pay_success/', 'repayment_pay_success', repayment_pay_success)\n\nuser_api.add_url_rule('/finish_order/', 'finish_order', finish_order, methods=['POST', 'GET'])\n\n\nfrom user.trial import trial_list\nfrom user.trial import my_trial_list\nfrom user.trial import comment_trial\nfrom user.trial import apply_trial\nfrom user.trial import trial_applyers\nfrom user.trial import trial_comment_list\n\nfrom user.trial import get_trial_detail\nfrom user.trial import get_history_apply\n\n#试用\nuser_api.add_url_rule('/trial_list/', 'trial_list', trial_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/my_trial_list/', 'my_trial_list', my_trial_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/comment_trial/', 'comment_trial', comment_trial, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/apply_trial/', 'apply_trial', apply_trial, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/trial_applyers/', 'trial_applyers', trial_applyers, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/trial_comment_list/', 'trial_comment_list', trial_comment_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_trial_detail/', 'get_trial_detail', get_trial_detail, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_history_apply/', 'get_history_apply', get_history_apply, methods=['POST', 
'GET'])\n\nuser_api.add_url_rule('/notification_list/', 'notification_list', notification_list)\n\nuser_api.add_url_rule('/mark_read/', 'mark_read', mark_read, methods=['POST', 'GET'])\n\n#user_api.add_url_rule('/test_wx_app_pay/', 'test_wx_app_pay', test_wx_app_pay)\n\n#user_api.add_url_rule('/test_alipay/', 'test_alipay', test_alipay, methods=['POST','GET'])\n\nuser_api.add_url_rule('/check_update/', 'check_update', check_update, methods=['POST','GET'])\n\nfrom user.api_views import upload_device_info\n\nuser_api.add_url_rule('/upload_device_info/', 'upload_device_info', upload_device_info, methods=['POST','GET'])\n\n\n\nfrom user.api_views import hospital_list\nfrom user.api_views import hospital_filters\nfrom user.api_views import mei_tutorials\nfrom user.api_views import tutorial_detail\nfrom user.api_views import daily_coupons\nfrom user.api_views import resend_user_coupon\nfrom user.api_views import receive_coupon\n\nuser_api.add_url_rule('/hospital_list/', 'hospital_list', hospital_list, methods=['POST','GET'])\nuser_api.add_url_rule('/hospital_filters/', 'hospital_filters', hospital_filters, methods=['POST','GET'])\n\nuser_api.add_url_rule('/mei_tutorials/', 'mei_tutorials', mei_tutorials, methods=['POST','GET'])\n\nuser_api.add_url_rule('/tutorial_detail/', 'tutorial_detail', tutorial_detail, methods=['POST','GET'])\n\nuser_api.add_url_rule('/daily_coupons/', 'daily_coupons', daily_coupons, methods=['POST','GET'])\n\nuser_api.add_url_rule('/resend_user_coupon/', 'resend_user_coupon', resend_user_coupon, methods=['POST','GET'])\n\nuser_api.add_url_rule('/receive_coupon/', 'receive_coupon', receive_coupon, methods=['POST','GET'])\n\n\nfrom user.api_views import meifenfen_new_index\n\nuser_api.add_url_rule('/meifenfen_new_index/', 'meifenfen_new_index', meifenfen_new_index, methods=['POST','GET'])\n\n\n\n" }, { "alpha_fraction": 0.6425406336784363, "alphanum_fraction": 0.7031019330024719, "avg_line_length": 25.038461685180664, "blob_id": 
"7f30bc5858b9c1b2950151f2bdae64a2872138e2", "content_id": "3d930fd32169c159e43e7567e50610be58e79de3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 106, "num_lines": 26, "path": "/migrations/versions/456050d473e_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 456050d473e\nRevises: 34a7370fe40\nCreate Date: 2016-02-02 10:30:28.130061\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '456050d473e'\ndown_revision = '34a7370fe40'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('redpack_question', sa.Column('status', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('redpack_question', 'status')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6242603659629822, "alphanum_fraction": 0.7071005702018738, "avg_line_length": 25, "blob_id": "e0faa3a625e879add74b4b422d22d7df058df60a", "content_id": "2c7af242beab6e077d7490abab1f8f7fb40da5e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 100, "num_lines": 26, "path": "/migrations/versions/285c9a12f7b4_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 285c9a12f7b4\nRevises: 200344fc698d\nCreate Date: 2016-01-27 15:31:26.017505\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '285c9a12f7b4'\ndown_revision = '200344fc698d'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('room_design_detail', 'phone')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_detail', sa.Column('phone', mysql.VARCHAR(length=30), nullable=True))\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5477750897407532, "alphanum_fraction": 0.5660532116889954, "avg_line_length": 28.159927368164062, "blob_id": "64036ac0b9b0b0bfad584ed34182cb33d49447e0", "content_id": "848f7206014d272a5a2901e02ac9cd03e7d71f22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16342, "license_type": "no_license", "max_line_length": 167, "num_lines": 544, "path": "/util/utils.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport random\nimport hashlib\nimport hmac\nimport time\nimport json\nimport datetime\nimport calendar\nimport datetime as dt_mod\nfrom datetime import datetime as dt_obj\nfrom random import Random\n\nfrom flask import Response\n\nfrom settings import IMAGE_HOST_URL_DOMAIN\nfrom settings import SERVER_NAME\n\n\ndef get_time_str_from_dt(date, format='%Y-%m-%d %H:%M:%S'):\n return date.strftime(format)\n\n\ndef format_dt(date, format='%Y-%m-%d %H:%M:%S'):\n now = dt_obj.now()\n if now.year==date.year and now.month==date.month and now.day==date.day:\n return '今日'\n return date.strftime('%m.%d')\n\n\ndef get_timestamp(s,format='%Y-%m-%d %H:%M:%S'):\n return int(time.mktime(dt_obj.strptime(s, format).timetuple()))\n\ndef date_to_datetime(date, format='%Y-%m-%d'):\n date = date[:19]\n timestamp = get_timestamp(date, format)\n current_time = dt_mod.datetime.fromtimestamp(timestamp)\n return current_time\n\n\ndef prefix_img_domain(img_key, domain=IMAGE_HOST_URL_DOMAIN):\n if not img_key: return ''\n if 'http' in img_key: return img_key\n if domain in img_key:\n return img_key\n return 'http://'+domain+'/'+img_key\n\ndef prefix_http(link):\n ''' 前缀http 
'''\n if not link: return ''\n if 'http' in link: return link\n return 'http://' + link\n\n\ndef prefix_servername(link):\n if not link: return ''\n if 'http' in link: return link\n link = link.lstrip('/')\n return 'http://{}/{}'.format(SERVER_NAME, link)\n\n\ndef prefix_img_list(images, domain=IMAGE_HOST_URL_DOMAIN):\n ''' 图片链接绝对路径带http '''\n images = map(lambda i:i.strip(), (images or '').split(','))\n return map(lambda key:prefix_img_domain(key, domain), images)\n\ndef prefix_img_list_thumb(images, domain=IMAGE_HOST_URL_DOMAIN, width=200):\n ''' 图片链接绝对路径带http '''\n images = filter(bool, (images or '').split(','))\n result = map(lambda key:prefix_img_domain(key, domain), images)\n fix_func = lambda i: i+'?imageView2/1/w/{}/h/{}'.format(width, width)\n return map(fix_func, result)\n\n\ndef str_to_int_list(comma_str):\n ''' \"1,2,3\" > [1,2,3] '''\n str_list = filter(bool, (comma_str or '').split(','))\n return map(int, str_list)\n\n\ndef jsonify(data, encoder=None):\n if isinstance(data, dict):\n data = Utf8Dict(data)()\n if isinstance(data, (tuple, list)):\n data = Utf8Dict.Utf8List(list(data))\n return json.dumps(data, ensure_ascii=False, cls=encoder or Utf8Encoder)\n\n\ndef jsonify_response(result, with_response=False):\n# if not with_response:\n# return jsonify(result), 200, {'Content-Type': 'application/json; charset=utf-8'}\n return Response(\n response=jsonify(result),\n content_type='application/json; charset=utf-8',\n mimetype=\"application/json\"\n )\n\n\ndef template_response(result):\n return Response(\n response=result\n )\n\n\ndef js_response(result):\n return Response(\n response=(result),\n content_type='application/javascript; charset=utf-8',\n mimetype=\"application/javascript\"\n )\n\n\nclass Utf8Encoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, unicode):\n return obj.encode('utf8')\n else:\n return json.JSONEncoder.default(self, obj)\n\nclass Utf8Dict(dict):\n class Utf8List(list):\n def __init__(self, obj):\n self._obj 
= obj\n\n def __iter__(self, *args, **kwargs):\n iter = list.__iter__(self._obj, *args, **kwargs)\n for k in iter:\n if isinstance(k, (tuple, list)):\n yield Utf8Dict.Utf8List(k)\n elif isinstance(k, dict):\n yield Utf8Dict(k)()\n elif isinstance(k, unicode):\n yield k.encode('utf8')\n elif isinstance(k, dt_obj):\n yield str(k)\n else:\n yield k\n def __init__(self, obj):\n self._obj = obj\n\n def __iter__(self, *args, **kwargs):\n iter = dict.__iter__(self._obj, *args, **kwargs)\n for k in iter:\n yield k\n\n def __getitem__(self, *args, **kwargs):\n value = dict.__getitem__(self._obj, *args, **kwargs)\n if isinstance(value, unicode):\n return value.encode('utf8')\n elif isinstance(value, (tuple,list)):\n value = list(value)\n for i in xrange(len(value)):\n if isinstance(value[i], unicode):\n value[i] = value[i].encode('utf8')\n elif isinstance(value[i], dict):\n value[i] = Utf8Dict(value[i])()\n elif isinstance(value[i], (tuple,list)):\n value[i] = Utf8Dict.Utf8List(value[i])\n return value\n elif isinstance(value, dict):\n return Utf8Dict(value)()\n elif isinstance(value, dt_obj):\n return str(value)\n else:\n return value\n def __setitem__(self, *args, **kwargs):\n return dict.__setitem__(self._obj, *args, **kwargs)\n\n def __call__(self):\n data = {}\n for k in self:\n data[k] = self.__getitem__(k)\n return data\n\n\ndef union_dict(*args):\n ''' 把多个字典合并 后面的覆盖前面的'''\n result = {}\n for i in args:\n result.update(i)\n return result\n\n\ndef comma_str_to_list(comma_str):\n ''' 逗号分割的字符串转化为列表 '''\n comma_str = comma_str or ''\n comma_str = comma_str.replace(',', ',')\n str_list = comma_str.split(',')\n return filter(bool, str_list)\n\n\ndef keep_fields_from_list(items, fields):\n ''' 保留列表item中的指定字段 去掉其余字段 '''\n for item in items:\n for key in item.keys():\n if key not in fields: item.pop(key)\n\n\ndef template_response(result):\n return Response(\n response=result\n )\n\n\ndef random_str(randomlength=6):\n string = ''\n chars = 
'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'\n length = len(chars) - 1\n random = Random()\n for i in range(randomlength):\n string += chars[random.randint(0, length)]\n return string.lower()\n\n\ndef gen_item_no():\n ''' 商品编号 '''\n now = dt_obj.now()\n prefix = get_time_str_from_dt(now, '%y%m%d')\n return prefix + random_no(randomlength=4)\n\n\ndef random_no(randomlength=6):\n string = ''\n chars = '0123456789'\n length = len(chars) - 1\n random = Random()\n for i in range(randomlength):\n string += chars[random.randint(0, length)]\n return string\n\n\ndef trans_list(items, field, new_field, trans_dict, pop=False):\n print trans_dict, field\n for item in items:\n field_val = item[field]\n item[new_field] = trans_dict.get(field_val) \\\n if not pop else trans_dict.pop(field_val)\n\n\nday_delta = dt_mod.timedelta(days=1)\nhour_delta = dt_mod.timedelta(hours=1)\nminute_delta = dt_mod.timedelta(minutes=1)\nsecond_delta = dt_mod.timedelta(seconds=1)\n\n\ndef calc_expire_remain(end_time_str, status=0):\n end_time = date_to_datetime(end_time_str, '%Y-%m-%d %H:%M:%S')\n delta = end_time-dt_obj.now()\n total_seconds = int(delta.total_seconds())\n if total_seconds>86400:\n return '{}天到期'.format(total_seconds/86400)\n elif total_seconds>3600:\n return '{}小时到期'.format(total_seconds/3600)\n elif total_seconds>60:\n return '{}分钟到期'.format(total_seconds/60)\n elif status==0:\n return '已过期'\n else:\n return '{}前'.format(end_time_str[:10])\n\n\ndef get_current_period():\n ''' 本期帐单log开始 本期结束 '''\n now = dt_obj.now()\n year = now.year\n month = now.month\n day = now.day\n end = dt_obj(year=year, month=month, day=1) - second_delta\n if month>1:\n start = dt_obj(year=year, month=month-1, day=1)\n else:\n start = dt_obj(year=year-1, month=12, day=1)\n return start, end\n\n\ndef get_current_deadline():\n ''' 获取当期帐单截止日期 '''\n now = dt_obj.now()\n year = now.year\n month = now.month\n day = now.day\n if month+1>12:\n end = dt_obj(year=year+1, month=month+1-12, day=2) - 
second_delta\n else:\n end = dt_obj(year=year, month=month+1, day=2) - second_delta\n return end\n\n\ndef is_delayed():\n return dt_obj.now().day>15\n\n\ndef get_next_period():\n ''' 下期开始 下期结束 '''\n now = dt_obj.now()\n year = now.year\n month = now.month\n day = now.day\n start = dt_obj(year=year, month=month, day=1)\n if month==12:\n end = dt_obj(year=year+1, month=1, day=1) - second_delta\n else:\n end = dt_obj(year=year, month=month+1, day=1) -second_delta\n return start, end\n\n\ndef add_months(sourcedate, months):\n month = sourcedate.month - 1 + months\n year = int(sourcedate.year + month / 12 )\n month = month % 12 + 1\n day = min(sourcedate.day,calendar.monthrange(year,month)[1])\n day = datetime.date(year,month,day)\n hour = sourcedate.hour\n minute = sourcedate.minute\n second = sourcedate.second\n return dt_obj(year=day.year, month=day.month, day=day.day, hour=hour, minute=minute, second=second)\n\n\ndef get_due_time(index):\n ''' 计算每一期应还时间 '''\n due_time_start = get_current_deadline()\n hour = due_time_start.hour\n minute = due_time_start.minute\n second = due_time_start.second\n return add_months(due_time_start, index)\n\n\ndef abbreviated_pages(n, page):\n \"\"\"\n 分页数 eg 1 2 3 ... 10 11 ... 
100\n \"\"\"\n if not(0 < n): return {}\n assert(0 < page <= n)\n\n if n <= 10:\n pages = set(range(1, n + 1))\n else:\n pages = (set(range(1, 4))\n | set(range(max(1, page - 2), min(page + 3, n + 1)))\n | set(range(n - 2, n + 1)))\n\n def display():\n last_page = 0\n for p in sorted(pages):\n if p != last_page + 1: yield '...'\n yield p\n last_page = p\n d = []\n for i in display():\n if i not in d:\n d.append(i)\n else:\n d.append(i*2)\n return {\n 'total' : n,\n 'current': page,\n 'pages' : tuple(d)\n }\n\n\ndef cacl_punish_fee(info):\n ''' 计算滞纳金 '''\n amount = info['amount']\n deadline = date_to_datetime(info['deadline'], format='%Y-%m-%d %H:%M:%S')\n if deadline<dt_obj.now() and info['status']==0:\n delta = get_date_delta(str(deadline)[:19], str(dt_obj.now())[:19])\n for i in range(delta):\n info['punish'] = (info['amount']+ (info['punish'] or 0))/100.0\n info['punish'] = format_price(info['punish'])\n\n\ndef format_price(field):\n result = round(float(field), 2)\n if int(result)==result:\n result = int(result)\n return result\n\n\ndef format_rate(field):\n return round(float(field), 1)\n\n\ndef get_date_delta(date, other_date):\n ''' 获取时间差 '''\n date_time = date_to_datetime(date, format='%Y-%m-%d %H:%M:%S')\n other_time = date_to_datetime(other_date, format='%Y-%m-%d %H:%M:%S')\n\n return (other_time - date_time).days\n\n\ndef get_delayed_info(log):\n ''' 返回 是否逾期 逾期天数 '''\n delayed = False\n delayed_days = 0\n if log['repayment_time'] and log['repayment_time'] > log['deadline']:\n delayed = True\n delayed_days = get_date_delta(log['deadline'], log['repayment_time'])\n elif log['status']==0 and log['deadline']<str(dt_obj.now()):\n delayed = True\n delayed_days = get_date_delta(log['deadline'], str(dt_obj.now()))\n log['delayed'] = delayed\n log['delayed_days'] = delayed_days\n\n\ndef deadline_zh(deadline):\n orig_deadline = deadline\n deadline = add_months(deadline, -1)\n year = deadline.year\n month = deadline.month\n end = dt_obj(year=year, month=month, 
day=1)-second_delta\n if month-1>=1:\n begin = dt_obj(year=year, month=month-1, day=1)\n else:\n begin = dt_obj(year=year-1, month=12, day=1)\n\n title = get_time_str_from_dt(deadline, '%Y年%m月帐单') + \\\n get_time_str_from_dt(begin, '(%m.%d-') + get_time_str_from_dt(end, '%m.%d)')\n return title, get_time_str_from_dt(orig_deadline, '还款日截至%Y年%m月%d日')\n\n\ndef get_next_working_day(submit_time=None):\n ''' 下个工作日 '''\n if not submit_time:\n now = dt_obj.now()\n else:\n now = date_to_datetime(submit_time[:19], format='%Y-%m-%d %H:%M:%S')\n weekday = now.weekday()\n if weekday in [4, 5, 6]:\n return now+(7-weekday)*day_delta\n return now+day_delta\n\n\n\ndef md5_str(val, key='meifenfen'):\n result = hmac.new(key, val, hashlib.md5).hexdigest()\n return result\n\n\nnow = lambda : int(time.time())\ndef get_today_timestamp():\n now = dt_obj.now()\n year, month, day = now.year, now.month, now.day\n now = dt_obj(year=year, month=month, day=day)\n return int(time.mktime(now.timetuple()))\n\n\ndef delta_time_str(end_time, start_time=None):\n ''' 时间差字符串 '''\n if not start_time:\n start_time = dt_obj.now()\n if not end_time > start_time:\n return '已结束'\n delta = end_time - start_time\n days = delta.days\n hours = delta.seconds / 3600\n minutes = ( delta.seconds % 3600 ) / 60\n seconds = ( delta.seconds % 3600 ) % 60\n return '{}天{}时{}分{}秒'.format(days, hours, minutes, seconds)\n\n\nimport re\nurl_pattern = re.compile('http.*?.com/')\ndef get_img_key(full_url):\n return re.split(url_pattern, full_url)[1]\n\n\ndef translate_location(latlng=None):\n '''\n http://www.gpsspg.com/api/convert/latlng/\n 0= WGS84 / GPS硬件 / Google Earth / Google Maps 卫星模式\n 1= Google Maps 地图模式\n 2= 百度地图坐标\n 3= QQ腾讯地图坐标 / 高德地图坐标 / 阿里云地图坐标\n 4= MapBar图吧地图坐标\n '''\n import requests\n import urllib\n data = {\n 'oid': '1753',\n 'key': '64D8B0DF48E904DCEA7A264FC8811EAF',\n 'from': '0',\n 'to': '3',\n 'latlng': latlng\n }\n params_str = urllib.urlencode(data)\n data = 
requests.get('http://api.gpsspg.com/convert/latlng/?'+params_str)\n return data\n\n\ndef set_coupon_use_time(dailys):\n ''' 设置优惠券使用时间 '''\n now = dt_obj.now()\n year = now.year\n month = now.month\n day = now.day\n hour = now.hour\n minute = now.minute\n now = dt_obj(year=year, month=month, day=day, hour=hour, minute=minute)\n\n for daily in dailys:\n assert daily.get('coupon'), '优惠券不存在'\n effective = daily['coupon']['effective']\n start = now\n end = now + second_delta*effective\n daily['use_time_start'] = start\n daily['use_time_end'] = end\n daily['use_time'] = get_time_str_from_dt(start, '%-m.%-d') + '-' + get_time_str_from_dt(end, '%-m.%-d')\n\n\ndef convert_locaton(lnglat):\n ''' '''\n import requests\n# url = 'http://restapi.amap.com/rgeocode/simple?resType=json&encode=utf-8&range=3000&roadnum=0&crossnum=0&poinum=0&retvalue=1&sid=7001&region=&key=da03b0f06b056963d2f823d6ddffad6c' + lnglat\n# response = requests.get(url)\n# return response.json()\n lng, lat = lnglat.split(',')\n url = 'http://api.map.baidu.com/geocoder/v2/?ak=74136f02e6474fb72f3000e449e93c97&location='+lat+','+lng+'&output=json&pois=1'\n print url\n response = requests.get(url)\n return response.json()\n\n\n\ndef gen_redpack_billno():\n ''' 红包订单编号 '''\n now = dt_obj.now()\n prefix = get_time_str_from_dt(now, '%Y%m%d')\n return prefix + random_no(randomlength=10)\n\n\ndef random_redpack_price():\n val = int(random.random()*100)/10.0\n if val<1: val += 1\n if val>5: val -= 5\n return max(1, format_price(val))\n\n\ndef imgs_to_list(pics):\n ''' '''\n str_list = (pics or '').split(',')\n length = len(str_list)\n if length<4:\n for i in range(4-length):\n str_list.append('')\n return str_list\n\n\n\n" }, { "alpha_fraction": 0.6469155550003052, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 31.421052932739258, "blob_id": "966369f0fa4a6fe1726f0e563e76f973691a6aa4", "content_id": "692f12931b82ce918b7852784d5674bc2ce41683", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1232, "license_type": "no_license", "max_line_length": 81, "num_lines": 38, "path": "/migrations/versions/3621ae6c4339_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3621ae6c4339\nRevises: 42e923c1238\nCreate Date: 2015-10-31 10:03:56.359836\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3621ae6c4339'\ndown_revision = '42e923c1238'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\nimport models\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('repayment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('data', sa.String(length=1000), nullable=True),\n sa.Column('order_no', sa.String(length=30), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('update_time', sa.DateTime(), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('order_no')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('repayment')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.49175742268562317, "alphanum_fraction": 0.5022078156471252, "avg_line_length": 40.175758361816406, "blob_id": "7bf5e3d054390bb7479a71dcc8a646b399dddfe3", "content_id": "f38337424c1cc446cfb1c2fd8aa6805c13f8cf6e", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 6928, "license_type": "no_license", "max_line_length": 101, "num_lines": 165, "path": "/thirdparty/chsi.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nfrom thirdparty.qn import upload_img\n\n\n\ndef login_xuexin(username, passwd):\n ''' 抓取学信网数据 '''\n link = 'https://account.chsi.com.cn/passport/login'\n import requests\n from lxml import html\n import urllib\n payload = {\n 'username': username,\n 'password': passwd,\n }\n response = None\n success = False\n return_captcha = False\n try:\n with requests.Session() as session:\n print session.cookies\n r = session.get('https://account.chsi.com.cn/passport/login')\n tree= html.fromstring(r.text)\n _lt = tree.xpath('//*[@id=\"fm1\"]/input[1]')[0].value\n print _lt, 'lt'\n payload['lt'] = _lt\n payload['_eventId'] = 'submit'\n payload['submit'] = '登录'\n\n headers = {\n 'content-type': 'application/x-www-form-urlencoded'\n }\n print urllib.urlencode(payload)\n data = urllib.urlencode(payload)\n p = session.post(link, data=payload, headers=headers)\n print 'logged in'\n response = session.get('http://my.chsi.com.cn/archive/xjarchive.action')\n tree = html.fromstring(response.text)\n table = tree.xpath('//*[@id=\"resultTable\"]')[0]\n enroll_time = tree.xpath('//*[@id=\"resultTable\"]/table/tr[9]/td[2]')[0].text.strip()\n grade = tree.xpath('//*[@id=\"resultTable\"]/table/tr[7]/td[2]')[0].text.strip()\n graduate_time = tree.xpath('//*[@id=\"resultTable\"]/table/tr[10]/td[2]')[0].text.strip()\n birth_day = 
tree.xpath('//*[@id=\"resultTable\"]/table/tr[3]/td[1]')[0].text.strip()\n school = tree.xpath('//*[@id=\"resultTable\"]/table/tr[5]/td[1]')[0].text.strip()\n name = tree.xpath('//*[@id=\"resultTable\"]/table/tr[1]/td[1]')[0].text.strip()\n sex = tree.xpath('//*[@id=\"resultTable\"]/table/tr[2]/td[1]')[0].text.strip()\n years = tree.xpath('//*[@id=\"resultTable\"]/table/tr[8]/td[1]')[0].text.strip()\n major = tree.xpath('//*[@id=\"resultTable\"]/table/tr[6]/td[2]')[0].text.strip()\n id_no = tree.xpath('//*[@id=\"resultTable\"]/table/tr[3]/td[2]')[0].text.strip()\n #print table.text_content()\n print '姓名', name\n print '性别', sex\n print '身份证号', id_no\n print '学校', school\n print '学制', years\n print '学历', grade\n print '入学时间', enroll_time\n print '毕业时间', graduate_time\n print '出生日期', birth_day\n print '专业', major\n \n result = {\n 'name' : name,\n 'sex' : sex,\n 'school' : school,\n 'years' : years,\n 'grade' : grade,\n 'enroll_time' : enroll_time,\n 'graduate_time' : graduate_time,\n 'birth_day' : birth_day,\n 'major' : major,\n 'id_no' : id_no\n }\n success = True\n return result, success, return_captcha, session\n\n except Exception as e:\n import traceback\n traceback.print_exc()\n img_res = session.get('https://account.chsi.com.cn/passport/captcha.image')\n #return img_res\n print img_res.status_code, len(img_res.content)\n captcha_img = img_res.content\n import time\n key = str(time.time())+'.jpg'\n print 'key', key\n upload_img(key, captcha_img)\n success = False\n return_captcha = True\n return key, success, return_captcha, session\n\n\nimport urllib\nfrom lxml import html\ndef get_chsi_info(username, passwd, captcha, session):\n payload = {\n 'username': username,\n 'password': passwd,\n }\n print session.cookies\n r = session.get('https://account.chsi.com.cn/passport/login')\n tree= html.fromstring(r.text)\n _lt = tree.xpath('//*[@id=\"fm1\"]/input[1]')[0].value\n print _lt, 'lt'\n payload['lt'] = _lt\n payload['_eventId'] = 'submit'\n payload['submit'] = 
'登录'\n payload['captcha'] = captcha\n headers = {\n 'content-type': 'application/x-www-form-urlencoded'\n }\n print urllib.urlencode(payload)\n data = urllib.urlencode(payload)\n link = 'https://account.chsi.com.cn/passport/login'\n p = session.post(link, data=payload, headers=headers)\n response = session.get('http://my.chsi.com.cn/archive/xjarchive.action')\n tree = html.fromstring(response.text)\n table = tree.xpath('//*[@id=\"resultTable\"]')[0]\n enroll_time = tree.xpath('//*[@id=\"resultTable\"]/table/tr[9]/td[2]')[0].text.strip()\n grade = tree.xpath('//*[@id=\"resultTable\"]/table/tr[7]/td[2]')[0].text.strip()\n graduate_time = tree.xpath('//*[@id=\"resultTable\"]/table/tr[10]/td[2]')[0].text.strip()\n birth_day = tree.xpath('//*[@id=\"resultTable\"]/table/tr[3]/td[1]')[0].text.strip()\n school = tree.xpath('//*[@id=\"resultTable\"]/table/tr[5]/td[1]')[0].text.strip()\n name = tree.xpath('//*[@id=\"resultTable\"]/table/tr[1]/td[1]')[0].text.strip()\n sex = tree.xpath('//*[@id=\"resultTable\"]/table/tr[2]/td[1]')[0].text.strip()\n years = tree.xpath('//*[@id=\"resultTable\"]/table/tr[8]/td[1]')[0].text.strip()\n major = tree.xpath('//*[@id=\"resultTable\"]/table/tr[6]/td[2]')[0].text.strip()\n id_no = tree.xpath('//*[@id=\"resultTable\"]/table/tr[3]/td[2]')[0].text.strip()\n #print table.text_content()\n print '姓名', name\n print '性别', sex\n print '身份证号', id_no\n print '学校', school\n print '学制', years\n print '学历', grade\n print '入学时间', enroll_time\n print '毕业时间', graduate_time\n print '出生日期', birth_day\n print '专业', major\n\n result = {\n 'name' : name,\n 'sex' : sex,\n 'school' : school,\n 'years' : years,\n 'grade' : grade,\n 'enroll_time' : enroll_time,\n 'graduate_time' : graduate_time,\n 'birth_day' : birth_day,\n 'major' : major,\n 'id_no' : id_no\n }\n return result\n\n\ndef refresh_chsi_captcha(session):\n img_res = session.get('https://account.chsi.com.cn/passport/captcha.image')\n captcha_img = img_res.content\n import time\n key = 
str(time.time())+'.jpg'\n print 'key', key\n upload_img(key, captcha_img)\n return key\n" }, { "alpha_fraction": 0.6256239414215088, "alphanum_fraction": 0.6722129583358765, "avg_line_length": 22.115385055541992, "blob_id": "063bd2d809b5410bf26181e5aa9c31de39457a65", "content_id": "ad4200503a1826f11442d9096708d291d52b2f71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 81, "num_lines": 26, "path": "/migrations/versions/4bd7029c14b5_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4bd7029c14b5\nRevises: 3ee27e625d6d\nCreate Date: 2015-11-19 18:10:40.370035\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4bd7029c14b5'\ndown_revision = '3ee27e625d6d'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('create_time', sa.DateTime(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('item', 'create_time')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5850368142127991, "alphanum_fraction": 0.5883074402809143, "avg_line_length": 34.27536392211914, "blob_id": "f8a113f6b968d1a49eb653735191469f3f505183", "content_id": "c7f2ffc505fd498ed0bc1cce9dce98122c01b2f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "no_license", "max_line_length": 141, "num_lines": 69, "path": "/ops/comment.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import func\nfrom models import db\nfrom models import ItemComment\nfrom models import Item\nfrom models import Hospital\nfrom ops.utils import get_page\nfrom ops.utils import get_items\nfrom util.utils import format_rate\n\n\nclass CommentService(object):\n\n @staticmethod\n def comment_item(item_id, user_id, content, photos, rate, is_anonymous, order_id, is_re_comment=False):\n ''' 评价商品 '''\n comment = ItemComment(\n item_id=item_id, user_id=user_id,\n content=content, photos=photos, rate=rate,\n order_id=order_id,\n is_anonymous=is_anonymous,\n is_re_comment=is_re_comment)\n db.session.add(comment)\n db.session.commit()\n return comment.id\n\n @staticmethod\n def count_comments(where):\n return db.session.query(func.count(ItemComment.id)).filter(where).scalar()\n\n @staticmethod\n def get_paged_comments(**kw):\n return get_page(ItemComment, **kw)\n\n @staticmethod\n def get_comments_by_item_ids(item_ids, user_id=None, **kw):\n query = and_(\n ItemComment.user_id==user_id,\n ItemComment.item_id.in_(item_ids)\n )\n rows = ItemComment.query.filter(query).all()\n return [i.as_dict() for i in rows]\n\n @staticmethod\n def get_comments_by_order_ids(order_ids, user_id=None, **kw):\n query = and_(\n ItemComment.user_id==user_id,\n ItemComment.order_id.in_(order_ids)\n )\n rows = ItemComment.query.filter(query).all()\n return 
[i.as_dict() for i in rows]\n\n @staticmethod\n def get_comment(where):\n return ItemComment.query.filter(where).first()\n\n @staticmethod\n def rerate_hospital(hospital_id):\n comment_id_suq = db.session.query(Item.id).filter(Item.hospital_id==hospital_id).subquery()\n result = db.session.query(func.count(), func.sum(ItemComment.rate)).filter(ItemComment.item_id.in_(comment_id_suq)).all()\n if result:\n rate = format_rate(result[0][1]/result[0][0] if result[0][0] else 0)\n else:\n rate = format_rate(rate)\n Hospital.query.filter(Hospital.id==hospital_id).update({'rate':rate})\n db.session.commit()\n return rate\n\n \n\n\n" }, { "alpha_fraction": 0.6120312213897705, "alphanum_fraction": 0.6178125143051147, "avg_line_length": 31.308080673217773, "blob_id": "57e8d45975f2e833cb2e376a5cddd083f562ff9e", "content_id": "5cbedee76a3bcbc2b12a6c5e87ca0f5b137a4b3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6572, "license_type": "no_license", "max_line_length": 99, "num_lines": 198, "path": "/promote/views.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport math\nfrom collections import defaultdict\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\n\nfrom flask import request\nfrom flask import redirect\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import make_response\n\nfrom util.utils import jsonify_response\nfrom util.utils import template_response\nfrom util.utils import abbreviated_pages\nfrom util.sign import gen_promote_token\nfrom util.sign import del_cookie\nfrom util.sign import set_cookie\nfrom util.decorators import hospital_dec\nfrom util.decorators import promote_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.validators import IntChoiceField\nfrom 
util.validators import MobileField\nfrom models import db\nfrom models import Order\nfrom models import User\nfrom models import ItemCat\nfrom models import Item\nfrom models import ItemSubCat\nfrom models import ServiceCode\nfrom models import Promoter\nfrom ops.promote import PromoteService\nfrom ops.data import DataService\nfrom ops.item import ItemService\nfrom ops.comment import CommentService\nfrom ops.user import UserService\nfrom ops.order import OrderService\nfrom ops.bulks import fetch_credit_refs\nfrom ops.bulks import fetch_user_refs\nfrom ops.bulks import fetch_item_refs\nfrom ops.bulks import fetch_servicecode_refrence\nfrom constants import ResponseCode\nfrom constants import ORDER_STATUS\nfrom constants import SERVICE_STATUS\nfrom thirdparty.wechat import create_qrcode\n\n\n@promote_dec(required=True)\ndef index():\n ''' 首页 '''\n return 'promote'\n\n@promote_dec(required=False)\ndef login():\n ''' 登录 '''\n return render_template('promote/login.html')\n\n\nlogin_post_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='请输入账号'),\n 'passwd' : TextField(min_length=1, max_length=100, msg='请输入密码')\n }\n )\n@promote_dec(required=False, validator=login_post_validator)\ndef login_post():\n ''' 登录 '''\n name = request.valid_data.get('name')\n passwd = request.valid_data.get('passwd')\n if PromoteService.check_user(name, passwd):\n response = jsonify_response({'code':ResponseCode.SUCCESS}, with_response=True)\n token = gen_promote_token(name)\n set_cookie(response, 'promote_sign', token, 86400*30)\n return response\n assert 0, '用户名或密码错误'\n\n\n\ncreate_promoter_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='请输入用户名'),\n 'phone' : TextField(min_length=1, max_length=100, msg='手机号码')\n }\n )\n@promote_dec(required=True, validator=create_promoter_validator)\ndef create_promoter():\n ''' 创建推广员 '''\n name = request.valid_data.get('name')\n phone = request.valid_data.get('phone')\n passwd = ''\n creator = 
PromoteService.get_promoter_by_phone(request.name)\n assert creator, '请登录'\n create_by = creator.id\n status = 2\n promoter_id = PromoteService.create_promoter(phone, passwd, name, create_by, status=status)\n assert promoter_id, '创建失败'\n qrcode_id = PromoteService.create_promoter_qrcode(promoter_id)\n PromoteService.download_promoter_qrcode.delay(qrcode_id)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '创建成功'\n }\n return jsonify_response(result)\n\n\n\n@promote_dec(required=True)\ndef get_promoter_list():\n ''' 推广人员列表 '''\n creator = PromoteService.get_promoter_by_phone(request.name)\n assert creator, '请登录'\n phone = request.args.get('phone')\n sort_type = int(request.args.get('sort_type') or 0)\n _sort_dir = int(request.args.get('_sort_dir') or 0)\n sort_map = {\n 1 : 'follow_count',\n 2 : 'reg_count'\n }\n _sort = sort_map.get(sort_type)\n if _sort_dir:\n _sort_dir = 'ASC' #升序\n else:\n _sort_dir = 'DESC' #降序\n\n print phone, 'phone'\n page = int(request.args.get('page', 1) or 1)\n limit = 5\n start = (page-1)*limit\n where = None\n filters = [Promoter.create_by==creator.id]\n filters.append(Promoter.status!=0) #过滤掉已被删除的\n if phone:\n filters.append(Promoter.phone==phone)\n if filters: where = and_(*filters)\n\n total = PromoteService.count_promoters(where)\n page_info = abbreviated_pages(int(math.ceil(total/(limit*1.0))), page)\n has_more, item_list = PromoteService.get_paged_promoters(\n _sort=_sort, _sort_dir=_sort_dir, limit=limit, start=start, where=where)\n\n promoter_ids = [i['id'] for i in item_list]\n qrcodes = PromoteService.get_promoter_qrcodes_by_promoter_ids(promoter_ids)\n promoter_map = {i['promoter_id']:i['image'] for i in qrcodes}\n for item in item_list:\n item['image'] = promoter_map.get(item['id']) or ''\n result = {\n 'infos' : item_list,\n 'page_info' : page_info,\n 'total' : total,\n 'creator' : creator,\n 'phone' : phone,\n 'page' : page,\n 'sort_type' : sort_type,\n '_sort_dir' : _sort_dir\n }\n\n if 
request.args.get('json'):\n return jsonify_response(result)\n return render_template('promote/index.html',**result)\n\n\n\ndef logout():\n try:\n response = redirect('/promote/login/')\n del_cookie(response, 'promote_sign')\n return response\n except:\n import traceback\n traceback.print_exc()\n return 'server error'\n\n\n\n\ndel_promoter_validator = Inputs(\n {\n 'promoter_id' : IdField(msg='请输入推广员id'),\n }\n )\n@promote_dec(required=True, validator=del_promoter_validator)\ndef del_promoter():\n ''' 删除推广员 '''\n promoter_id = request.valid_data.get('promoter_id')\n count = PromoteService.del_promoter(promoter_id)\n\n msg = '删除成功'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg\n }\n\n return jsonify_response(result)\n\n\n\n" }, { "alpha_fraction": 0.6355382800102234, "alphanum_fraction": 0.6809338331222534, "avg_line_length": 26.535715103149414, "blob_id": "2477fac3187c86350f2cd58f74333f2cde6a30b5", "content_id": "da79b8b98d9a5042db949f20593115ef4b0f7688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/migrations/versions/3c990682c3f0_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3c990682c3f0\nRevises: 18e507e87862\nCreate Date: 2016-01-04 15:42:28.125990\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3c990682c3f0'\ndown_revision = '18e507e87862'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('daily_coupon', sa.Column('use_condition', sa.String(length=100), nullable=True))\n op.add_column('daily_coupon', sa.Column('use_time', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('daily_coupon', 'use_time')\n op.drop_column('daily_coupon', 'use_condition')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6537989974021912, "alphanum_fraction": 0.671875, "avg_line_length": 42.52000045776367, "blob_id": "133c0fd07c8387bbcb4305a8d713d6206d4ff86c", "content_id": "8bf8c784fff4263b44a5eccc343c96e4baf47d05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3264, "license_type": "no_license", "max_line_length": 82, "num_lines": 75, "path": "/migrations/versions/273db5f3044f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 273db5f3044f\nRevises: eb0dc173df2\nCreate Date: 2016-02-01 14:34:36.314339\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '273db5f3044f'\ndown_revision = 'eb0dc173df2'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\nimport models\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('redpack_question',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('content', sa.String(length=1000), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('redpack_user_question',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('qr_user_id', sa.Integer(), nullable=True),\n sa.Column('question_id', sa.Integer(), nullable=True),\n sa.Column('question', sa.String(length=1000), nullable=True),\n sa.Column('answer', sa.String(length=1000), nullable=True),\n sa.Column('is_custom', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('is_random', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['qr_user_id'], ['qr_code_user.id'], ),\n sa.ForeignKeyConstraint(['question_id'], ['redpack_question.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('redpack_pay',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('qr_user_id', sa.Integer(), nullable=True),\n sa.Column('user_question_id', sa.Integer(), nullable=True),\n sa.Column('order_no', sa.String(length=30), nullable=True),\n sa.Column('transaction_id', sa.String(length=100), nullable=True),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['qr_user_id'], ['qr_code_user.id'], ),\n sa.ForeignKeyConstraint(['user_question_id'], ['redpack_user_question.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('order_no')\n )\n op.create_table('redpack_pay_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('qr_user_id', sa.Integer(), 
nullable=True),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=True),\n sa.Column('user_question_id', sa.Integer(), nullable=True),\n sa.Column('pay_id', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['pay_id'], ['redpack_pay.id'], ),\n sa.ForeignKeyConstraint(['qr_user_id'], ['qr_code_user.id'], ),\n sa.ForeignKeyConstraint(['user_question_id'], ['redpack_user_question.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('redpack_pay_user')\n op.drop_table('redpack_pay')\n op.drop_table('redpack_user_question')\n op.drop_table('redpack_question')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6281406879425049, "alphanum_fraction": 0.6733668446540833, "avg_line_length": 21.961538314819336, "blob_id": "07436288e1d5a06c56b735df0c32dfd6702c9f46", "content_id": "edc68ae6d9ccfb31673d7e779001f9b780d124f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 74, "num_lines": 26, "path": "/migrations/versions/15e92c9ccee8_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 15e92c9ccee8\nRevises: 45f8da7eae76\nCreate Date: 2015-12-12 15:36:09.053853\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '15e92c9ccee8'\ndown_revision = '45f8da7eae76'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('trial', sa.Column('process', sa.Text(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('trial', 'process')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5759762525558472, "alphanum_fraction": 0.6290322542190552, "avg_line_length": 37.61475372314453, "blob_id": "0bbfdf9643a68a8f1d7785febba8c0ece167c7d4", "content_id": "3bd906a4ac93f411533ff0aa9a6dfaf560b1a7c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4768, "license_type": "no_license", "max_line_length": 125, "num_lines": 122, "path": "/settings.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport time\n\n\nMAX_TODAY_PASSWD_ATTEMPT= 6\nMAX_TODAY_VCODE_ATTEMPT = 10\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nSTATIC_DIR = os.path.join(BASE_DIR,'static/')\nTEMPLATE_DIR = os.path.join(BASE_DIR,'templates/')\n\n\nAPPEND_SLASH = True\n\nRUN_PORT = 80\n\nDEFAULT_CREDIT = 10000\n\nCONTACT = '021-60483658'\n\nWECHAT_APP_ID = 'wx284c24dbdca7b377test'\nWECHAT_APP_SECRET = 'df80d4d3213883e804eca31137c1de1f---'\nWECHAT_APP_TOKEN = 'vDKXGxLyN23PEX'\nWECHAT_MCHID = '1278286901'\nWECHAT_KEY = 'ggH73uoRJnYjqFXfRKWijidzAZxUgLvb'\nWECHAT_CERT_PEM = 'wx_cert/apiclient_cert.pem'\nWECHAT_KEY_PEM = 'wx_cert/apiclient_key.pem'\nAPP_WECHAT_CERT_PEM = 'wx_app_cert/apiclient_cert.pem'\nAPP_WECHAT_KEY_PEM = 'wx_app_cert/apiclient_key.pem'\n\nAPP_WECHAT_ID = 'wx1e8901446967b46b'\nAPP_WECHAT_SECRET = '6a683136a58f5d152daee995dde838f5'\nAPP_MCH_ID = '1305025101'\nAPP_WECHAT_KEY = 'y4JetJrzMMctjnVJUnRFvqitURMgwYsz'\n\nDOMAIN = '127.0.0.1'\nSERVER_NAME = '139.196.6.231'\n\n\nSECRET_USER_COOKIE = os.environ.get('APP_USER_COOKIE_SIGN', 'df2121280332d4d3213883e804eca31137c1de1f')\nADMIN_COOKIE_KEY = 'ADMIN_COOKIE_KEY' \nHOSPITAL_COOKIE_KEY = 'HOSPITAL_COOKIE_KEY'\nPROMOTE_COOKIE_KEY = 'PROMOTE_COOKIE_KEY_2015'\n\nREDIS_PORT = 6379\nREDIS_HOST = '127.0.0.1'\nMAIN_MYSQL_URI = 'mysql://root@localhost/main?charset=utf8mb4'\nIMAGE_HOST_URL_DOMAIN = 
'7xnpdb.com1.z0.glb.clouddn.com'\n\nDEFAULT_IMAGE = 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a4u9raim1rpcckf59vq7q1gth1LG11IG]7F5G5%7D861P1IUW[T.jpg'\nANONY_IMAGE = 'http://7xnpdb.com2.z0.glb.qiniucdn.com/o_1a4u9raim1rpcckf59vq7q1gth1LG11IG]7F5G5%7D861P1IUW[T.jpg'\n\nCACHE_DB_NO = 0\n\nif os.environ.get('APP_ENV')=='dev':\n print 'env', 'dev'\n from setting.dev import *\n from sql_profile import *\nelif os.environ.get('APP_ENV')=='production':\n print 'env', 'production'\n from setting.production import *\nelse:\n print 'env', 'local'\n from setting.local import *\nfrom sql_profile import *\n\n\nWX_PAY_NOTIFY_URL = \"http://{}/user/wx_pay_callback/\".format(SERVER_NAME)\nWX_REPAYMENT_NOTIFY_URL = \"http://{}/user/wx_repayment_callback/\".format(SERVER_NAME)\nWX_REDPACK_NOTIFY_URL = \"http://{}/user/wx_redpack_callback/\".format(SERVER_NAME)\n\nWX_APP_PAY_NOTIFY_URL = \"http://{}/api/wxapp_pay_callback/\".format(SERVER_NAME)\nWX_APP_REPAYMENT_NOTIFY_URL = \"http://{}/api/wxapp_repayment_callback/\".format(SERVER_NAME)\n\nfrom celery import Celery\ncelery = Celery()\ncelery.config_from_object('setting.celeryconfig')\n\n\nITEM_ORDER_CHOICES = [\n {'id':1, 'name':'综合排序'},\n {'id':2, 'name':'销量优先'},\n {'id':3, 'name':'低价优先'},\n {'id':4, 'name':'高价优先'},\n ]\n\nHOSPITAL_ORDER_CHOICES = [\n {'id':1, 'name':'综合排序'},\n {'id':2, 'name':'销量优先'},\n {'id':3, 'name':'好评优先'},\n ]\n\n\n\nCAT_ICONS = {\n 0: 'http://www.meifenfen.com/static/user/img/tuijian_hui.png',\n 1: 'http://www.meifenfen.com/static/user/img/pifu_hui.png',\n 2: 'http://www.meifenfen.com/static/user/img/yanbu_hui.png',\n 3: 'http://www.meifenfen.com/static/user/img/bibu_hui.png',\n 4: 'http://www.meifenfen.com/static/user/img/maofa_hui.png',\n 5: 'http://www.meifenfen.com/static/user/img/weizhengxing_hui.png',\n 6: 'http://www.meifenfen.com/static/user/img/yachi_hui.png',\n 7: 'http://www.meifenfen.com/static/user/img/xingti_hui.png',\n 8: 'http://www.meifenfen.com/static/user/img/pf-hui.png',\n 9: 
'http://www.meifenfen.com/static/user/img/banyongjiu-hui.png',\n 10: 'http://www.meifenfen.com/static/user/img/icon-cb0.png',\n 11: 'http://www.meifenfen.com/static/user/img/icon-eb0.png',\n }\nCAT_ICONS_ACTIVE = {\n 0: 'http://www.meifenfen.com/static/user/img/tuijian_hong.png',\n 1: 'http://www.meifenfen.com/static/user/img/pifu_hong.png',\n 2: 'http://www.meifenfen.com/static/user/img/yanbu_hong.png',\n 3: 'http://www.meifenfen.com/static/user/img/bibu_hong.png',\n 4: 'http://www.meifenfen.com/static/user/img/maofa_hong.png',\n 5: 'http://www.meifenfen.com/static/user/img/weizhengxing_hong.png',\n 6: 'http://www.meifenfen.com/static/user/img/yachi_hong.png',\n 7: 'http://www.meifenfen.com/static/user/img/xingti_hong.png',\n 8: 'http://www.meifenfen.com/static/user/img/pf-hong.png',\n 9: 'http://www.meifenfen.com/static/user/img/banyongjiu-hong.png',\n 10: 'http://www.meifenfen.com/static/user/img/icon-cb1.png',\n 11: 'http://www.meifenfen.com/static/user/img/icon-eb1.png',\n }\n\n" }, { "alpha_fraction": 0.5971068739891052, "alphanum_fraction": 0.6234251260757446, "avg_line_length": 36.7042236328125, "blob_id": "430e3a7781fab3d22d9fb8f74afa7648b993cf84", "content_id": "0daf3c8cca7fd6c9181ddb2ea237ae7259b883f0", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 11417, "license_type": "no_license", "max_line_length": 472, "num_lines": 284, "path": "/thirdparty/alipay/alipay.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "#coding=utf-8\n\nimport rsa\nimport types\nimport random\nimport base64\nimport urllib\nimport urllib2\nimport hashlib\nfrom urllib import urlencode, urlopen, quote\n\nfrom thirdparty.alipay import config\n\nsettings = config.settings\n\n\n\ndef smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):\n '''\n Returns a bytestring version of 's', encoded as specified in 'encoding'.\n If strings_only is True, don't convert (some) non-string-like objects.\n\n '''\n 
if strings_only and isinstance(s, (types.NoneType, int)):\n return s\n if not isinstance(s, basestring):\n try:\n return str(s)\n except UnicodeEncodeError:\n if isinstance(s, Exception):\n # An Exception subclass containing non-ASCII data that doesn't\n # know how to print itself properly. We shouldn't raise a\n # further exception.\n return ' '.join([smart_str(arg, encoding, strings_only,\n errors) for arg in s])\n return unicode(s).encode(encoding, errors)\n elif isinstance(s, unicode):\n return s.encode(encoding, errors)\n elif s and encoding != 'utf-8':\n return s.decode('utf-8', errors).encode(encoding, errors)\n else:\n return s\n\n\n#对数组排序并除去数组中的空值和签名参数 返回数组和链接串\ndef params_filter(params):\n ks = params.keys()\n ks.sort()\n newparams = {}\n prestr = ''\n for k in ks:\n v = params[k]\n k = smart_str(k, settings.ALIPAY_INPUT_CHARSET)\n if k not in ('sign', 'sign_type') and v != '':\n newparams[k] = smart_str(v, settings.ALIPAY_INPUT_CHARSET)\n prestr += '%s=\"%s\"&' % (k, newparams[k])\n prestr = prestr[:-1]\n\n return newparams, prestr\n\n\ndef params_filter_web(params):\n ks = params.keys()\n ks.sort()\n newparams = {}\n prestr = ''\n for k in ks:\n v = params[k]\n k = smart_str(k, settings.ALIPAY_INPUT_CHARSET)\n if k not in ('sign', 'sign_type') and v != '':\n newparams[k] = smart_str(v, settings.ALIPAY_INPUT_CHARSET)\n prestr += '%s=%s&' % (k, newparams[k])\n prestr = prestr[:-1]\n return newparams, prestr\n\n\ndef params_sign_md5(prestr, key):\n return hashlib.md5(prestr+key).hexdigest()\n\n\ndef params_sign_rsa(data):\n with open('thirdparty/alipay/rsa_private_key.pem') as privatefile:\n p = privatefile.read()\n privkey = rsa.PrivateKey.load_pkcs1(p)\n\n signature = rsa.sign(data, privkey, 'SHA-1')\n signature_encode = quote(base64.b64encode(signature),'')\n return signature_encode\n\n# def params_sign_0(data):\n# key = RSA.importKey(settings.PRIVATE_KEY)\n# h = SHA.new(data)\n# signer = PKCS1_v1_5.new(key)\n# signature = signer.sign(h)\n# return 
base64.b64encode(signature)\n\n# def params_sign_1(data):\n# key = load_privatekey(FILETYPE_PEM, open(\"alipay/rsa_private_key.pem\").read())\n# d = sign(key, data, 'sha1') #d为经过SHA1算法进行摘要、使用私钥进行签名之后的数据 \n# b = base64.b64encode(d) #将d转换为BASE64的格式\n# return b\n\ndef params_decrypt(data):\n with open('thirdparty/alipay/rsa_private_key.pem') as privatefile:\n p = privatefile.read()\n privkey = rsa.PrivateKey.load_pkcs1(p)\n res_data = rsa.decrypt(base64.b64decode(data), privkey)\n return res_data\n\ndef params_verify(data, signature):\n with open('thirdparty/alipay/alipay_public_key.pem') as publicfile:\n p = publicfile.read()\n publickey = rsa.PublicKey.load_pkcs1_openssl_pem(p)\n res = rsa.verify(data,base64.b64decode(signature),publickey)\n return res \n\n# 即时到账交易接口\ndef mobile_sdk_pay(tn, subject, body, total_fee, notify_url=settings.ALIPAY_NOTIFY_URL):\n params = {}\n params['service'] = 'mobile.securitypay.pay'\n # 获取配置文件\n params['partner'] = settings.ALIPAY_PARTNER\n params['notify_url'] = quote(notify_url,'')\n params['_input_charset'] = settings.ALIPAY_INPUT_CHARSET\n params['seller_id'] = settings.ALIPAY_SELLER_EMAIL #卖家支付宝账号\n # 从订单数据中动态获取到的必填参数\n params['out_trade_no'] = tn # 订单号\n params['subject'] = subject # 订单名称,显示在支付宝收银台里的“商品名称”里,显示在支付宝的交易管理的“商品名称”的列表里。\n params['payment_type'] = '1'\n params['body'] = body # 订单描述、订单详细、订单备注,显示在支付宝收银台里的“商品描述”里\n params['total_fee'] = total_fee # 订单总金额,显示在支付宝收银台里的“应付总额”里\n \n params, prestr = params_filter(params)\n \n params['sign'] = params_sign_rsa(prestr)\n params['sign_type'] = settings.ALIPAY_SIGN_TYPE\n\n prestr = prestr + '&sign=\"%s\"'%params['sign'] + '&sign_type=\"%s\"'%params['sign_type']\n print prestr,\" prestr\\n\"\n return prestr\n\ndef mobile_sdk_repayment(tn, subject, body, total_fee, notify_url=settings.ALIPAY_REPAYMENT_NOTIFY_URL):\n ''' 还款参数 '''\n return mobile_sdk_pay(tn, subject, body, total_fee, notify_url)\n\n\ndef mobile_web_pay(tn, subject, body, total_fee):\n params = {}\n params['service'] 
= 'alipay.wap.trade.create.direct'\n # 获取配置文件\n params['partner'] = settings.ALIPAY_PARTNER\n params['format'] = 'xml'\n params['v'] = '2.0'\n params['req_id'] = tn #请求号\n params['sec_id'] = 'MD5'\n params['_input_charset'] = settings.ALIPAY_INPUT_CHARSET\n\n params['req_data'] = \"<direct_trade_create_req><notify_url>\" + settings.ALIPAY_NOTIFY_URL + \"</notify_url><call_back_url>\" + settings.ALIPAY_RETURN_URL + \"</call_back_url><seller_account_name>\" + settings.ALIPAY_SELLER_EMAIL + \"</seller_account_name><out_trade_no>\" + tn + \"</out_trade_no><subject>\" + subject + \"</subject><total_fee>\" + str(total_fee) + \"</total_fee><merchant_url>\" + settings.ALIPAY_RETURN_URL + \"</merchant_url></direct_trade_create_req>\";\n\n params['subject'] = subject # 订单名称,显示在支付宝收银台里的“商品名称”里,显示在支付宝的交易管理的“商品名称”的列表里。\n params['out_trade_no'] = tn # 订单号\n params['total_fee'] = total_fee # 订单总金额,显示在支付宝收银台里的“应付总额”里\n params['seller_account_name'] = settings.ALIPAY_SELLER_EMAIL\n params['seller_email'] = settings.ALIPAY_SELLER_EMAIL\n #params['call_back_url'] = 'http://restapi.meiya.me/alipay/web/CallBack'\n params['call_back_url'] = 'http://xtestapi.meiya.me/alipay/web/success/CallBack'\n params['notify_url'] = settings.ALIPAY_NOTIFY_URL\n #params['merchant_url'] = settings.ALIPAY_RETURN_URL\n params['pay_expire'] = '300'\n \n params, prestr = params_filter_web(params)\n params['sign'] = params_sign_md5(prestr, settings.ALIPAY_KEY)\n \n params_data = urllib.urlencode(params)\n request = urllib2.Request(url=settings.WEB_GATEWAY, data=params_data)\n result_data = urllib2.urlopen(request)\n result = result_data.read()\n result = urllib.unquote(result).decode('utf8')\n res_data = result.split('>')\n print result\n request_token = ''\n for s in res_data:\n if '</request_token' in s:\n request_token = s.split('<')[0]\n if request_token:\n params['req_data'] = '<auth_and_execute_req><request_token>' + request_token + '</request_token></auth_and_execute_req>'\n params['request_token'] = 
request_token\n params['service'] = 'alipay.wap.auth.authAndExecute'\n params, prestr = params_filter_web(params) \n params['sign'] = params_sign_md5(prestr, settings.ALIPAY_KEY)\n url = settings.WEB_GATEWAY + prestr + '&sign=%s'%params['sign']\n return url\n else:\n return ''\n\n\ndef notify_verify(post, payment_type=None):\n if payment_type==4:\n params, prestr = params_filter(post)\n mysign = params_sign_rsa(prestr)\n if mysign != post.get('sign'):\n return False\n if payment_type==16:\n params, prestr = params_filter_web(post)\n mysign = params_sign_md5(prestr, settings.ALIPAY_KEY)\n if mysign != post.get('sign'):\n return False\n # 二级验证--查询支付宝服务器此条信息是否有效\n params = {}\n params['partner'] = settings.ALIPAY_PARTNER\n params['notify_id'] = post.get('notify_id')\n if settings.ALIPAY_TRANSPORT == 'https':\n params['service'] = 'notify_verify'\n gateway = 'https://mapi.alipay.com/gateway.do'\n else:\n gateway = 'http://notify.alipay.com/trade/notify_query.do'\n veryfy_result = urlopen(gateway, urlencode(params)).read()\n if veryfy_result.lower().strip() == 'true':\n return True\n\n return False\n\n\ndef get_batch_no():\n from util.utils import dt_obj\n from util.utils import random_no\n current_time = dt_obj.now()\n from util.utils import get_time_str_from_dt\n return get_time_str_from_dt(current_time, '%Y%m%d') + random_no()\n\n\ndef gen_format_detail(data, reason):\n ''' '2016010521001004850065282281^0.02^美分分退款\n refund_order({'2016010521001004850065282281':'0.02'}, '美分分退款')\n '''\n result = []\n for order_no in data:\n fee = data[order_no]\n result.append('{}^{}^{}'.format(order_no, fee, reason))\n return 
'#'.join(result)\n\n'''\n文档链接:\nhttps://doc.open.alipay.com/doc2/detail.htm?spm=0.0.0.0.bHVaSg&treeId=66&articleId=103600&docType=1\nhttps://mapi.alipay.com/gateway.do?seller_email=Jier1105%40alitest.com&batch_num=1&refund_date=2011-01-12+11%3A21%3A00&notify_url=http%3A%2F%2Fapi.test.alipay.net%2Fatinterface%2Freceive_notify.htm&sign=9b3426cac65d36f64bffbfbc6ce50549&service=refund_fastpay_by_platform_pwd&partner=2088101008267254&detail_data=2011011201037066%5E5.00%5E%D0%AD%C9%CC%CD%CB%BF%EE&sign_type=MD5&batch_no=201101120001\n'''\nfrom util.utils import dt_obj\nimport urllib\ndef refund_order(refund_data, reason):\n data = {\n 'service': 'refund_fastpay_by_platform_pwd',\n 'partner': settings.ALIPAY_PARTNER,\n '_input_charset': 'utf-8',\n 'sign_type': 'MD5',\n 'notify_url': '',\n 'seller_email': '[email protected]',\n 'seller_user_id': settings.ALIPAY_PARTNER,\n 'refund_date': str(dt_obj.now())[:19],\n 'batch_no': get_batch_no(),\n 'batch_num': '1',\n 'detail_data': gen_format_detail(refund_data, reason)\n }\n params, prestr = params_filter_web(data) \n params['sign'] = params_sign_md5(prestr, settings.ALIPAY_KEY)\n print params\n return 'https://mapi.alipay.com/gateway.do?'+urllib.urlencode(params)\n\n\ndef test_demo():\n ''' '''\n tn = '0819145412-' + str(random.randint(1000, 9999))\n subject = '拉拉啊拉拉阿拉'\n body = 'd司机好吧司机呢'\n total_fee = '0.01'\n return mobile_web_pay(tn, subject, body, total_fee)\n\n\nif __name__=='__main__':\n tn = '0819145412-' + str(random.randint(1000, 9999))\n subject = '拉拉啊拉拉阿拉'\n body = 'd司机好吧司机呢'\n total_fee = '0.01'\n mobile_web_pay(tn, subject, body, total_fee)\n # url = mobile_web_order_pay(tn, subject, body, total_fee)\n \n \n" }, { "alpha_fraction": 0.5978391170501709, "alphanum_fraction": 0.6002401113510132, "avg_line_length": 27.724138259887695, "blob_id": "b489e16f5030710bfc2c7d5b217d4f01d33d1dac", "content_id": "701e94477925b1a11ee515af4adcb1e88d6617c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1682, "license_type": "no_license", "max_line_length": 77, "num_lines": 58, "path": "/ops/notification.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\n\nfrom models import db\nfrom models import Article\nfrom models import Notification\n\nfrom util.utils import dt_obj\nfrom util.utils import date_to_datetime\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom ops.utils import get_items\n\n\nclass NotificationService(object):\n ''' 通知 '''\n\n @staticmethod\n def create_article(title, image, desc, link):\n article = Article(title=title, image=image, desc=desc, link=link)\n db.session.add(article)\n db.session.commit()\n return article.id\n\n @staticmethod\n def send_notification(article_id, user_id):\n notification = Notification(article_id=article_id, user_id=user_id)\n db.session.add(notification)\n db.session.commit()\n return notification.id\n\n @staticmethod\n def mark_read(notification_id):\n query = and_(\n Notification.id==notification_id,\n Notification.status==0\n )\n count = Notification.query.filter(query).update({'status':1})\n db.session.commit()\n return count\n\n @staticmethod\n def count_unread(user_id):\n ''' 未读消息计数 '''\n where = and_(\n Notification.user_id==user_id,\n Notification.status==0\n )\n return count_items(Notification, where)\n\n @staticmethod\n def get_paged_notification_list(**kw):\n return get_page(Notification, {}, **kw)\n\n @staticmethod\n def get_articles_by_ids(article_ids):\n return get_items(Article, article_ids)\n" }, { "alpha_fraction": 0.6572279930114746, "alphanum_fraction": 0.695231020450592, "avg_line_length": 34.31578826904297, "blob_id": "2888460dbb0cc5f4cfcd4584c6210f1b5e7ad27a", "content_id": "979e42edce32dda345b53ff55ec5446f6efcae67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "no_license", 
"max_line_length": 99, "num_lines": 38, "path": "/migrations/versions/3355a78241bf_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3355a78241bf\nRevises: 4ba650ba661f\nCreate Date: 2015-12-07 14:33:58.967971\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3355a78241bf'\ndown_revision = '4ba650ba661f'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('promoter_qrcode')\n op.add_column('qrcode', sa.Column('act_type', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('qrcode', 'act_type')\n op.create_table('promoter_qrcode',\n sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n sa.Column('ticket', mysql.VARCHAR(length=100), nullable=True),\n sa.Column('promoter_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),\n sa.Column('qrcode_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['promoter_id'], [u'promoter.id'], name=u'promoter_qrcode_ibfk_1'),\n sa.ForeignKeyConstraint(['qrcode_id'], [u'qrcode.id'], name=u'promoter_qrcode_ibfk_2'),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset=u'utf8',\n mysql_engine=u'InnoDB'\n )\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5722681879997253, "alphanum_fraction": 0.573356568813324, "avg_line_length": 33.681819915771484, "blob_id": "6df490af81e51cf0ee15b764e5206568fef27092", "content_id": "e9780461f6afca10b7010896cbd2fbc442b186a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4636, "license_type": "no_license", "max_line_length": 113, "num_lines": 132, "path": "/ops/activity.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# 
-*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\n\nfrom models import db\nfrom models import ActivityItem\nfrom models import Activity\n\nfrom util.utils import dt_obj\nfrom util.utils import date_to_datetime\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom ops.utils import get_items\n\n\nclass ActivityService(object):\n ''' 活动 '''\n\n @staticmethod\n def create_activity(title, desc, start, end, city_id):\n start_time = date_to_datetime(start, '%Y-%m-%d %H:%M')\n end_time = date_to_datetime(end, '%Y-%m-%d %H:%M')\n activity = Activity(title=title, desc=desc, start_time=start_time, end_time=end_time, city_id=city_id)\n db.session.add(activity)\n db.session.commit()\n return activity.id\n\n @staticmethod\n def del_item_activitys(item_id, activity_id):\n count = ActivityItem.query.filter(\n and_(\n ActivityItem.item_id==item_id,\n ActivityItem.activity_id!=activity_id\n )).delete()\n db.session.commit()\n return count\n\n @staticmethod\n def exists_activity_time(begin_time, end_time, city_id, ignore_activity_id=None):\n ''' 判断活动是否存在\n 返回:\n true 时间重复\n false 时间可以\n '''\n begin_time = date_to_datetime(begin_time[:16], '%Y-%m-%d %H:%M')\n end_time = date_to_datetime(end_time[:16], '%Y-%m-%d %H:%M')\n if not ignore_activity_id:\n all_activitys = Activity.query.filter(Activity.city_id==city_id).all()\n else:\n query = and_(\n Activity.city_id==city_id,\n Activity.id!=ignore_activity_id\n )\n all_activitys = Activity.query.filter(query).all()\n for act in all_activitys:\n if act.start_time<=begin_time<=act.end_time:\n return True\n if act.start_time<=end_time<=act.end_time:\n return True\n if end_time>act.end_time and begin_time<act.start_time:\n return True\n return False\n\n @staticmethod\n def update_activity(activity_id, **kw):\n count = Activity.query.filter(Activity.id==activity_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def add_activity_item(activity_id, item_id):\n activity_item = 
ActivityItem(activity_id=activity_id, item_id=item_id)\n db.session.add(activity_item)\n db.session.commit()\n\n @staticmethod\n def set_activity_items(activity_id, ids):\n rows = db.session.query(ActivityItem.item_id).filter(ActivityItem.activity_id==activity_id).all()\n item_ids = [ i.item_id for i in rows]\n to_del_ids = set(item_ids) - set(ids)\n to_add_ids = set(ids) - set(item_ids)\n query = and_(\n ActivityItem.activity_id==activity_id,\n ActivityItem.item_id.in_(list(to_del_ids))\n )\n if to_del_ids: ActivityItem.query.filter(query).delete(synchronize_session=False)\n db.session.commit()\n for item_id in to_add_ids:\n i = ActivityItem(item_id=item_id, activity_id=activity_id)\n db.session.add(i)\n db.session.commit()\n\n @staticmethod\n def get_activity_dict_by_id(item_id):\n activity = Activity.query.filter(Activity.id==item_id).first()\n if activity: return activity.as_dict()\n\n @staticmethod\n def rm_activity_item(activity_id, item_id):\n query = and_(\n ActivityItem.activity_id==activity_id,\n ActivityItem.item_id==item_id\n )\n count = ActivityItem.query.filter(query).delete()\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_activity_items(**kw):\n return get_page(ActivityItem, {}, **kw)\n\n @staticmethod\n def get_paged_activitys(**kw):\n return get_page(Activity, {}, **kw)\n\n @staticmethod\n def count_activitys(where=None):\n return count_items(Activity, where=where)\n\n @staticmethod\n def get_activitys_by_ids(activity_ids):\n return get_items(Activity, activity_ids)\n\n @staticmethod\n def get_current_activity():\n current_time = dt_obj.now()\n query = and_(\n Activity.start_time<current_time,\n Activity.end_time>=current_time\n )\n activity = Activity.query.filter(query).first()\n if activity: return activity.as_dict()\n \n \n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6275964379310608, "alphanum_fraction": 0.7062314748764038, "avg_line_length": 24.923076629638672, "blob_id": "7478999d767a0de9ca9d291b40a820413bc09adf", 
"content_id": "398dc5dbe90d9749c6d20a2ffd9bf4055cb5a78a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 119, "num_lines": 26, "path": "/migrations/versions/18e20ed0da8d_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 18e20ed0da8d\nRevises: 35c5bd051538\nCreate Date: 2016-02-05 10:42:00.617370\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '18e20ed0da8d'\ndown_revision = '35c5bd051538'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport models\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('redpack_user_question', sa.Column('money', models.MoneyField(precision=10, scale=2), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('redpack_user_question', 'money')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6722995638847351, "alphanum_fraction": 0.6729622483253479, "avg_line_length": 40.28767013549805, "blob_id": "711324b87957e9749c62e0b7b1589454c7df0756", "content_id": "c5d5b4be78e76bb4f89289384e6436890e6b41e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3018, "license_type": "no_license", "max_line_length": 116, "num_lines": 73, "path": "/hospital/urls.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\n\nfrom hospital.views import index\nfrom hospital.views import book_surgery\nfrom hospital.views import confirm_surgery\nfrom hospital.views import cancel_book\nfrom hospital.views import cancel_surgery\nfrom hospital.views import finish_order\nfrom hospital.views import login_post\nfrom hospital.views 
import login\nfrom hospital.views import get_hospital_cats\nfrom hospital.views import get_orders\nfrom hospital.views import change_passwd_post\nfrom hospital.views import change_passwd\nfrom hospital.views import search_order_list\nfrom hospital.views import home\nfrom hospital.views import cat\nfrom hospital.views import reset_passwd\nfrom hospital.views import reset_passwd_post\nfrom hospital.views import cat_items\nfrom hospital.views import logout\nfrom hospital.views import get_paged_orders\n\n\nhospital_api = Blueprint('hospital_api', __name__,\n template_folder='templates')\n\n\nhospital_api.add_url_rule('/', 'index', index)\n\nhospital_api.add_url_rule('/get_paged_orders', 'get_paged_orders', get_paged_orders)\n\n\nhospital_api.add_url_rule('/book_surgery/', 'book_surgery', book_surgery, methods=['POST', 'GET'])\nhospital_api.add_url_rule('/confirm_surgery/', 'confirm_surgery', confirm_surgery, methods=['POST', 'GET'])\n\nhospital_api.add_url_rule('/cancel_book/', 'cancel_book', cancel_book, methods=['POST', 'GET'])\nhospital_api.add_url_rule('/cancel_surgery/', 'cancel_surgery', cancel_surgery, methods=['POST', 'GET'])\n\nhospital_api.add_url_rule('/finish_order/', 'finish_order', finish_order, methods=['POST', 'GET'])\n\n\nhospital_api.add_url_rule('/login/', 'login', login)\nhospital_api.add_url_rule('/login_post/', 'login_post', login_post, methods=['POST', 'GET'])\n\n\nhospital_api.add_url_rule('/get_hospital_cats/', 'get_hospital_cats', get_hospital_cats, methods=['POST', 'GET'])\nhospital_api.add_url_rule('/get_orders/', 'get_orders', get_orders, methods=['POST', 'GET'])\n\n\nhospital_api.add_url_rule('/change_passwd_post/', 'change_passwd_post', change_passwd_post, methods=['POST', 'GET'])\n\n\nhospital_api.add_url_rule('/search_order_list/', 'search_order_list', search_order_list, methods=['POST', 'GET'])\n\nhospital_api.add_url_rule('/home/', 'home', home, methods=['POST', 'GET'])\nhospital_api.add_url_rule('/cat/', 'cat', cat, methods=['POST', 
'GET'])\n\n\nhospital_api.add_url_rule('/reset_passwd/', 'reset_passwd', reset_passwd, methods=['POST', 'GET'])\n\n\nhospital_api.add_url_rule('/reset_passwd_post/', 'reset_passwd_post', reset_passwd_post, methods=['POST', 'GET'])\n\nhospital_api.add_url_rule('/cat_items/', 'cat_items', cat_items, methods=['POST', 'GET'])\n\n\n\nhospital_api.add_url_rule('/logout/', 'logout', logout, methods=['POST', 'GET'])\n\n\n\n\n" }, { "alpha_fraction": 0.5082690119743347, "alphanum_fraction": 0.5109466314315796, "avg_line_length": 32.23560333251953, "blob_id": "365d06c46b91ae8c769b05ee89deee851e44b07d", "content_id": "3c445564e3e25b262c1b7db539df8e44d728b8b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6411, "license_type": "no_license", "max_line_length": 125, "num_lines": 191, "path": "/ops/coupon.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import or_\nfrom sqlalchemy import and_\nfrom sqlalchemy import func\n\nfrom util.utils import dt_obj\nfrom util.utils import day_delta\nfrom models import db\nfrom models import UserCoupon\nfrom models import Coupon\nfrom ops.utils import get_page\nfrom ops.utils import count_items\n\n\nclass CouponService(object):\n\n @staticmethod\n def create_coupon(coupon_cat, cat_id, title, price, effective, item_id=None, sub_cat_id=None, is_trial=False, need=0):\n coupon = Coupon(\n item_id=item_id, sub_cat_id=sub_cat_id, title=title, coupon_cat=coupon_cat,\n cat_id=cat_id, price=price, effective=effective, is_trial=is_trial, need=need\n )\n db.session.add(coupon)\n db.session.commit()\n return coupon.id\n\n @staticmethod\n def update_coupon(coupon_id, **kw):\n count = Coupon.query.filter(Coupon.id==coupon_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def send_user_coupon(user_id, coupon_id):\n now = dt_obj.now()\n coupon = Coupon.query.filter(Coupon.id==coupon_id).first()\n\n price = 
coupon.price\n effective = coupon.effective\n coupon_cat = coupon.coupon_cat\n cat_id = coupon.cat_id\n sub_cat_id = coupon.sub_cat_id\n item_id = coupon.item_id\n title = coupon.title\n need = coupon.need\n is_trial = coupon.is_trial\n user_coupon = UserCoupon(\n title = title,\n is_trial = is_trial,\n remark = coupon.remark,\n user_id = user_id,\n price = price,\n need = need,\n cat_id = cat_id,\n coupon_cat = coupon_cat,\n item_id = item_id,\n sub_cat_id = sub_cat_id,\n coupon_id = coupon_id,\n end_time = now + day_delta*(effective/86400),\n create_time = now,\n )\n db.session.add(user_coupon)\n db.session.commit()\n return user_coupon.id\n\n @staticmethod\n def cat_query(cat_id_list):\n cat_query = or_()\n for cat_id in cat_id_list or []:\n tmp = and_(\n UserCoupon.cat_id==cat_id,\n UserCoupon.coupon_cat==1\n )\n cat_query.append(tmp)\n return cat_query\n\n @staticmethod\n def sub_cat_query(sub_cat_id_list):\n sub_cat_query = or_()\n for sub_cat_id in sub_cat_id_list or []:\n tmp = and_(\n UserCoupon.sub_cat_id==sub_cat_id,\n UserCoupon.coupon_cat==2\n )\n sub_cat_query.append(tmp)\n return sub_cat_query\n\n @staticmethod\n def get_user_coupon(user_coupon_id, user_id=None, item_id=None, cat_id_list=None, sub_cat_id_list=None, item_price=None):\n ''' '''\n cat_query = CouponService.cat_query(cat_id_list)\n sub_cat_query = CouponService.sub_cat_query(sub_cat_id_list) \n\n query = and_()\n if item_id:\n or_query = or_(\n UserCoupon.coupon_cat==0,\n sub_cat_query,\n cat_query,\n and_(\n UserCoupon.coupon_cat==3,\n UserCoupon.item_id==item_id\n )\n )\n else:\n or_query = None\n query.append(and_(\n UserCoupon.id==user_coupon_id,\n UserCoupon.status==0,\n UserCoupon.end_time>dt_obj.now(),\n ))\n if or_query is not None:\n query.append(or_query)\n if user_id: query.append(UserCoupon.user_id==user_id)\n where = or_(\n and_(\n query,\n UserCoupon.need==0\n ),\n and_(\n query,\n UserCoupon.need<=item_price\n )\n )\n return UserCoupon.query.filter(where).first()\n\n 
@staticmethod\n def get_user_coupon_by_id(user_coupon_id):\n coupon = UserCoupon.query.filter(UserCoupon.id==user_coupon_id).first()\n if coupon: return coupon.as_dict()\n\n @staticmethod\n def get_paged_user_coupons(**kw):\n return get_page(UserCoupon, {}, **kw)\n\n @staticmethod\n def count_coupon(where):\n return db.session.query(func.count(UserCoupon.id)).filter(where).scalar()\n\n @staticmethod\n def update_user_coupon_status(where, status):\n count = UserCoupon.query.filter(where).update({'status':status})\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_coupons(**kw):\n return get_page(Coupon, {}, **kw)\n\n @staticmethod\n def count(where):\n return count_items(Coupon, where)\n\n @staticmethod\n def get_coupon(coupon_id):\n ''' '''\n coupon = Coupon.query.filter(Coupon.id==coupon_id).first()\n if coupon:\n return coupon.as_dict()\n\n @staticmethod\n def get_coupon_by_ids(coupon_ids):\n ''' '''\n coupons = Coupon.query.filter(Coupon.id.in_(coupon_ids)).all()\n return [ i.as_dict() for i in coupons]\n\n @staticmethod\n def resend_user_coupon(from_id, to_id, user_coupon_id):\n ''' 转赠优惠券 '''\n query = and_(\n UserCoupon.user_id==from_id,\n UserCoupon.id==user_coupon_id,\n UserCoupon.status==0\n )\n coupon = UserCoupon.query.filter(query).first()\n assert coupon, '优惠券不存在'\n assert coupon.status==0 and coupon.end_time>=dt_obj.now(), '优惠券已使用或已过期'\n\n count = UserCoupon.query.filter(query).update({'status':1})\n db.session.commit()\n assert count, '优惠券已使用或已过期'\n\n copy_coupon = coupon.as_dict()\n copy_coupon['user_id'] = to_id\n copy_coupon['status'] = 0\n copy_coupon.pop('id')\n new_coupon = UserCoupon(**copy_coupon)\n db.session.add(new_coupon)\n db.session.commit()\n return count\n\n" }, { "alpha_fraction": 0.613382875919342, "alphanum_fraction": 0.6716232895851135, "avg_line_length": 27.821428298950195, "blob_id": "e4c93d16461550bfaad025700991eb18e5fa8e9b", "content_id": "6ee7762f36a46b60da29edd5b829808f9e382d53", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 96, "num_lines": 28, "path": "/migrations/versions/3c706c57fa0_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3c706c57fa0\nRevises: 2ce138017f09\nCreate Date: 2015-12-11 16:07:38.421306\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3c706c57fa0'\ndown_revision = '2ce138017f09'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(u'trial_apply_ibfk_3', 'trial_apply', type_='foreignkey')\n op.create_foreign_key(None, 'trial_apply', 'user_coupon', ['coupon_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'trial_apply', type_='foreignkey')\n op.create_foreign_key(u'trial_apply_ibfk_3', 'trial_apply', 'coupon', ['coupon_id'], ['id'])\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6464552283287048, "alphanum_fraction": 0.6893656849861145, "avg_line_length": 32.5, "blob_id": "4e72bee39cd26ba3649dfccca74e73debc6433f0", "content_id": "5a63539c9ab43015b8d4c99e666706693225a765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 103, "num_lines": 32, "path": "/migrations/versions/2eed88b994ed_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2eed88b994ed\nRevises: 3621ae6c4339\nCreate Date: 2015-10-31 10:12:42.506018\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2eed88b994ed'\ndown_revision = '3621ae6c4339'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('repayment', sa.Column('coupon_id', sa.Integer(), nullable=True))\n op.add_column('repayment', sa.Column('pay_method', mysql.TINYINT(display_width=1), nullable=False))\n op.create_unique_constraint(None, 'repayment', ['coupon_id'])\n op.create_foreign_key(None, 'repayment', 'user_coupon', ['coupon_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'repayment', type_='foreignkey')\n op.drop_constraint(None, 'repayment', type_='unique')\n op.drop_column('repayment', 'pay_method')\n op.drop_column('repayment', 'coupon_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6116883158683777, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 26.5, "blob_id": "fb1c05fe450cfdeacced4b7a11ff507c7d783914", "content_id": "b8ea073ef84a3fa65312468c4ebe2d712c125626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 89, "num_lines": 28, "path": "/migrations/versions/37878b76721_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 37878b76721\nRevises: 3107ca470fdf\nCreate Date: 2016-02-27 10:16:36.087236\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '37878b76721'\ndown_revision = '3107ca470fdf'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('school', sa.Column('pics_count', sa.Integer(), nullable=True))\n op.create_index(op.f('ix_school_pics_count'), 'school', ['pics_count'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_school_pics_count'), table_name='school')\n op.drop_column('school', 'pics_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5992576479911804, "alphanum_fraction": 0.6039815545082092, "avg_line_length": 32.79087448120117, "blob_id": "2a96b3f88e8ca54345536c696806d80b68be8243", "content_id": "49e0b0be977429c7d7bfbed7479554de9916b214", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9323, "license_type": "no_license", "max_line_length": 98, "num_lines": 263, "path": "/user/auth.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport datetime\n\nfrom flask import request\nfrom flask import redirect\nfrom flask import render_template\n\nfrom sqlalchemy import and_\nfrom models import ItemComment \nfrom util.utils import jsonify_response\nfrom util.utils import random_str\nfrom util.decorators import wechat_loggin_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.sign import sign_user\nfrom util.sign import set_cookie\nfrom util.sign import del_cookie\nfrom ops.bulks import fetch_user_refs\nfrom ops.item import ItemService\nfrom ops.user import UserService\nfrom ops.promote import PromoteService\nfrom ops.log import LogService\nfrom ops.cache import SmsCache\nfrom ops.cache import InvalidUserPasswdCache\nfrom ops.cache import InvalidUserSignupVcodeCache\nfrom ops.cache import InvalidUserResetVcodeCache\nfrom ops.comment import CommentService\nfrom constants import ResponseCode\nfrom thirdparty.sms import send_sms\nfrom thirdparty.sms import gen_vcode\nfrom thirdparty.wechat import exchange_code_for_token\nfrom settings import MAX_TODAY_PASSWD_ATTEMPT\nfrom settings import MAX_TODAY_VCODE_ATTEMPT\nfrom settings import CONTACT\n\n\nget_vcode_validator = Inputs(\n {\n 'phone' : 
MobileField(msg='请输入手机号'),\n }\n)\n@wechat_loggin_dec(required=False, validator=get_vcode_validator, app=True)\ndef get_vcode():\n ''' 获取验证码 '''\n phone = request.valid_data.get('phone')\n vcode = gen_vcode()\n user = UserService.get_user_by_phone(phone)\n assert user, '手机号不存在'\n\n send_sms.delay(phone, vcode)\n\n SmsCache.set_vcode(phone, vcode)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '发送成功'\n }\n return jsonify_response(result)\n\n\n\nget_reg_vcode_validator = Inputs(\n {\n 'phone' : MobileField(msg='请输入手机号'),\n }\n)\n@wechat_loggin_dec(required=False, validator=get_reg_vcode_validator, app=True)\ndef get_reg_vcode():\n ''' 获取注册验证码 '''\n phone = request.valid_data.get('phone')\n user = UserService.get_user_by_phone(phone)\n assert not user, '手机号码已存在'\n vcode = gen_vcode()\n send_sms.delay(phone, vcode)\n\n SmsCache.set_vcode(phone, vcode)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '发送成功'\n }\n return jsonify_response(result)\n\n\nreset_passwd_validator = Inputs(\n {\n 'phone' : MobileField(msg='请输入手机号'),\n 'passwd' : TextField(msg='请输入密码'),\n 'vcode' : TextField(msg='请输入验证码'),\n }\n)\n@wechat_loggin_dec(required=False, validator=reset_passwd_validator)\ndef reset_passwd():\n ''' 重置密码 '''\n phone = request.valid_data.get('phone')\n passwd = request.valid_data.get('passwd')\n vcode = request.valid_data.get('vcode')\n\n code = ResponseCode.SUCCESS\n msg = '重置成功'\n real_vcode = SmsCache.get_vcode(phone)\n print real_vcode, vcode, phone, passwd\n attempt = InvalidUserResetVcodeCache.incr(phone, 1)\n assert attempt<MAX_TODAY_VCODE_ATTEMPT+1, '今日重置密码验证码错误次数超限'\n if real_vcode==vcode:\n InvalidUserResetVcodeCache.incr(phone, -1)\n UserService.update_passwd(phone, passwd)\n SmsCache.expire_vcode(phone)\n InvalidUserPasswdCache.clear_today_counter(phone)\n else:\n code = ResponseCode.INVALID_VCODE\n msg = '验证码错误'\n result = {\n 'code' : code,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\nsignup_validator = Inputs(\n {\n 'phone' : 
MobileField(msg='请输入手机号'),\n 'passwd' : TextField(msg='请输入密码'),\n 'vcode' : TextField(msg='请输入验证码'),\n }\n)\n@wechat_loggin_dec(required=False, validator=signup_validator, app=True)\ndef signup_post():\n ''' 注册'''\n phone = request.valid_data.get('phone')\n passwd = request.valid_data.get('passwd')\n vcode = request.valid_data.get('vcode')\n\n attempt = InvalidUserSignupVcodeCache.incr(phone, 1)\n assert attempt<MAX_TODAY_VCODE_ATTEMPT+1, '今日注册验证码错误次数超限'\n real_vcode = SmsCache.get_vcode(phone)\n if real_vcode==vcode:\n InvalidUserSignupVcodeCache.incr(phone, -1)\n code = ResponseCode.SUCCESS\n msg = '注册成功'\n name = random_str(10)\n user_id = UserService.create_user(name, phone, passwd)\n SmsCache.expire_vcode(phone)\n token = sign_user(user_id)\n result = {\n 'code' : code,\n 'token' : token,\n 'msg' : msg,\n }\n if request.open_id:\n # 存在问题 退出美分分换个手机号注册 注册数加1\n PromoteService.set_wechat_user_id(request.open_id, user_id)\n print 'set_wechat_user_id', request.open_id, user_id, 'incr_promote----------<<<<<<<<'\n qrcode_user = PromoteService.get_qrcodeuser_by_open_id(request.open_id)\n if qrcode_user:\n qrcode = PromoteService.get_qrcode(qrcode_user.qrcode_id)\n print 'qrcode_user', qrcode_user.qrcode_id, 'incr_promote'\n if qrcode:\n print str(datetime.datetime.now()), request.open_id, user_id, 'incr_promote'\n PromoteService.incr_promote_reg_count(qrcode.promoter_id)\n if qrcode.act_type==9:\n PromoteService.incr_rd_reg_count(qrcode.id)\n rdcode = PromoteService.get_rd_code_by_qrcode_id(qrcode.id)\n# if rdcode.reg_count==3:\n# PromoteService.add_rd_draw_count(user_id, 2)\n else:\n print 'reg qrcode not exist', 'incr_promote------->>>>>'\n response= jsonify_response(result, with_response=True)\n set_cookie(response, 'sign_user', token, 86400*30)\n return response\n else:\n code = ResponseCode.INVALID_VCODE\n msg = '验证码错误'\n result = {\n 'code': code,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\nfrom flask import 
send_from_directory\n@wechat_loggin_dec(required=False, need_openid=True, app=True)\ndef signup():\n ''' 用户注册页面 '''\n #return render_template('user/reg.html')\n return send_from_directory('static/user', 'reg.html')\n\n\n@wechat_loggin_dec(required=False, need_openid=True, app=True)\ndef user_login():\n ''' 用户登录页面 '''\n return render_template('user/login.html')\n\n\nuser_login_post_validator = Inputs(\n {\n 'phone' : MobileField(msg='请输入手机号'),\n 'passwd' : TextField(min_length=1, max_length=100, msg='请输入密码'),\n }\n )\n@wechat_loggin_dec(required=False, validator=user_login_post_validator, app=True)\ndef user_login_post():\n ''' 用户登录post请求 '''\n phone = request.valid_data.get('phone')\n passwd = request.valid_data.get('passwd')\n\n token = ''\n user = UserService.get_user_by_phone(phone)\n attempt = InvalidUserPasswdCache.incr(phone, 1)\n assert attempt<MAX_TODAY_PASSWD_ATTEMPT+1, '今日密码错误次数超限,如需帮助,请联系美分分客服{}'.format(CONTACT)\n if not(user) or user.passwd!=passwd:\n code = ResponseCode.INVALID_USERNAME_OR_PASSWD\n msg = '用户名或密码错误'\n result = {\n 'code' : code,\n 'msg' : msg,\n }\n return jsonify_response(result)\n else:\n InvalidUserPasswdCache.incr(phone, -1)\n code = ResponseCode.SUCCESS\n msg = '登录成功'\n token = sign_user(user.id)\n print request.open_id, user.id , 'open-id user-id'\n if request.open_id: PromoteService.set_user_open_id(user.id, request.open_id)\n result = {\n 'code' : code,\n 'msg' : msg,\n 'token' : token\n }\n response= jsonify_response(result, with_response=True)\n set_cookie(response, 'sign_user', token, 86400*30)\n device_id = request.raw_data.get('device_id')\n if device_id: LogService.log_user_device(device_id, user.id)\n return response\n\n\n\ndef auth_wechat():\n code = request.args.get('code')\n state = request.args.get('state')\n info = exchange_code_for_token(code)\n access_token = info['access_token']\n open_id = info['openid']\n response = redirect('/user') if not state else redirect(state)\n qr_key = None\n 
PromoteService.log_qr_user(qr_key, open_id, -1)\n set_cookie(response, 'open_id', open_id, 86400*30)\n return response\n\n\n\ndef logout():\n try:\n response = redirect('/static/user/my-not-reg.html')\n del_cookie(response, 'sign_user')\n return response\n except:\n import traceback\n traceback.print_exc()\n return ''\n\n\n\n\n" }, { "alpha_fraction": 0.6377952694892883, "alphanum_fraction": 0.6850393414497375, "avg_line_length": 26.214284896850586, "blob_id": "7daa64cb4230cb57eb988b1e758f579672ea8beb", "content_id": "826ecd286e5d645f68c8c6b6fd9e5072ce38e35f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 93, "num_lines": 28, "path": "/migrations/versions/4cf4f86adc0c_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4cf4f86adc0c\nRevises: 3ecdf504db70\nCreate Date: 2015-11-11 11:44:50.579661\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4cf4f86adc0c'\ndown_revision = '3ecdf504db70'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('recommend_item', sa.Column('desc', sa.String(length=500), nullable=True))\n op.add_column('recommend_item', sa.Column('image', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('recommend_item', 'image')\n op.drop_column('recommend_item', 'desc')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.625806450843811, "alphanum_fraction": 0.6806451678276062, "avg_line_length": 22.846153259277344, "blob_id": "9a79dd9a50930a918f14c6aeca1b051b7813b121", "content_id": "9d8a676577eb14a5a8092f50a93a03bf8e3b0da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "no_license", "max_line_length": 91, "num_lines": 26, "path": "/migrations/versions/11926d4d1d71_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 11926d4d1d71\nRevises: 2ea902b5c7a3\nCreate Date: 2016-01-14 20:23:45.259966\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '11926d4d1d71'\ndown_revision = '2ea902b5c7a3'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('daily_coupon', sa.Column('title', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('daily_coupon', 'title')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6362559199333191, "alphanum_fraction": 0.6954976320266724, "avg_line_length": 29.14285659790039, "blob_id": "78c137d45f3cd83adfed179c07dd1027e31b0faf", "content_id": "c82aa10408dc07a76432bcab46a8d377fc39632e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 113, "num_lines": 28, "path": "/migrations/versions/3107ca470fdf_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3107ca470fdf\nRevises: 57be38c1806e\nCreate Date: 2016-02-26 15:45:09.438185\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3107ca470fdf'\ndown_revision = '57be38c1806e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_detail', sa.Column('pics_count', sa.Integer(), nullable=True))\n op.create_index(op.f('ix_room_design_detail_pics_count'), 'room_design_detail', ['pics_count'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_room_design_detail_pics_count'), table_name='room_design_detail')\n op.drop_column('room_design_detail', 'pics_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.65666264295578, "alphanum_fraction": 0.6938775777816772, "avg_line_length": 28.75, "blob_id": "2ac593549a31ea5d407809a1d051c1f5030b0b88", "content_id": "f328e3ca095af8bc2ebd9c46e1045fd038ca24b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 122, "num_lines": 28, "path": "/migrations/versions/314740bf50b9_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 314740bf50b9\nRevises: fe9b95bfea6\nCreate Date: 2015-11-29 12:14:02.763748\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '314740bf50b9'\ndown_revision = 'fe9b95bfea6'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_use_log', sa.Column('status', mysql.TINYINT(display_width=1), nullable=False))\n op.drop_column('credit_use_log', 'cat')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('credit_use_log', sa.Column('cat', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n op.drop_column('credit_use_log', 'status')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5988075137138367, "alphanum_fraction": 0.5992333889007568, "avg_line_length": 32.04225540161133, "blob_id": "b54b0c2779f598bc9132930005ad656ac2cf4e1a", "content_id": "f20bd685e552e2084929aa60a1a59bb650963aae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2348, "license_type": "no_license", "max_line_length": 130, "num_lines": 71, "path": "/ops/log.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport time\n\nfrom sqlalchemy import and_\nfrom models import db\nfrom models import PayNotifyLog\nfrom models import AlipayOrderUser\nfrom models import UserDevice\nfrom models import UserDeviceLog\n\nfrom util.utils import dt_obj\n\n\nclass LogService(object):\n ''' '''\n @staticmethod\n def log_pay_callback(pay_type, content):\n log = PayNotifyLog(pay_type=pay_type, content=content)\n db.session.add(log)\n db.session.commit()\n\n @staticmethod\n def log_alipay_buyer_email(order_no, buyer_email):\n try:\n ref = AlipayOrderUser(order_no=order_no, buyer_email=buyer_email)\n db.session.add(ref)\n db.session.commit()\n return ref.id\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n\n @staticmethod\n def exist_device(device_id):\n return bool(UserDevice.query.filter(UserDevice.device_id==device_id).first())\n\n @staticmethod\n def log_device(device_id, **kw):\n exist = LogService.exist_device(device_id)\n if exist:\n kw['update_time'] = dt_obj.now()\n UserDevice.query.filter(UserDevice.device_id==device_id).update(kw)\n db.session.commit()\n else:\n device = UserDevice(device_id=device_id, **kw)\n db.session.add(device)\n db.session.commit()\n\n @staticmethod\n def exist_user_device(device_id, user_id):\n 
''' '''\n query = and_(\n UserDeviceLog.device_id==device_id,\n UserDeviceLog.user_id==user_id)\n return bool(UserDeviceLog.query.filter(query).first())\n\n @staticmethod\n def update_device_user(device_id, user_id):\n count = UserDevice.query.filter(UserDevice.device_id==device_id).update({'user_id':user_id, 'update_time':dt_obj.now()})\n db.session.commit()\n return count\n \n @staticmethod\n def log_user_device(device_id, user_id):\n count = LogService.update_device_user(device_id, user_id)\n if not LogService.exist_user_device(device_id, user_id):\n log = UserDeviceLog(user_id=user_id, device_id=device_id)\n db.session.add(log)\n db.session.commit()\n return log.id\n\n\n" }, { "alpha_fraction": 0.6333853602409363, "alphanum_fraction": 0.6833073496818542, "avg_line_length": 22.740739822387695, "blob_id": "8dc98db247dc8d00c1b39d0bcf246205b4d6d781", "content_id": "d917c35e262a1a1a0f15fdd2808567ceb0296565", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 108, "num_lines": 27, "path": "/migrations/versions/55f4c256c989_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 55f4c256c989\nRevises: 10f3ed6c72ed\nCreate Date: 2015-11-27 16:58:52.410295\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '55f4c256c989'\ndown_revision = '10f3ed6c72ed'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport models\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('order', sa.Column('total_fee', models.MoneyField(precision=10, scale=2), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('order', 'total_fee')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5616928935050964, "alphanum_fraction": 0.5646228790283203, "avg_line_length": 33.04251480102539, "blob_id": "e6a81293889492b3b14e8448d5650af661c513cf", "content_id": "fa8a2f2e58393f3fdf65f7139ee6d8ccce16cadf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18836, "license_type": "no_license", "max_line_length": 126, "num_lines": 541, "path": "/ops/promote.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom sqlalchemy import func\nfrom util.sqlerr import SQL_DUPLICATE\nfrom util.sqlerr import SQL_DUPLICATE_PHONE\nfrom util.utils import convert_locaton\nfrom models import db\nfrom models import Promoter\nfrom models import Qrcode\nfrom models import QrCodeUser\nfrom models import WechatLocation\nfrom models import FakeUser\nfrom models import WechatReg\nfrom models import RdUserQrcode\nfrom models import RdQrcodeUser\nfrom models import RdDrawCounter\nfrom models import RdDrawCounterLog\nfrom models import RdMoneyPrize\nfrom ops.utils import get_items\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom thirdparty.wechat import wechat\nfrom thirdparty.wechat import create_qrcode\nfrom thirdparty.qn import upload_img\nfrom settings import celery\n\n\nclass PromoteService(object):\n\n @staticmethod\n def log_qr_user(qr_key, open_id, status=1):\n ''' 记录从扫描二维码关注的用户 '''\n log = QrCodeUser(qrcode_id=qr_key, open_id=open_id, status=status)\n try:\n db.session.add(log)\n db.session.commit()\n return log.id\n except Exception as e:\n db.session.rollback()\n import traceback\n traceback.print_exc()\n if SQL_DUPLICATE.search(str(e)):\n print 'duplicate qrcode user...'\n data = {'qrcode_id':qr_key}\n if qr_key:\n qrcode = Qrcode.query.filter(Qrcode.id==qr_key).first()\n query = and_(\n 
QrCodeUser.open_id==open_id,\n QrCodeUser.qrcode_id==None\n )\n count = QrCodeUser.query.filter(query).update(data)\n db.session.commit()\n qrcode_user = QrCodeUser.query.filter(QrCodeUser.open_id==open_id).first()\n if qrcode and count and qrcode_user and qrcode_user.user_id:\n PromoteService.incr_promote_reg_count(qrcode.promoter_id)\n return count\n\n @staticmethod\n def check_user(phone, passwd):\n ''' 手机 密码 '''\n query = and_(\n Promoter.phone==phone,\n Promoter.passwd==passwd,\n Promoter.status==1\n )\n user = Promoter.query.filter(query).first()\n return user\n\n @staticmethod\n def create_promoter(phone, passwd, name, create_by=None, status=1):\n try:\n promoter = Promoter(phone=phone, status=status, passwd=passwd, name=name, create_by=create_by)\n db.session.add(promoter)\n db.session.commit()\n return promoter.id\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE_PHONE.search(str(e)):\n assert 0, '手机号已存在'\n\n @staticmethod\n def get_paged_promoters(**kw):\n return get_page(Promoter, {}, **kw)\n\n @staticmethod\n def count_promoters(where=None):\n return count_items(Promoter, where)\n\n @staticmethod\n def create_promoter_qrcode(promoter_id, act_type=None):\n ''' 创建推广者二维码 先返回美分分存的二维码id\n 然后下载图片 最终访问 都是用的七牛的链接\n 2016-03-02 act_type=9 创建用户拉用户二维码\n '''\n qrcode = Qrcode(promoter_id=promoter_id, act_type=act_type)\n db.session.add(qrcode)\n db.session.commit()\n return qrcode.id\n\n @staticmethod\n def get_promoter_by_phone(phone):\n ''' '''\n promoter = Promoter.query.filter(Promoter.phone==phone).first()\n if promoter: return promoter\n\n @staticmethod\n @celery.task\n def download_promoter_qrcode(qrcode_id):\n ''' 异步创建qrcode '''\n ticket, response = create_qrcode(qrcode_id)\n image_content = response.content\n image_key = 'promoter_qrcode_{}.jpg'.format(qrcode_id)\n upload_img(image_key, image_content)\n update_data = {\n 'ticket' : ticket,\n 'image' : image_key\n }\n count = Qrcode.query.filter(Qrcode.id==qrcode_id).update(update_data)\n 
db.session.commit()\n return count\n\n @staticmethod\n def count_promoters(where=None):\n ''' 推广人员数 '''\n return count_items(Promoter, where)\n\n @staticmethod\n def incr_promote_follow_count(promoter_id):\n ''' 推广关注人加1 '''\n count = Promoter.query.filter(Promoter.id==promoter_id).update({'follow_count':Promoter.follow_count+1})\n db.session.commit()\n return count\n\n @staticmethod\n def incr_promote_reg_count(promoter_id):\n ''' 推广注册人加1 '''\n count = Promoter.query.filter(Promoter.id==promoter_id).update({'reg_count':Promoter.reg_count+1})\n db.session.commit()\n return count\n\n @staticmethod\n def incr_promote_unfollow_count(promoter_id):\n ''' 推广取消关注人加1 '''\n count = Promoter.query.filter(Promoter.id==promoter_id).update({'unfollow_count':Promoter.unfollow_count+1})\n db.session.commit()\n return count\n\n @staticmethod\n def log_wechat_location(open_id, lng, lat):\n ''' 定位 '''\n log = WechatLocation(open_id=open_id, lng=lng, lat=lat)\n db.session.add(log)\n db.session.commit()\n return log.id\n\n @staticmethod\n def del_promoter(promoter_id):\n ''' 删除 '''\n count = Promoter.query.filter(Promoter.id==promoter_id).update({'status':0})\n db.session.commit()\n return count\n\n @staticmethod\n @celery.task\n def set_user_sex(open_id):\n ''' 获取用户性别 '''\n try:\n from thirdparty.wechat import wechat\n info = wechat.get_user_info(open_id)\n if info['subscribe']:\n sex = info['sex']\n city= info['city']\n nickname= info['nickname']\n headimgurl = info['headimgurl']\n data = {\n 'sex': sex,\n 'city': city,\n 'nickname': nickname,\n 'headimgurl': headimgurl,\n 'status': 1,\n }\n else:\n print open_id, '未关注'\n return\n count = QrCodeUser.query.filter(QrCodeUser.open_id==open_id).update(data)\n db.session.commit()\n return count\n except Exception as e:\n import traceback\n traceback.print_exc()\n wechat.refresh_wechat_token()\n\n\n @staticmethod\n def get_promoter_qrcodes_by_promoter_ids(promoter_ids):\n query = Qrcode.promoter_id.in_(promoter_ids)\n rows = 
Qrcode.query.filter(query).all()\n return [row.as_dict() for row in rows]\n\n @staticmethod\n def get_qrcode(qrcode_id):\n ''' '''\n return Qrcode.query.filter(Qrcode.id==qrcode_id).first()\n\n @staticmethod\n def get_qrcodeuser_by_open_id(open_id):\n return QrCodeUser.query.filter(QrCodeUser.open_id==open_id).first()\n\n @staticmethod\n def set_wechat_user_id(open_id, user_id):\n ''' 设置微信用户 美分分user_id'''\n count = QrCodeUser.query.filter(QrCodeUser.open_id==open_id).update({'user_id':user_id})\n reg = WechatReg(open_id=open_id, user_id=user_id)\n db.session.add(reg)\n db.session.commit()\n return count\n\n @staticmethod\n def add_fakeuser(user_id):\n fake_user = FakeUser(user_id=user_id)\n db.session.add(fake_user)\n db.session.commit()\n return fake_user.id\n\n @staticmethod\n def get_fakeuser_by_userid(user_id):\n ''' 判断是否是假用户 '''\n return FakeUser.query.filter(FakeUser.user_id==user_id).first()\n\n @staticmethod\n def count_promoter_admin_reg(promoter_ids):\n ''' 推广管理员手下人注册总数 '''\n query = and_(\n Promoter.create_by.in_(promoter_ids),\n )\n rows = db.session.query(\n Promoter.create_by, func.sum(Promoter.follow_count), func.sum(Promoter.reg_count),\n func.sum(Promoter.unfollow_count)). 
\\\n filter(query).group_by(Promoter.create_by).all()\n\n return rows\n\n\n @staticmethod\n def get_user_qrcodes_by_user_ids(user_ids):\n ''' 获取用户qrcode '''\n qrcode_users = QrCodeUser.query.filter(QrCodeUser.user_id.in_(user_ids)).all()\n return [i.as_dict() for i in qrcode_users]\n\n @staticmethod\n def get_promoter_user_id_suq(promoter_id):\n promoter_q = or_(\n Promoter.create_by==promoter_id,\n Promoter.id==promoter_id\n )\n admin_promoters = Promoter.query.filter(promoter_q).all()\n promoter_ids = [i.id for i in admin_promoters]\n qrcodes = Qrcode.query.filter(Qrcode.promoter_id.in_(promoter_ids)).all()\n \n qrcode_ids = [q.id for q in qrcodes]\n user_ids_suq = db.session.query(QrCodeUser.user_id).filter(QrCodeUser.qrcode_id.in_(qrcode_ids)).subquery()\n return user_ids_suq\n\n @staticmethod\n def get_qrcode_user_by_user_id(user_id):\n ''' '''\n qrcode_user = QrCodeUser.query.filter(QrCodeUser.user_id==user_id).first()\n return qrcode_user\n\n @staticmethod\n def get_first_location(open_id):\n ''' 首次地址 '''\n loc = WechatLocation.query.filter(WechatLocation.open_id==open_id).order_by(WechatLocation.id.asc()).first()\n if loc: return loc.as_dict()\n\n @staticmethod\n def set_first_location(open_id, lng, lat):\n try:\n lnglat = '{},{}'.format(lng, lat)\n location = convert_locaton(lnglat)\n name = location['result']['formatted_address']\n print lnglat, name\n data = {\n 'location': name,\n 'lnglat': lnglat\n }\n QrCodeUser.query.filter(QrCodeUser.open_id==open_id).update(data)\n db.session.commit()\n except Exception as e:\n import traceback\n traceback.print_exc()\n print 'convert location error...', lnglat\n\n @staticmethod\n def open_id_user_ids_suq(open_id):\n ''' '''\n query = and_()\n query.append(WechatReg.open_id==open_id)\n suq = db.session.query(WechatReg.user_id).filter(query).subquery()\n return suq\n\n @staticmethod\n def count_open_id_user_count(open_ids):\n query = WechatReg.open_id.in_(open_ids)\n result = 
db.session.query(WechatReg.open_id,func.count(WechatReg.id)).filter(query).group_by(WechatReg.open_id).all()\n return dict(result)\n\n @staticmethod\n def get_user_id_open_id_map(open_ids):\n rows = WechatReg.query.filter(WechatReg.open_id.in_(open_ids)).all()\n from collections import defaultdict\n data = {}\n for i in rows:\n data[i.user_id] = i.open_id\n return data\n\n @staticmethod\n def get_open_ids_by_user_ids(user_ids):\n rows = WechatReg.query.filter(WechatReg.user_id.in_(user_ids)).all()\n return [i.open_id for i in rows]\n\n @staticmethod\n def get_qrcodeusers_by_open_ids(open_ids):\n ''' '''\n join_query = and_(\n QrCodeUser.open_id.in_(open_ids),\n QrCodeUser.qrcode_id==Qrcode.id\n )\n rows = db.session.query(\n QrCodeUser.open_id, Qrcode.promoter_id).join(Qrcode, join_query).all()\n promoters = Promoter.query.all()\n promoter_parent_map = {}\n promoter_name_map = {}\n for p in promoters:\n if p.create_by:\n promoter_parent_map[p.id] = p.create_by\n promoter_name_map[p.id] = p.name\n result = {}\n for i in rows:\n open_id = i[0]\n promoter_id = i[1]\n promoter_parent_id = promoter_parent_map.get(promoter_id)\n if promoter_parent_id:\n tmp = {\n 'promoter': {'id': promoter_id, 'name': promoter_name_map[promoter_id]},\n 'parent' : {'id': promoter_parent_id, 'name': promoter_name_map[promoter_parent_id]}\n }\n result[open_id] = tmp\n return result\n\n @staticmethod\n def set_user_open_id(user_id, open_id):\n count = QrCodeUser.query.filter(QrCodeUser.open_id==open_id).update({'user_id':user_id})\n db.session.commit()\n return count\n\n @staticmethod\n def get_user_qrcode_id(user_id):\n user = QrCodeUser.query.filter(QrCodeUser.user_id==user_id).first()\n if user: return user.qrcode_id\n\n @staticmethod\n def get_qrcodeuser_by_id(qr_user_id):\n user = QrCodeUser.query.filter(QrCodeUser.id==qr_user_id).first()\n return user\n\n @staticmethod\n def set_wechat_status(open_id, status):\n '''\n 0 取消关注\n 1 已关注\n '''\n count = 
QrCodeUser.query.filter(QrCodeUser.open_id==open_id).update({'status':status})\n db.session.commit()\n return count\n\n @staticmethod\n def create_rd_user(user_id):\n ''' 创建分享二维码 \n 需要再建一层map\n '''\n promoter = PromoteService.get_rd_promoter()\n qrcode = Qrcode(promoter_id=promoter.id, act_type=9)\n db.session.add(qrcode)\n db.session.commit()\n PromoteService.download_promoter_qrcode.delay(qrcode.id)\n rd_user = RdUserQrcode(user_id=user_id, qrcode_id=qrcode.id)\n db.session.add(rd_user)\n db.session.commit()\n\n @staticmethod\n def get_rd_user(user_id):\n ''' '''\n return RdUserQrcode.query.filter(RdUserQrcode.user_id==user_id).first()\n \n @staticmethod\n def add_rd_draw_count(user_id, source):\n ''' 添加抽奖机会 '''\n source_map = {\n 1 : 1,\n 2 : 1,\n 3 : 3\n }\n assert source in source_map, '类型错误'\n query = and_(\n RdDrawCounterLog.user_id==user_id,\n RdDrawCounterLog.source==source\n )\n exists = RdDrawCounterLog.query.filter(query).first()\n if exists: return\n log = RdDrawCounterLog(user_id=user_id, source=source, count=source_map[source])\n db.session.add(log)\n db.session.commit()\n query = RdDrawCounter.user_id==user_id\n data = {\n 'total': RdDrawCounter.total+source_map[source]\n }\n count = RdDrawCounter.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def incr_draw_used(user_id):\n ''' '''\n exist = RdDrawCounter.query.filter(RdDrawCounter.user_id==user_id).first()\n if not exist:\n counter = RdDrawCounter(user_id=user_id)\n db.session.add(counter)\n db.session.commit()\n query = and_(\n RdDrawCounter.used<RdDrawCounter.total,\n RdDrawCounter.user_id==user_id\n )\n data = {\n 'used': RdDrawCounter.used+1\n }\n count = RdDrawCounter.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def count_invite_reg_count(user_id):\n ''' 邀请注册数 '''\n code = RdUserQrcode.query.filter(RdUserQrcode.user_id==user_id).first()\n if not code: return 0\n count = 
db.session.query(func.count(RdQrcodeUser.id)).filter(RdQrcodeUser.qrcode_id==code.qrcode_id).scalar()\n return count\n\n @staticmethod\n def incr_prized(prize_id, current_count):\n ''' 已奖励奖品 '''\n query = and_(\n RdMoneyPrize.id==prize_id,\n RdMoneyPrize.sent==RdMoneyPrize.total-current_count,\n RdMoneyPrize.sent<RdMoneyPrize.total\n )\n data = {\n 'sent': RdMoneyPrize.sent+1\n }\n count = RdMoneyPrize.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def get_prize_remain(prize_id):\n ''' '''\n prize = RdMoneyPrize.query.filter(RdMoneyPrize.id==prize_id).first()\n if prize: return prize.total-prize.sent\n return 0\n\n @staticmethod\n def get_draw_logs(user_id):\n ''' 用户抽奖权限 '''\n logs = RdDrawCounterLog.query.filter(RdDrawCounterLog.user_id==user_id).all()\n privileges = dict()\n for i in logs:\n privileges[i.id] = True\n \n for i in range(1,4):\n privileges.setdefault(i, False)\n return privileges\n\n @staticmethod\n def get_user_can_draw_count(user_id):\n draw = RdDrawCounter.query.filter(RdDrawCounter.user_id==user_id).first()\n if not draw: return 0\n return draw.total - draw.used\n\n @staticmethod\n def get_reg_count(user_id):\n code = RdUserQrcode.query.filter(RdUserQrcode.user_id==user_id).first()\n if code: return code.reg_count\n return 0\n\n @staticmethod\n def incr_rd_reg_count(qrcode_id):\n query = and_(\n RdUserQrcode.qrcode_id==qrcode_id)\n data = {\n 'reg_count': RdUserQrcode.reg_count+1\n }\n count = RdUserQrcode.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def incr_rd_follow_count(qrcode_id):\n query = and_(\n RdUserQrcode.qrcode_id==qrcode_id)\n data = {\n 'follow_count': RdUserQrcode.follow_count+1\n }\n count = RdUserQrcode.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def get_prize_left():\n prizes = RdMoneyPrize.query.all()\n return [(i.id, i.total-i.sent) for i in prizes]\n\n @staticmethod\n def 
get_prize(prize_id):\n ''' '''\n prize = RdMoneyPrize.query.filter(RdMoneyPrize.id==prize_id).first()\n return prize\n\n @staticmethod\n def get_rd_promoter():\n ''' 现金抽奖推广员 '''\n promoter = Promoter.query.filter(Promoter.phone=='10000006666').first()\n return promoter\n\n @staticmethod\n def get_rd_code_by_qrcode_id(qrcode_id):\n ''' '''\n query = RdUserQrcode.qrcode_id==qrcode_id\n promote = RdUserQrcode.query.filter(query).first()\n return promote\n\n\n\n\n\n\n\n \n\n" }, { "alpha_fraction": 0.6166394948959351, "alphanum_fraction": 0.6786296963691711, "avg_line_length": 22.576923370361328, "blob_id": "6815c027e43f957a351b560bc69728e289eb32f3", "content_id": "025709ff64af338cc83f73ef8534088a0be938b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 83, "num_lines": 26, "path": "/migrations/versions/31291b2ba259_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 31291b2ba259\nRevises: 11926d4d1d71\nCreate Date: 2016-01-25 15:00:51.869707\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '31291b2ba259'\ndown_revision = '11926d4d1d71'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item_cat', sa.Column('sort_order', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('item_cat', 'sort_order')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5877431035041809, "alphanum_fraction": 0.5886476635932922, "avg_line_length": 30.105634689331055, "blob_id": "d8acfcfd83caec2adb89eeec91001a4c3b817c78", "content_id": "bc1b260712755feec0a4924bee5c4045ef0c6bac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4422, "license_type": "no_license", "max_line_length": 100, "num_lines": 142, "path": "/ops/beauty_tutorial.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import func\nfrom models import db\nfrom models import RecommendBeautyItem\nfrom models import BeautyEntry\nfrom models import DailyUser\nfrom models import DailyCoupon\nfrom ops.utils import get_page\nfrom ops.utils import get_items\nfrom ops.utils import count_items\nfrom util.utils import format_rate\n\n\nclass TutorialService(object):\n\n @staticmethod\n def create_tutorial_entry(title, icon, image, photo, items):\n ''' '''\n entry = BeautyEntry(title=title, icon=icon, image=image, photo=photo, items=items)\n db.session.add(entry)\n db.session.commit()\n return entry.id\n\n @staticmethod\n def update_tutorial_entry(item_id, **kw):\n count = BeautyEntry.query.filter(BeautyEntry.id==item_id).update(kw)\n db.session.commit()\n db.session.commit()\n\n @staticmethod\n def set_tutorial_status(item_id, status):\n count = BeautyEntry.query.filter(BeautyEntry.id==item_id).update({'status':status})\n db.session.commit()\n return count\n\n @staticmethod\n def get_daily_coupon(daily_id):\n ''' '''\n daily = DailyCoupon.query.filter(DailyCoupon.id==daily_id).first()\n if daily: return daily.as_dict()\n\n @staticmethod\n def create_daily_coupon(title, coupon_id, start_time, end_time, total, use_time, use_condition):\n daily = DailyCoupon(\n title=title,\n coupon_id=coupon_id,\n start_time=start_time,\n 
end_time=end_time,\n total=total,\n use_time=use_time,\n use_condition=use_condition\n )\n db.session.add(daily)\n db.session.commit()\n return daily.id\n\n @staticmethod\n def update_daily_coupon(daily_id, **kw):\n ''' '''\n count = DailyCoupon.query.filter(DailyCoupon.id==daily_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_tutorial_entries(**kw):\n return get_page(BeautyEntry, {}, **kw)\n\n @staticmethod\n def get_paged_daily_coupons(**kw):\n return get_page(DailyCoupon, {}, **kw)\n\n @staticmethod\n def count_daily_coupons(where=None):\n return count_items(DailyCoupon, where=where)\n\n @staticmethod\n def count_tutorials(where=None):\n return count_items(BeautyEntry, where=where)\n\n @staticmethod\n def get_tutorial(item_id):\n tutorial = BeautyEntry.query.filter(BeautyEntry.id==item_id).first()\n if tutorial: return tutorial.as_dict()\n\n @staticmethod\n def send_daily_coupon(user_id, daily_id):\n ''' '''\n try:\n log = DailyUser(user_id=user_id, daily_id=daily_id)\n db.session.add(log)\n db.session.commit()\n return log.id\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n\n @staticmethod\n def get_user_daily_by_ids(user_id, daily_ids):\n query = and_(\n DailyUser.user_id==user_id,\n DailyUser.daily_id.in_(daily_ids)\n )\n rows = DailyUser.query.filter(query).all()\n return { i.daily_id:i.user_id for i in rows}\n\n @staticmethod\n def get_user_daily(user_id, daily_id):\n query = and_(\n DailyUser.user_id==user_id,\n DailyUser.daily_id==daily_id\n )\n daily = DailyUser.query.filter(query).first()\n if daily: return daily\n\n @staticmethod\n def incr_daily_coupon_received(daily_id):\n ''' '''\n query = and_(\n DailyCoupon.id==daily_id,\n DailyCoupon.total>=DailyCoupon.sent+1\n )\n count = DailyCoupon.query.filter(query).update({'sent':DailyCoupon.sent+1})\n db.session.commit()\n return count\n\n @staticmethod\n def incr_tutorial_view_count(tutorial_id):\n count = 
BeautyEntry.query.filter(\n BeautyEntry.id==tutorial_id).update({\"view_count\":BeautyEntry.view_count+1})\n db.session.commit()\n return count\n\n @staticmethod\n def get_daily_user_ids(**kw):\n return get_page(DailyUser, {}, **kw)\n\n @staticmethod\n def count_daily_users(where):\n return count_items(DailyUser, where)\n\n\n\n\n\n" }, { "alpha_fraction": 0.6231292486190796, "alphanum_fraction": 0.6639455556869507, "avg_line_length": 25.25, "blob_id": "430fc8ff08a4b4054cff81738cda5b37e07f5de9", "content_id": "a38765d77dc976757f68efae81d5bf89ff13f57d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/migrations/versions/35c5bd051538_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 35c5bd051538\nRevises: 456050d473e\nCreate Date: 2016-02-02 14:29:50.529374\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '35c5bd051538'\ndown_revision = '456050d473e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('activity', sa.Column('city_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'activity', 'city', ['city_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'activity', type_='foreignkey')\n op.drop_column('activity', 'city_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.48929503560066223, "alphanum_fraction": 0.5018276572227478, "avg_line_length": 31.913793563842773, "blob_id": "82f8d80b78babbe8dbe709d9a619f023e919cafd", "content_id": "12f0845a75afb1fce81c7cc29057523639fe06df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2015, "license_type": "no_license", "max_line_length": 105, "num_lines": 58, "path": "/static/admin/js/amap.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\n\n\n function addMarker(lnglatXY) {\n map.clearMap();\n marker = new AMap.Marker({\n icon: \"http://webapi.amap.com/images/marker_sprite.png\",\n position: lnglatXY\n });\n marker.setMap(map);\n map.setCenter(new AMap.LngLat(lnglatXY[0], lnglatXY[1]));\n return marker\n }\n\n function init_map() {\n var map = new AMap.Map('mapContainer', {\n // 设置中心点\n //center: [116.397428, 39.90923],\n \n // 设置缩放级别\n zoom: 12\n });\n //在地图中添加ToolBar插件\n map.plugin([\"AMap.ToolBar\"], function () {\n toolBar = new AMap.ToolBar();\n map.addControl(toolBar);\n });\n window.map = map;\n \n function geocoder_CallBack(data) {\n window.data = data;\n var resultStr = \"\";\n var poiinfo = \"\";\n var address;\n //返回地址描述\n address = data.regeocode.formattedAddress;\n console.log(address);\n document.getElementById(\"addr\").value = address;\n }\n \n //为地图注册click事件获取鼠标点击出的经纬度坐标\n var clickEventListener = map.on( 'click', function(e) {\n document.getElementById(\"lngX\").value = e.lnglat.getLng();\n document.getElementById(\"latY\").value = e.lnglat.getLat();\n \n AMap.service([\"AMap.Geocoder\"], function() {\n window.MGeocoder = new AMap.Geocoder({\n radius: 1000,\n extensions: \"all\"\n });\n });\n //逆地理编码\n var lnglatXY = [document.getElementById(\"lngX\").value, document.getElementById(\"latY\").value]\n addMarker(lnglatXY);\n 
MGeocoder.getAddress(lnglatXY, function(status, result) {\n if (status === 'complete' && result.info === 'OK') {\n geocoder_CallBack(result);\n }\n })\n });\n }" }, { "alpha_fraction": 0.637982189655304, "alphanum_fraction": 0.7047477960586548, "avg_line_length": 24.923076629638672, "blob_id": "608eb67a4b46c6dbae3326c9debc08131a9bc5c3", "content_id": "574a373a7f54f52d62c783c6c168e0df8fbc51f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 98, "num_lines": 26, "path": "/migrations/versions/3c12ca43b1ba_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3c12ca43b1ba\nRevises: 57366d94ca9a\nCreate Date: 2015-12-30 16:15:19.929785\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3c12ca43b1ba'\ndown_revision = '57366d94ca9a'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('hospital', sa.Column('status', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('hospital', 'status')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6453251242637634, "alphanum_fraction": 0.660429060459137, "avg_line_length": 46.05345916748047, "blob_id": "35ebefa9b78f4def8f9def6f53aafc154820993f", "content_id": "8585c148b6be4e684bb6a2b9d42cfddda1058bcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14963, "license_type": "no_license", "max_line_length": 89, "num_lines": 318, "path": "/migrations/versions/42e923c1238_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 42e923c1238\nRevises: None\nCreate Date: 2015-10-30 16:36:01.320765\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '42e923c1238'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\nimport models\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('admin_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('passwd', sa.String(length=100), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('city',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('coupon',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('cat', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('effective', sa.Integer(), nullable=False),\n sa.Column('remark', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('hospital',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', 
sa.String(length=100), nullable=True),\n sa.Column('phone', sa.String(length=100), nullable=True),\n sa.Column('tags', sa.String(length=1000), nullable=True),\n sa.Column('addr', sa.String(length=300), nullable=True),\n sa.Column('long_lat', sa.String(length=30), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('item_cat',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('pay_notify_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('pay_type', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('content', sa.String(length=10000), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('period_pay_choice',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('period_count', sa.Integer(), nullable=False),\n sa.Column('period_fee', sa.Float(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('period_count')\n )\n op.create_table('school',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('city_name', sa.String(length=100), nullable=True),\n sa.Column('link', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=80), nullable=True),\n sa.Column('phone', sa.String(length=80), nullable=True),\n sa.Column('passwd', sa.String(length=80), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name'),\n sa.UniqueConstraint('phone')\n )\n op.create_table('credit_apply',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n 
sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('id_no', sa.String(length=18), nullable=True),\n sa.Column('school', sa.String(length=100), nullable=True),\n sa.Column('enrollment_time', sa.DateTime(), nullable=True),\n sa.Column('major', sa.String(length=100), nullable=True),\n sa.Column('stu_no', sa.String(length=20), nullable=True),\n sa.Column('stu_years', sa.Integer(), nullable=True),\n sa.Column('addr', sa.String(length=100), nullable=True),\n sa.Column('parent_contact', sa.String(length=100), nullable=True),\n sa.Column('chsi_name', sa.String(length=100), nullable=True),\n sa.Column('chsi_passwd', sa.String(length=100), nullable=True),\n sa.Column('id_card_photo', sa.String(length=100), nullable=True),\n sa.Column('stu_card_photo', sa.String(length=100), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('update_time', sa.DateTime(), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('user_id')\n )\n op.create_table('credit_change_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('amount', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('item_sub_cat',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('desc', sa.String(length=1000), nullable=True),\n sa.Column('icon', sa.String(length=100), nullable=True),\n sa.Column('cat_id', sa.Integer(), nullable=False),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['cat_id'], ['item_cat.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n 
op.create_table('user_advice',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('content', sa.String(length=10000), nullable=True),\n sa.Column('contact', sa.String(length=100), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user_coupon',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('coupon_id', sa.Integer(), autoincrement=False, nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('cat', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('end_time', sa.DateTime(), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=False),\n sa.Column('remark', sa.String(length=100), nullable=True),\n sa.ForeignKeyConstraint(['coupon_id'], ['coupon.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user_credit',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('total', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('used', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('wechat',\n sa.Column('open_id', sa.String(length=32), autoincrement=False, nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n 
sa.PrimaryKeyConstraint('open_id')\n )\n op.create_table('item',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('orig_price', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('sub_cat_id', sa.Integer(), nullable=False),\n sa.Column('hospital_id', sa.Integer(), nullable=False),\n sa.Column('photos', sa.String(length=1000), nullable=True),\n sa.Column('title', sa.String(length=500), nullable=True),\n sa.Column('item_no', sa.String(length=50), nullable=True),\n sa.Column('support_choices', sa.String(length=50), nullable=True),\n sa.Column('sold_count', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['hospital_id'], ['hospital.id'], ),\n sa.ForeignKeyConstraint(['sub_cat_id'], ['item_sub_cat.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('item_comment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('item_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('photos', sa.String(length=1000), nullable=True),\n sa.Column('content', sa.String(length=10000), nullable=True),\n sa.Column('rate', sa.Float(), nullable=True),\n sa.Column('is_anonymous', sa.Boolean(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('item_fav',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('item_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('user_id', 'item_id')\n )\n op.create_table('order',\n sa.Column('id', sa.Integer(), 
nullable=False),\n sa.Column('pay_method', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('item_id', sa.Integer(), nullable=False),\n sa.Column('order_no', sa.String(length=30), nullable=True),\n sa.Column('credit_choice_id', sa.Integer(), nullable=True),\n sa.Column('coupon_id', sa.Integer(), nullable=True),\n sa.Column('coupon_amount', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('credit_amount', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('total', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('credit_verified', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['coupon_id'], ['user_coupon.id'], ),\n sa.ForeignKeyConstraint(['credit_choice_id'], ['period_pay_choice.id'], ),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('coupon_id'),\n sa.UniqueConstraint('order_no')\n )\n op.create_table('credit_use_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('amount', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('order_id', sa.Integer(), nullable=True),\n sa.Column('cat', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['order_id'], ['order.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('order_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('order_id', sa.Integer(), nullable=True),\n sa.Column('status', 
mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('remark', sa.String(length=100), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['order_id'], ['order.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('period_pay_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('amount', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('fee', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('punish', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('order_id', sa.Integer(), nullable=True),\n sa.Column('period_pay_index', sa.Integer(), nullable=True),\n sa.Column('period_count', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('deadline', sa.DateTime(), nullable=True),\n sa.Column('repayment_time', sa.DateTime(), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['order_id'], ['order.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('service_code',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('order_id', sa.Integer(), nullable=True),\n sa.Column('code', sa.String(length=100), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['order_id'], ['order.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('order_id')\n )\n op.create_index(op.f('ix_service_code_code'), 'service_code', ['code'], unique=True)\n op.create_table('punish_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('log_id', sa.Integer(), nullable=True),\n sa.Column('amount', models.MoneyField(precision=10, scale=2), nullable=False),\n sa.Column('create_time', 
sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['log_id'], ['period_pay_log.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('punish_log')\n op.drop_index(op.f('ix_service_code_code'), table_name='service_code')\n op.drop_table('service_code')\n op.drop_table('period_pay_log')\n op.drop_table('order_log')\n op.drop_table('credit_use_log')\n op.drop_table('order')\n op.drop_table('item_fav')\n op.drop_table('item_comment')\n op.drop_table('item')\n op.drop_table('wechat')\n op.drop_table('user_credit')\n op.drop_table('user_coupon')\n op.drop_table('user_advice')\n op.drop_table('item_sub_cat')\n op.drop_table('credit_change_log')\n op.drop_table('credit_apply')\n op.drop_table('user')\n op.drop_table('school')\n op.drop_table('period_pay_choice')\n op.drop_table('pay_notify_log')\n op.drop_table('item_cat')\n op.drop_table('hospital')\n op.drop_table('coupon')\n op.drop_table('city')\n op.drop_table('admin_user')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6265664100646973, "alphanum_fraction": 0.6597744226455688, "avg_line_length": 38.900001525878906, "blob_id": "83463ed4be59d38de4c6429e3c5d95afcbece1eb", "content_id": "522518afd174dcfc5e857dd26d7b36fbaaff1ab8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1596, "license_type": "no_license", "max_line_length": 117, "num_lines": 40, "path": "/migrations/versions/4e224649d340_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4e224649d340\nRevises: 4db11f101899\nCreate Date: 2015-12-09 15:44:31.540294\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4e224649d340'\ndown_revision = '4db11f101899'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by 
Alembic - please adjust! ###\n op.add_column('coupon', sa.Column('item_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'coupon', 'item', ['item_id'], ['id'])\n op.add_column('trial', sa.Column('coupon_id', sa.Integer(), nullable=True))\n op.drop_constraint(u'trial_ibfk_1', 'trial', type_='foreignkey')\n op.create_foreign_key(None, 'trial', 'coupon', ['coupon_id'], ['id'])\n op.drop_column('trial', 'item_id')\n op.add_column('user_coupon', sa.Column('item_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'user_coupon', 'item', ['item_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'user_coupon', type_='foreignkey')\n op.drop_column('user_coupon', 'item_id')\n op.add_column('trial', sa.Column('item_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'trial', type_='foreignkey')\n op.create_foreign_key(u'trial_ibfk_1', 'trial', 'item', ['item_id'], ['id'])\n op.drop_column('trial', 'coupon_id')\n op.drop_constraint(None, 'coupon', type_='foreignkey')\n op.drop_column('coupon', 'item_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7132681012153625, "alphanum_fraction": 0.7134119868278503, "avg_line_length": 46.745704650878906, "blob_id": "a076af7a09cdb2eb83bdb8a8f8c4659b23632a99", "content_id": "9ffbaa7b6e6bc4ca73bedc1d0408a52065bf80b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14054, "license_type": "no_license", "max_line_length": 136, "num_lines": 291, "path": "/user/urls.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\n\nfrom user.views import user_index\nfrom user.views import item_detail\nfrom user.views import item_list\nfrom user.views 
import item_filters\nfrom user.views import item_comment_list\nfrom user.views import user_fav_item\nfrom user.views import user_advice\nfrom user.views import user_order_list\nfrom user.views import order_preview\nfrom user.views import confirm_order\nfrom user.views import order_pay\nfrom user.views import repayment_pay\nfrom user.views import wx_pay_callback\nfrom user.views import wx_repayment_callback\nfrom user.views import uploads\nfrom user.views import order_detail\nfrom user.views import comment_post\nfrom user.views import my_period_bill\nfrom user.views import user_home\nfrom user.views import my_repayments\nfrom user.views import item_cats\nfrom user.views import my_favs\nfrom user.views import my_coupons\nfrom user.views import my_apply\nfrom user.views import help\nfrom user.views import repayment\nfrom user.views import get_help_entry\nfrom user.views import apply_credit_page\nfrom user.views import project_doctor_description\nfrom user.views import get_jssdk_js\nfrom user.views import get_school_list\nfrom user.views import hospital_detail\nfrom user.views import get_city_list\nfrom user.views import upload_image\nfrom user.views import apply_credit_post\nfrom user.views import apply_credit_photo\nfrom user.views import edit_name\nfrom user.views import my_item_comment_list\nfrom user.views import item_list_html\nfrom user.views import menu_credit_apply\nfrom user.views import my_order_bill\nfrom user.views import hospital_item_list\nfrom user.views import order_pay_success\nfrom user.views import repayment_pay_success\nfrom user.views import cancel_pay\nfrom user.views import cancel_order\nfrom user.views import finish_order\nfrom user.views import hospital_location\nfrom user.views import meifenfen_index\nfrom user.views import meifenfen_city\nfrom user.views import help_html\nfrom user.views import hospital_list_html\nfrom user.views import hospital_list\nfrom user.views import mei_tutorials\nfrom user.views import daily_coupons\nfrom user.views 
import tutorial_detail\nfrom user.views import receive_coupon\nfrom user.views import meifenfen_new_index\nfrom user.views import hospital_filters\nfrom user.views import resend_user_coupon\n\nfrom user.auth import get_reg_vcode\nfrom user.auth import signup_post\nfrom user.auth import get_vcode\nfrom user.auth import reset_passwd\nfrom user.auth import signup\nfrom user.auth import user_login\nfrom user.auth import user_login_post\nfrom user.auth import auth_wechat\nfrom user.auth import logout\n\n\nuser_api = Blueprint('user_api', __name__,\n template_folder='templates')\n\nuser_api.add_url_rule('/', 'user', user_index)\n\nuser_api.add_url_rule('/index/', 'meifenfen', meifenfen_new_index)\nuser_api.add_url_rule('/new/', 'meifenfen_new', meifenfen_new_index)\n\nuser_api.add_url_rule('/meifenfen_city/', 'meifenfen_city', meifenfen_city, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/item_cats/', 'item_cats', item_cats)\nuser_api.add_url_rule('/hospital_filters/', 'hospital_filters', hospital_filters)\nuser_api.add_url_rule('/item_filters/', 'item_filters', item_filters, methods=['POST', 'GET'])\nuser_api.add_url_rule('/item_list/', 'item_list', item_list, methods=['POST', 'GET'])\nuser_api.add_url_rule('/hospital_item_list/', 'hospital_item_list', hospital_item_list, methods=['POST', 'GET'])\nuser_api.add_url_rule('/item_list.html', 'item_list_html', item_list_html)\nuser_api.add_url_rule('/hospital_list.html', 'hospital_list_html', hospital_list_html)\nuser_api.add_url_rule('/hospital_list/', 'hospital_list', hospital_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/help.html', 'help_html', help_html)\n\n\nuser_api.add_url_rule('/item_detail/', 'item_detail', item_detail, methods=['POST', 'GET'])\nuser_api.add_url_rule('/hospital_detail/', 'hospital_detail', hospital_detail, methods=['POST', 'GET'])\nuser_api.add_url_rule('/hospital_location/', 'hospital_location', hospital_location, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/comment_list/', 
'item_comment_list', item_comment_list, methods=['POST', 'GET'])\nuser_api.add_url_rule('/my_item_comment_list/', 'my_item_comment_list', my_item_comment_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/comment_post/', 'item_comment_post', comment_post, methods=['POST', 'GET'])\n\n#申请额度\nuser_api.add_url_rule('/apply_credit/', 'apply_credit', user_login_post, methods=['POST', 'GET']) #申请第一步\nuser_api.add_url_rule('/apply_credit_post/', 'apply_credit_post', apply_credit_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/apply_photo/', 'apply_photo', user_login_post, methods=['POST', 'GET']) #申请第二步\n#user_api.add_url_rule('/apply_photo_post/', 'apply_photo_post', apply_credit_photo, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/order_preview/', 'order_preview', order_preview, methods=['POST', 'GET'])\nuser_api.add_url_rule('/confirm_order/', 'confirm_order', confirm_order, methods=['POST', 'GET'])\nuser_api.add_url_rule('/order_pay/', 'order_pay', order_pay, methods=['POST', 'GET'])\nuser_api.add_url_rule('/repayment_pay/', 'repayment_pay', repayment_pay, methods=['POST', 'GET']) #选择支付方式 还款\nuser_api.add_url_rule('/order_pay_post/', 'order_pay_post', user_login_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/wx_pay_callback/', 'wx_pay_callback', wx_pay_callback, methods=['POST', 'GET']) #微信支付回调\nuser_api.add_url_rule('/wx_repayment_callback/', 'wx_repayment_callback', wx_repayment_callback, methods=['POST', 'GET']) #微信还款回调\n\n#上传图片\nuser_api.add_url_rule('/uploads/', 'uploads', uploads, methods=['POST', 'GET']) #上传图片 \n\n#个人接口\nuser_api.add_url_rule('/home/', 'user_home', user_home, methods=['POST', 'GET'])\nuser_api.add_url_rule('/edit_profile/', 'edit_profile', user_login_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/my_period_bill/', 'my_period_bill', my_period_bill, methods=['POST', 'GET']) #当期账单\nuser_api.add_url_rule('/my_repayments/', 'my_repayments', my_repayments, methods=['POST', 'GET']) 
#还款历史\nuser_api.add_url_rule('/my_order_bill/', 'my_order_bill', my_order_bill, methods=['POST', 'GET']) #订单每期账单\n\nuser_api.add_url_rule('/my_apply/', 'my_apply', my_apply, methods=['POST', 'GET']) #审核进度\nuser_api.add_url_rule('/my_orders/', 'my_orders', user_order_list, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/my_coupons/', 'my_coupons', my_coupons, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/order_detail/', 'order_detail', order_detail, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/cancel_order/', 'cancel_order', cancel_order, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/cancel_pay/', 'cancel_pay', cancel_pay, methods=['POST', 'GET']) #\nuser_api.add_url_rule('/my_favs/', 'my_favs', my_favs, methods=['POST', 'GET']) #我的心愿单\nuser_api.add_url_rule('/fav_item/', 'fav_item', user_fav_item, methods=['POST', 'GET']) #我的心愿单\n\n\nuser_api.add_url_rule('/login/', 'user_logn', user_login)\nuser_api.add_url_rule('/login_post/', 'user_login_post', user_login_post, methods=['POST'])\nuser_api.add_url_rule('/get_vcode/', 'get_vcode', get_vcode, methods=['POST', 'GET'])\nuser_api.add_url_rule('/get_reg_vcode/', 'get_reg_vcode', get_reg_vcode, methods=['POST', 'GET'])\nuser_api.add_url_rule('/signup/', 'signup', signup, methods=['GET'])\nuser_api.add_url_rule('/signup_post/', 'signup_post', signup_post, methods=['POST', 'GET'])\nuser_api.add_url_rule('/reset_passwd/', 'reset_passwd', reset_passwd, methods=['POST', 'GET'])\nuser_api.add_url_rule('/logout/', 'logout', logout, methods=['POST', 'GET'])\nuser_api.add_url_rule('/auth', 'auth_wechat', auth_wechat, methods=['POST', 'GET'])\n\n#帮助\nuser_api.add_url_rule('/help/', 'help', help, methods=['POST', 'GET'])\nuser_api.add_url_rule('/get_help_entry/', 'get_help_entry', get_help_entry, methods=['POST', 'GET'])\nuser_api.add_url_rule('/advice/', 'advice', user_advice, methods=['POST', 'GET'])\n\n\nuser_api.add_url_rule('/apply_credit_page/', 'apply_credit_page', apply_credit_page, methods=['POST', 
'GET'])\n\nuser_api.add_url_rule('/project_doctor_description/', 'project_doctor_description', project_doctor_description, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/jssdk.js', 'get_jssdk_js', get_jssdk_js, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_school_list/', 'get_school_list', get_school_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/repayment/', 'repayment', repayment, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_city_list/', 'get_city_list', get_city_list, methods=['POST', 'GET'])\n\n\nuser_api.add_url_rule('/upload_image/', 'upload_image', upload_image, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/edit_name/', 'edit_name', edit_name, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/menu_credit_apply/', 'menu_credit_apply', menu_credit_apply, methods=['POST', 'GET'])\n\n\nuser_api.add_url_rule('/order_pay_success/', 'order_pay_success', order_pay_success)\nuser_api.add_url_rule('/repayment_pay_success/', 'repayment_pay_success', repayment_pay_success)\n\nuser_api.add_url_rule('/finish_order/', 'finish_order', finish_order, methods=['POST', 'GET'])\n\n\n\nfrom user.trial import trial_list\nfrom user.trial import my_trial_list\nfrom user.trial import comment_trial\nfrom user.trial import apply_trial\nfrom user.trial import trial_applyers\nfrom user.trial import trial_comment_list\n\nfrom user.trial import get_trial_detail\nfrom user.trial import get_history_apply\n\n#试用\nuser_api.add_url_rule('/trial_list/', 'trial_list', trial_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/my_trial_list/', 'my_trial_list', my_trial_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/comment_trial/', 'comment_trial', comment_trial, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/apply_trial/', 'apply_trial', apply_trial, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/trial_applyers/', 'trial_applyers', trial_applyers, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/trial_comment_list/', 
'trial_comment_list', trial_comment_list, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_trial_detail/', 'get_trial_detail', get_trial_detail, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/get_history_apply/', 'get_history_apply', get_history_apply, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/mei_tutorials/', 'mei_tutorials', mei_tutorials, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/daily_coupons/', 'daily_coupons', daily_coupons, methods=['POST','GET'])\n\nuser_api.add_url_rule('/tutorial_detail/', 'tutorial_detail', tutorial_detail, methods=['POST','GET'])\n\nuser_api.add_url_rule('/receive_coupon/', 'receive_coupon', receive_coupon, methods=['POST','GET'])\n\nuser_api.add_url_rule('/resend_user_coupon/', 'resend_user_coupon', resend_user_coupon, methods=['POST','GET'])\n\n\nfrom user.room_design import get_room_detail\nfrom user.room_design import apply_room\nfrom user.room_design import room_list\nfrom user.room_design import vote_room\nfrom user.room_design import room_search\nfrom user.room_design import room_index\nfrom user.room_design import add_room_pics\nfrom user.room_design import school_rooms\nfrom user.room_design import get_vote_priviledges\nfrom user.room_design import room_about\n\nuser_api.add_url_rule('/get_room_detail/', 'get_room_detail', get_room_detail, methods=['POST','GET'])\nuser_api.add_url_rule('/apply_room/', 'apply_room', apply_room, methods=['POST','GET'])\nuser_api.add_url_rule('/vote_room/', 'vote_room', vote_room, methods=['POST','GET'])\nuser_api.add_url_rule('/room_list/', 'room_list', room_list, methods=['POST','GET'])\nuser_api.add_url_rule('/room_search/', 'room_search', room_search, methods=['POST','GET'])\nuser_api.add_url_rule('/room_index/', 'room_index', room_index, methods=['POST','GET'])\n\nuser_api.add_url_rule('/add_room_pics/', 'add_room_pics', add_room_pics, methods=['POST','GET'])\n\nuser_api.add_url_rule('/school_rooms/', 'school_rooms', school_rooms, 
methods=['POST','GET'])\n\nuser_api.add_url_rule('/get_vote_priviledges/', 'get_vote_priviledges', get_vote_priviledges, methods=['POST','GET'])\nuser_api.add_url_rule('/room_about/', 'room_about', room_about, methods=['POST','GET'])\n\n\n\n\nfrom user.redpack import new_question\nfrom user.redpack import new_question_post\nfrom user.redpack import my_questions\nfrom user.redpack import question_viewers\nfrom user.redpack import question_detail\nfrom user.redpack import redpack_pay\nfrom user.redpack import wx_redpack_callback\nfrom user.redpack import redpack_index\nfrom user.redpack import question_list\n\n\nuser_api.add_url_rule('/redpack_index/', 'redpack_index', redpack_index, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/my_questions/', 'my_questions', my_questions, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/new_question/', 'new_question', new_question, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/question_detail/', 'question_detail', question_detail, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/new_question_post/', 'new_question_post', new_question_post, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/redpack_pay/', 'redpack_pay', redpack_pay, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/question_viewers/', 'question_viewers', question_viewers, methods=['POST', 'GET'])\n\nuser_api.add_url_rule('/wx_redpack_callback/', 'wx_redpack_callback', wx_redpack_callback, methods=['POST','GET'])\n\nuser_api.add_url_rule('/question_list/', 'question_list', question_list, methods=['POST','GET'])\n\nfrom user.views import set_open_id\n\nuser_api.add_url_rule('/set_open_id/', 'set_open_id', set_open_id, methods=['POST','GET'])\n\n\nfrom user.draw_money import draw_index\nfrom user.draw_money import draw_money\n\nuser_api.add_url_rule('/draw_money/', 'draw_money', draw_money, methods=['POST','GET'])\nuser_api.add_url_rule('/draw_index/', 'draw_index', draw_index, methods=['POST','GET'])\n\n\n\n\n" }, { "alpha_fraction": 
0.6566265225410461, "alphanum_fraction": 0.6907630562782288, "avg_line_length": 20.69565200805664, "blob_id": "0fbb9361fb1da24c7a45580a4b38610b860bdc28", "content_id": "36d74c86a0e40eb62406ed2c6232ea5ff12b1f8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 127, "num_lines": 23, "path": "/tasks/__init__.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom app import app as s\nfrom settings import celery\n# \n# from celery import current_app\n# result = current_app.AsyncResult(\"12212121\")\n# result.get()\n\n\[email protected]\ndef add_one():\n print 'add one test celery....'\n\n\n\[email protected]\ndef set_user(user):\n '''\n 用户 直接传model实例 当session关闭后 离线任务会报错\n DetachedInstanceError: Instance <User at 0x10641de50> is not bound to a Session; attribute refresh operation cannot proceed\n '''\n user_id = user.id\n print user.id" }, { "alpha_fraction": 0.5249085426330566, "alphanum_fraction": 0.5282859802246094, "avg_line_length": 37.96703338623047, "blob_id": "954d96159aad38c3e99fa69037abbded78540962", "content_id": "65d867446455ca2d04728399de607af28d3246ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3717, "license_type": "no_license", "max_line_length": 115, "num_lines": 91, "path": "/ops/actions.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\n\nfrom models import db\nfrom models import Item\nfrom models import Hospital\nfrom models import ItemCat\nfrom models import ItemSubCat\nfrom models import Order\nfrom ops.order import OrderService\nfrom ops.coupon import CouponService\nfrom ops.credit import CreditService\n\n\n\nclass ActionService(object):\n\n @staticmethod\n def change_order_coupon(user_id, order_id, old_coupon_id, new_coupon_id):\n ''' 更换优惠券操作 '''\n old_coupon = 
CouponService.get_user_coupon(old_coupon_id) if old_coupon_id else 0\n new_coupon = CouponService.get_user_coupon(new_coupon_id) if new_coupon_id else 0\n assert old_coupon!=None, '旧优惠券不存在'\n assert new_coupon!=None, '新优惠券不存在'\n old_coupon_price= old_coupon.price if old_coupon else 0\n new_coupon_price= new_coupon.price if new_coupon else 0\n where = and_(\n Order.coupon_id==old_coupon_id,\n Order.id==order_id,\n Order.user_id==user_id\n )\n assert (new_coupon_price or old_coupon_price), '必须至少存在一个优惠券'\n\n order = OrderService.get_user_order(order_id, user_id)\n credit = CreditService.get_user_credit(user_id)\n\n assert (new_coupon_price or 0) <= order.total, '优惠券金额不能超过订单总额'\n data = {\n 'coupon_amount' : new_coupon_price,\n 'coupon_id' : new_coupon_id,\n 'credit_cmount' : new_credit_amount,\n 'price' : order.price,\n }\n\n success = False\n diff_price = old_coupon_price - new_coupon_price\n if diff_price<0:\n #使用了一张面值更大的优惠券\n if float(order.price)+diff_price>=0:\n data['price'] = float(order.price)+diff_price\n elif order.price>0:\n data['price'] = 0\n result = CreditService.modify_credit(user_id, (float(order.price)+diff_price), commit=False)\n elif float(order.price)+diff_price<0:\n result = CreditService.modify_credit(user_id, diff_price, commit=False)\n\n if diff_price>0:\n #使用了一张面值更小的优惠券\n #需要用更大的额度 或现金\n credit_remain = credit.total - credit.used\n if order.price:\n data['price'] = order.price+diff_price\n elif credit_remain>=diff_price:\n result = CreditService.modify_credit(user_id, diff_price, commit=False)\n data['credit_amount'] = Order.credit_amount + credit_remain\n if result:\n db.session.commit()\n success = True\n else:\n db.session.rollback()\n success = False\n else:\n data['price'] = diff_price - credit_remain\n data['credit_amount'] = Order.credit_amount + credit_remain\n\n result = CreditService.modify_credit(user_id, credit_remain, commit=False)\n if result:\n db.session.commit()\n success = True\n else:\n db.session.rollback()\n success = 
False\n\n if success:\n count = OrderService.update_order(where, commit=False)\n if count:\n db.session.commit()\n return success\n \n return False\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4410722255706787, "alphanum_fraction": 0.4468320906162262, "avg_line_length": 31.474821090698242, "blob_id": "6b00f24a2aa1820178e10f324fce4db8f12c7f0b", "content_id": "4c93bd6ad3c2df2735a2af7f713fc90cebe1b106", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4776, "license_type": "no_license", "max_line_length": 172, "num_lines": 139, "path": "/static/admin/tpl/user_detail.html", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\n<style>\n label {\n width: 100px;\n }\n</style>\n<div class='app-content'>\n\n <h1>用户详情</h1>\n\n <hr>\n\n <label>美分分用户名</label> {{item.name}}\n <br>\n <label>手机号</label> {{item.phone}}\n <br>\n <label>注册时间</label> {{item.create_time}}\n <br>\n <div ng-if='apply.id'>\n <a href='#/index/apply_detail/{{apply.id}}'>申请资料</a>\n <br />\n <label>真实姓名</label> {{apply.name}}\n <br />\n <label>身份证号</label> {{apply.id_no}}\n <br />\n <label>学校</label> {{apply.school}}\n <br />\n <label>专业</label> {{apply.major}}\n <br />\n <label>地址</label> {{apply.addr}}\n <br />\n <label>入学时间</label> {{apply.enrollment_time}}\n <br />\n <label>毕业时间</label> {{apply.graduate_time}}\n </div>\n\n\n <div ng-if='wechat_info'>\n <label>微信昵称</label> {{wechat_info.nickname}}\n <br>\n <label>微信城市</label> {{wechat_info.city}}\n <br>\n <label>微信性别</label> <span ng-if='wechat_info.sex==1'>男</span><span ng-if='wechat_info.sex==2'>女</span>\n <br>\n <img src='{{wechat_info.headimgurl}}' style='width:150px'> </img>\n </div>\n\n <div class=\"form-group\" ng-if='location' ng-controller=\"AmapCtrl\">\n <label>首次定位地址</label>\n <position-field item='location' ng-if='location'></position-field>\n </div>\n\n <br/>\n <br/>\n <table class=\"table table-striped col-lg-3\" style='width:36%;' ng-if='user_orders.length>0'>\n <caption 
style='text-align: right'>\n <a href='#/index/order_list?keyword={{item.phone}}'>查看{{item.name}}的全部{{user_orders_total}}个订单</a>\n </caption>\n <thead>\n <tr>\n <th>id</th>\n <th>状态</th>\n <th>下单时间</th>\n </tr>\n </thead>\n <tbody>\n <tr ng-repeat=\"info in user_orders\">\n <td>{{info.id}}</td>\n <td>{{info.status_label}}</td>\n <td>{{info.create_time}}</td>\n </tr>\n </tbody>\n</table>\n <span ng-if='infos.length>0'>相同微信号注册用户</span>\n <div ng-if='infos.length>0'>\n <table class=\"table table-hover\" ng-if=\"true\" style='margin:0'>\n <thead>\n <tr>\n <th>序号</th>\n <th>名称</th>\n <th>手机号</th>\n <th>微信性别</th>\n <th>微信头像</th>\n <th>微信昵称</th>\n <th>微信城市</th>\n <th>首次定位</th>\n <th>注册时间</th>\n <th>重复注册数</th>\n <th>推广员</th>\n <th>操作</th>\n </tr>\n </thead>\n <tbody>\n <tr ng-repeat=\"info in infos\">\n <td>{{info.id}}</td>\n <td>{{info.name}}</td>\n\n <td>{{info.phone}}</td>\n\n <td>\n <span ng-if='info.wechat_info.sex==1'>男</span>\n <span ng-if='info.wechat_info.sex==2'>女</span>\n </td>\n\n <td>\n <img nf-if='info.wechat_info' class='user-list-avatar' style='width:50px' src={{info.wechat_info.headimgurl}}></img>\n </td>\n <td>{{info.wechat_info.nickname}}</td>\n <td>{{info.wechat_info.city}}</td>\n <td lng='{{info.wechat_info.lnglat}}'>{{info.wechat_info.location}}</td>\n <td>{{info.create_time}}</td>\n <td>{{info.same_user_count}}</td>\n <td><span ng-if='info.promoter'><a href='#/index/user_list?page=1&promoter_id={{info.parent.id}}'>{{info.parent.name}}</a>({{info.promoter.name}})</span></td>\n <td class='item-actions'>\n <a href='#/index/user_detail/{{info.id}}'>详情</a>\n </td>\n </tr>\n </tbody>\n </table>\n <nav>\n <ul class=\"pagination\" ng-if=\"page_info.total>1\">\n <li>\n <a href=\"#\" aria-label=\"Previous\" ng-click='routeTo(1)'>\n <span aria-hidden=\"true\">&laquo;</span>\n </a>\n </li>\n <li ng-repeat=\"page in page_info.pages\" ng-class=\"[{active: currentpage==page}]\">\n <a ng-click='routeTo(page)' ng-if='page>0'>{{page}}</a>\n <span ng-if='!(page>0)' 
style='color:gray;'>...</span>\n <li>\n <li>\n <a href=\"#\" aria-label=\"Next\" ng-click='routeTo(page_info.total)'>\n <span aria-hidden=\"true\">&raquo;</span>\n </a>\n </li>\n </ul>\n </nav>\n </div>\n\n</div>" }, { "alpha_fraction": 0.47336769104003906, "alphanum_fraction": 0.4936998784542084, "avg_line_length": 17.76344108581543, "blob_id": "8aed3f90ba4cee3ff5dac700de8b75ea7b45ef91", "content_id": "552049e5ff8af0ef2e7634c377cea9daf6f00401", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5744, "license_type": "no_license", "max_line_length": 105, "num_lines": 186, "path": "/setting/help.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\n\n\n\ncats = [\n {'id':1, 'name':'分期问题'},\n {'id':2, 'name':'帐单还款'},\n {'id':3, 'name':'售后服务'}\n ]\n\n\nquestions = [\n {\n 'cat_id' : 1,\n 'title' : '哪些人能分期',\n 'content': '美分分的分期目前仅限全国正规统招全日制在校研究生、本科以及专科的学生。'\n },\n {\n 'cat_id' : 1,\n 'title' : '最多能分几期?',\n 'content': '''您可选的最多分期期数取决于您的毕业时间,您需要在毕业前六个月将最后一期还清,如下:\n<style>\n table {\n width: 100%;\n border: 0;\n text-align: center;\n }\n</style>\n<table border=\"1\">\n<tr>\n <th>学制</th>\n <td>年级</td>\n <td>最多分期数</td>\n</tr>\n\n<tr>\n <th rowspan=\"3\">大专(三年制)</th>\n <td>专一</td>\n <td>24期</td>\n</tr>\n<tr>\n <td>专二</td>\n <td>16期</td>\n</tr>\n<tr>\n <td>专三</td>\n <td>4期</td>\n</tr>\n\n<tr>\n <th rowspan=\"4\">本科(四年制)</th>\n <td>大一</td>\n <td>24期</td>\n</tr>\n<tr>\n <td>大二</td>\n <td>24期</td>\n</tr>\n<tr>\n <td>大三</td>\n <td>16期</td>\n</tr>\n<tr>\n <td>大四</td>\n <td>4期</td>\n</tr>\n\n<tr>\n <th rowspan=\"2\">研究生(两年制)</th>\n <td>研一</td>\n <td>16期</td>\n</tr>\n<tr>\n <td>研二</td>\n <td>4期</td>\n</tr>\n\n<tr>\n <th rowspan=\"3\">研究生(三年制)</th>\n <td>研一</td>\n <td>24期</td>\n</tr>\n<tr>\n <td>研二</td>\n <td>16期</td>\n</tr>\n<tr>\n <td>研三</td>\n <td>4期</td>\n</tr>\n\n\n</table>'''\n },\n {\n 'cat_id' : 1,\n 'title' : '如何分期下单',\n 'content': 
'''1、通过美分分微信公众号找到你想要的项目\n2、选择你的分期计划,然后点击[立即购买]进入到提交订单页面\n3、如还未申请消费额度需要先[申请额度],提交相关申请信息资料\n4、确认订单以及分期金额,如项目总价超过预计额度6000元,需首付超出部分\n5、提交支付订单即可分期下单成功'''\n },\n {\n 'cat_id' : 1,\n 'title' : '申请额度需要哪些资料',\n 'content': '''1、填写基础信息(姓名、身份证号、学校校区、入学时间、学历、专业、学号、宿舍详细地址、学信网账号、学信网密码、父或母联系号码)\n2、上传手持证件照片(手持身份证照片、手持学生证照片)'''\n },\n {\n 'cat_id' : 1,\n 'title' : '额度不够怎么办',\n 'content': '当额度不够时,需要你首付超出部分(项目总价-你的额度)'\n },\n {\n 'cat_id' : 1,\n 'title' : '下单成功后如何前往服务',\n 'content': '''1、打电话到医院进行预约,确认手术时间。注意:预约不需要提供服务码,只需要提供手机号、姓名即可\n2、按照预约时间前往医院,通过服务码、身份证验证后,即可进行手术'''\n },\n {\n 'cat_id' : 1,\n 'title' : '为什么会额度申请失败',\n 'content': '''如果出现额度申请失败,有可能是以下原因:\n1、填写的基础信息错误或不真实\n2、上传的手持证件照片不清晰\n3、提交的资料和学信网不一致'''\n },\n {\n 'cat_id' : 1,\n 'title' : '额度还会提高吗',\n 'content': '''会的,美分分会根据你的信息资料和账单还款情况,适时为你调高消费额度'''\n },\n {\n 'cat_id' : 2,\n 'title' : '什么时候需要还款',\n 'content': '在你手术完成后即生成账单,并记录在下期账单里,你将有至少30天的时间然后开始还款,每期账单还款截止时间为次月1日'\n },\n {\n 'cat_id' : 2,\n 'title' : '如何还款',\n 'content': '你可以通过微信支付进行还款'\n },\n {\n 'cat_id' : 2,\n 'title' : '可以一次性多还吗',\n 'content': '可以的,你可以在我要还款里分别对账单进行还款'\n },\n {\n 'cat_id' : 2,\n 'title' : '账单逾期会怎样',\n 'content': '如在还款日前未及时清还应还账单,将产生滞纳金,每日滞纳金为当月应还金额的1%,同时逾期记录会记录在您的信用记录中,建议及时还款,珍惜个人信用'\n },\n {\n 'cat_id' : 2,\n 'title' : '拒绝还款会怎样',\n 'content': '如果您存在恶意拖欠行为,将会对您的信用记录产生负面影响,这将直接影响您未来购房购车和一切与信用有关的行为。同时,我们也会保留根据授信合同采取司法手段追回逾期款项的权利。'\n },\n {\n 'cat_id' : 3,\n 'title' : '如何取消订单',\n 'content': '未预约订单您可以在个人-我的订单里找到相应订单进行取消操作'\n },\n {\n 'cat_id' : 3,\n 'title' : '取消订单需要费用吗',\n 'content': '未手术订单取消是不需要费用的,取消后会及时恢复您的额度,如有首付款项也会原路返还'\n },\n {\n 'cat_id' : 3,\n 'title' : '取消订单后首付怎么办',\n 'content': '如取消订单,首付款项会原路返还'\n },\n {\n 'cat_id' : 3,\n 'title' : '预约后可以取消订单吗?',\n 'content': '已成功预约的订单,如想取消,需先打电话给医院取消预约,后方可操作取消订单'\n },\n {\n 'cat_id' : 3,\n 'title' : '手术完成后有问题怎么办',\n 'content': '如手术完成后出现问题或异议,你都可以拨打我们的客服电话进行反馈核实,我们将协助你共同和医院协商解决'\n },\n\n]\n\n\n" }, { "alpha_fraction": 0.5786516666412354, "alphanum_fraction": 0.584269642829895, "avg_line_length": 21.375, "blob_id": 
"78f9d330f4c35beff35c1c45b922c1a05c707d34", "content_id": "2252b36a805eb9fb32b2a9592a47765902146303", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 178, "license_type": "permissive", "max_line_length": 69, "num_lines": 8, "path": "/static/mobile/js/choujiang.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "jQuery(document).ready(function($) {\n\t\n $('.box li:nth-child(5)').click(function(event) {\n \t $(this).addClass('current').siblings().removeClass('current')\n\n });\n\n});" }, { "alpha_fraction": 0.5857197642326355, "alphanum_fraction": 0.5973653793334961, "avg_line_length": 26.424083709716797, "blob_id": "a8d23c30af7910a3a2df1d15afded05bd92c519f", "content_id": "50512a19648fa38a1158982ac41d567e37493b05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5434, "license_type": "permissive", "max_line_length": 342, "num_lines": 191, "path": "/static/user/js/coupon.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "//未使用\nvar has_more1;\nvar offset1=undefined;\nvar has_more2;\nvar offset2=undefined;\nvar has_more3;\nvar offset3=undefined;\nfunction donCoupon(){\n\tif(offset1){\n\t\tvar Url=\"http://\"+getHostName()+\"/user/my_coupons/?cat=1\"+token+\"&offset=\"+offset1\n\t}else{\n\t\tvar Url=\"http://\"+getHostName()+\"/user/my_coupons/?cat=1\"+token;\n\t}\n\t$.ajax({\n\t\txhrFields: {withCredentials: true},\n\t\ttype:\"post\",\n\t\turl:Url,\n\t\tdataType:'json',\n\t\tsuccess:function(data){\n\t\t\tvar span='';\n\t\t\tvar infos=data.infos;\n\t\t\tif(infos.length==0){\n\t\t\t\tif(!offset1){\n\t\t\t\t\t$('#item1mobile .noCoupon').show();\t\n\t\t\t\t}\n\t\t\n\t\t\t}\n\t\t\thas_more1=data.has_more;\n\t\t\toffset1=data.offset;\n\t\t\tfor(var i=0;i<infos.length;i++){\n\t\t\t\tvar 
scope=infos[i].cat_str;\t\t\t\n\t\t\t\tif(infos[i].is_trial==1){\n\t\t\t\t\tspan=\"<span>全免</span>\"\n\t\t\t\t}else{\t\t\t\t\t\n\t\t\t\t\tspan=\"<span>¥\"+infos[i].price+\"</span>\"\n\t\t\t\t}\t\n\t\t\t\tstr=$('<div class=\"coupon-cont\"><div class=\"fl yellow\">'+span+'</div><div class=\"fr\"><ul><li><h4>'+infos[i].title+'</h4></li><li><span class=\"color-grey\">使用范围:</span><span class=\"color-black\">'+scope+'</span></li><li><span class=\"color-grey\">有效期:</span><span class=\"color-black\">'+infos[i].remain_str+'</span></li></ul></div></div>');\n\t\t\t\tif (i>0) {\n\t\t\t\t\tstr.addClass('has-m-top');\n\t\t\t\t};\n\t\t\t\t$('#item1mobile').append(str);\n\t\t\t}\n\t\t},error:function(){\n\t\t\talert('网络开小差了');\n\t\t}\n\t});\n}\n\n//已经使用\nfunction useCoupon(){\n\tif(offset2){\n\t\tvar Url=\"http://\"+getHostName()+\"/user/my_coupons/?cat=2\"+token+\"&offset=\"+offset2\n\t}else{\n\t\tvar Url=\"http://\"+getHostName()+\"/user/my_coupons/?cat=2\"+token;\n\t}\n\t$.ajax({\n\t\txhrFields: {withCredentials: true},\n\t\ttype:\"post\",\n\t\turl:Url,\n\t\tdataType:'json',\n\t\tsuccess:function(data){\n\t\t\tvar span='';\n\t\t\tvar infos=data.infos;\n\t\t\tif(infos.length==0){\n\t\t\t\tif(!offset2){\n\t\t\t\t\t$('#item2mobile .noCoupon').show();\t\n\t\t\t\t}\n\t\t\n\t\t\t}\n\t\t\thas_more2=data.has_more;\n\t\t\toffset2=data.offset;\t\t\n\t\t\tfor(var i=0;i<infos.length;i++){\n\t\t\t\tvar scope=infos[i].cat_str;\n\t\t\t\tif(infos[i].is_trial==0){\n\t\t\t\t\tspan=\"<span>全免</span>\"\n\t\t\t\t}else{\t\t\t\t\t\n\t\t\t\t\tspan=\"<span>¥\"+infos[i].price+\"</span>\"\n\t\t\t\t}\n\t\t\t\tstr=$('<div class=\"coupon-cont\"><div class=\"fl blue\">'+span+'</div><div class=\"fr used\"><ul><li><h4>'+infos[i].title+'</h4></li><li><span class=\"color-grey\">使用范围:</span><span class=\"color-black\">'+scope+'</span></li><li><span class=\"color-grey\">有效期:</span><span class=\"color-black\">'+infos[i].remain_str+'</span></li></ul></div></div>');\n\t\t\t\tif (i>0) 
{\n\t\t\t\t\tstr.addClass('has-m-top');\n\t\t\t\t};\n\t\t\t\t$('#item2mobile').append(str);\n\t\t\t}\n\t\t},error:function(){\n\t\t\talert('网络开小差了');\n\t\t}\n\t});\n}\n\n//已经过期\nfunction overdueCoupon(){\n\tif(offset3){\n\t\tvar Url=\"http://\"+getHostName()+\"/user/my_coupons/?cat=3\"+token+\"&offset=\"+offset3\n\t}else{\n\t\tvar Url=\"http://\"+getHostName()+\"/user/my_coupons/?cat=3\"+token;\n\t}\n\t$.ajax({\n\t\txhrFields: {withCredentials: true},\n\t\ttype:\"post\",\n\t\turl:\"http://\"+getHostName()+\"/user/my_coupons/?cat=3\"+token,\n\t\tdataType:'json',\n\t\tsuccess:function(data){\t\n\t\t\tvar span='';\n\t\t\tvar infos=data.infos;\n\t\t\tif(infos.length==0){\n\t\t\t\tif(!offset3){\n\t\t\t\t\t$('#item3mobile .noCoupon').show();\t\n\t\t\t\t}\n\t\t\n\t\t\t}\n\t\t\thas_more3=data.has_more;\n\t\t\toffset3=data.offset;\t\n\t\t\tfor(var i=0;i<infos.length;i++){\n\t\t\t\tvar scope=infos[i].cat_str;\t\t\t\n\t\t\t\tif(infos[i].is_trial==0){\n\t\t\t\t\tspan=\"<span>全免</span>\"\n\t\t\t\t}else{\t\t\t\t\t\n\t\t\t\t\tspan=\"<span>¥\"+infos[i].price+\"</span>\"\n\t\t\t\t}\n\t\t\t\tstr=$('<div class=\"coupon-cont\"><div class=\"fl grey\">'+span+'</div><div class=\"fr pased\"><ul><li><h4>'+infos[i].title+'</h4></li><li><span class=\"color-grey\">使用范围:</span><span class=\"color-black\">'+scope+'</span></li><li><span class=\"color-grey\">有效期:</span><span class=\"color-black\">'+infos[i].remain_str+'</span></li></ul></div></div>');\n\t\t\t\tif (i>0) {\n\t\t\t\t\tstr.addClass('has-m-top');\n\t\t\t\t};\n\t\t\t\t$('#item3mobile').append(str);\n\t\t\t}\n\t\t},error:function(){\n\t\t\talert('网络开小差了');\n\t\t}\n\t});\n}\n\n\ndonCoupon()\nuseCoupon()\noverdueCoupon()\n\n\t\t\t//获取滚动条当前的位置 \n\t\t\tfunction getScrollTop() { \n\t\t\t\tvar scrollTop = 0; \n\t\t\t\tif (document.documentElement && document.documentElement.scrollTop) { \n\t\t\t\t\tscrollTop = document.documentElement.scrollTop; \n\t\t\t\t} \n\t\t\t\telse if (document.body) { \n\t\t\t\t\tscrollTop = document.body.scrollTop; 
\n\t\t\t\t} \n\t\t\t\treturn scrollTop; \n\t\t\t} \n\t\t\t\n\t//获取当前可是范围的高度 \n\tfunction getClientHeight() { \n\t\tvar clientHeight = 0; \n\t\tif (document.body.clientHeight && document.documentElement.clientHeight) { \n\t\t\tclientHeight = Math.min(document.body.clientHeight, document.documentElement.clientHeight); \n\t\t} \n\t\telse { \n\t\t\tclientHeight = Math.max(document.body.clientHeight, document.documentElement.clientHeight); \n\t\t} \n\t\treturn clientHeight; \n\t} \n\t\n\t//获取文档完整的高度 \n\tfunction getScrollHeight() { \n\t\treturn Math.max(document.body.scrollHeight, document.documentElement.scrollHeight); \n\t};\n\n\twindow.onscroll = function () { \n\t\tif (getScrollTop() + getClientHeight() == getScrollHeight()) { \n\t\t\tif (has_more1) {\n\t\t\t\tdonCoupon()\n\t\t\t};\n\t\t\tif (has_more2) {\n\t\t\t\tuseCoupon()\n\t\t\t};\n\t\t\tif (has_more3) {\n\t\t\t\toverdueCoupon()\n\t\t\t};\n\t\t} \n\t} \n\t//新增转增优惠券功能\n\t\t\t$('#segmentedControl .mui-control-item').on('tap',function(){\n\t\t\t\tvar index=$(this).index();\n\t\t\t\tvar $mui_bar=$('nav.mui-bar');\n\t\t\t\tif(index!=0){\n\t\t\t\t\t$mui_bar.hide();\n\t\t\t\t\t$('.mui-content').css(\"padding-bottom\",\"0px\")\n\t\t\t\t}else{\n\t\t\t\t\t$mui_bar.show();\n\t\t\t\t\t$('.mui-content').css(\"padding-bottom\",\"50px\")\n\t\t\t\t}\n\t\t\t})\n" }, { "alpha_fraction": 0.6159121990203857, "alphanum_fraction": 0.6817558407783508, "avg_line_length": 25.035715103149414, "blob_id": "0ee3447b677f773bfba60293fc6af38b8d5ea016", "content_id": "fba16dc9f4d6f68a1b5a0df4068e6dd5ea8bf30a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/migrations/versions/54d0e0d4445b_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 54d0e0d4445b\nRevises: 4f4ce8bff86a\nCreate Date: 2016-01-11 15:39:22.872865\n\n\"\"\"\n\n# revision identifiers, 
used by Alembic.\nrevision = '54d0e0d4445b'\ndown_revision = '4f4ce8bff86a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('city', sa.Column('amap_code', sa.String(length=30), nullable=True))\n op.create_unique_constraint(None, 'city', ['amap_code'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'city', type_='unique')\n op.drop_column('city', 'amap_code')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6530953645706177, "alphanum_fraction": 0.6876742839813232, "avg_line_length": 43.82500076293945, "blob_id": "bfcd8d3c68cc0ca3130a06fbfec63de90c2d45d7", "content_id": "e844f0273834b7abf3119ad32422bf1112c77089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1793, "license_type": "no_license", "max_line_length": 128, "num_lines": 40, "path": "/migrations/versions/4593874013ba_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4593874013ba\nRevises: 282274e764ca\nCreate Date: 2015-12-05 16:48:09.332665\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4593874013ba'\ndown_revision = '282274e764ca'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('promoter_qrcode', sa.Column('qrcode_id', sa.Integer(), nullable=True))\n op.drop_index('ix_promoter_qrcode_scene_id', table_name='promoter_qrcode')\n op.create_foreign_key(None, 'promoter_qrcode', 'qrcode', ['qrcode_id'], ['id'])\n op.drop_column('promoter_qrcode', 'scene_id')\n op.add_column('qr_code_user', sa.Column('qrcode_id', sa.Integer(), nullable=True))\n op.drop_index('ix_qr_code_user_scene_id', table_name='qr_code_user')\n op.create_foreign_key(None, 'qr_code_user', 'qrcode', ['qrcode_id'], ['id'])\n op.drop_column('qr_code_user', 'scene_id')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('qr_code_user', sa.Column('scene_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'qr_code_user', type_='foreignkey')\n op.create_index('ix_qr_code_user_scene_id', 'qr_code_user', ['scene_id'], unique=False)\n op.drop_column('qr_code_user', 'qrcode_id')\n op.add_column('promoter_qrcode', sa.Column('scene_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'promoter_qrcode', type_='foreignkey')\n op.create_index('ix_promoter_qrcode_scene_id', 'promoter_qrcode', ['scene_id'], unique=False)\n op.drop_column('promoter_qrcode', 'qrcode_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.62848299741745, "alphanum_fraction": 0.6888545155525208, "avg_line_length": 22.925926208496094, "blob_id": "9052b7c5f95b9c4c4a48d5f63be210f0f5fc022b", "content_id": "f0e128b1c169af6849b766aa106d92f6912ec13d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 112, "num_lines": 27, "path": "/migrations/versions/36d5b6be1479_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 36d5b6be1479\nRevises: 4a4cc4517bb\nCreate Date: 
2015-11-11 16:43:11.679555\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '36d5b6be1479'\ndown_revision = '4a4cc4517bb'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nimport models\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('activity_item', sa.Column('price', models.MoneyField(precision=10, scale=2), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('activity_item', 'price')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6475903391838074, "alphanum_fraction": 0.6942771077156067, "avg_line_length": 24.538461685180664, "blob_id": "97c43cc5f023aa41fb2833055778f50526ea6d18", "content_id": "47d46b9c9813d3cc37088e9f28c66dd108da5d7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 102, "num_lines": 26, "path": "/migrations/versions/48dc9132c95d_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 48dc9132c95d\nRevises: 492dcdfca828\nCreate Date: 2016-01-06 14:09:01.813368\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '48dc9132c95d'\ndown_revision = '492dcdfca828'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('beauty_entry', sa.Column('status', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('beauty_entry', 'status')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6210235357284546, "alphanum_fraction": 0.6735823154449463, "avg_line_length": 24.821428298950195, "blob_id": "d1d4d795e29f43caef720eaf30a6b313fc894b07", "content_id": "d60ad5834acb030ff78b63666bb44768423d4b9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 723, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/migrations/versions/92d3f0175a1_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 92d3f0175a1\nRevises: 33a1596e092f\nCreate Date: 2015-11-26 17:26:27.429010\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '92d3f0175a1'\ndown_revision = '33a1596e092f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('city', sa.Column('city_code', sa.String(length=30), nullable=True))\n op.create_unique_constraint(None, 'city', ['city_code'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'city', type_='unique')\n op.drop_column('city', 'city_code')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5928223133087158, "alphanum_fraction": 0.6043420433998108, "avg_line_length": 27.200000762939453, "blob_id": "9f438e3f587b04f4fba4d3a05506dd711ddbb28c", "content_id": "a896dd4796598c4c0ee1b1c7eae9cf76d699c031", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2527, "license_type": "no_license", "max_line_length": 72, "num_lines": 80, "path": "/setting/celeryconfig.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import timedelta\n\nfrom celery.schedules import crontab\nfrom settings import MAIN_MYSQL_URI\nfrom settings import REDIS_PORT\nfrom celery import platforms\nplatforms.C_FORCE_ROOT = True\n\nBROKER_URL = 'amqp://monitor:[email protected]:5672/'\nif os.environ.get('APP_ENV')=='local':\n from setting.local import *\nelif os.environ.get('APP_ENV')=='dev':\n from setting.dev import *\nelif os.environ.get('APP_ENV')=='production':\n from setting.production import *\nelse:\n from setting.local import *\n\n\nprint BROKER_URL\n\nCELERYD_CONCURRENCY = 3\nCELERYD_MAX_TASKS_PER_CHILD = 1\n\nCELERYBEAT_SCHEDULE = {\n 'refresh_access_token': {\n 'task': 'thirdparty.wechat.refresh_access_token',\n 'schedule': timedelta(seconds=3200),\n 'args': (),\n },\n}\n\n#CELERY_RESULT_BACKEND = 'db+'+MAIN_MYSQL_URI\nCELERY_IGNORE_RESULT = True\n#CELERY_RESULT_BACKEND = 'amqp://'\nCELERY_RESULT_BACKEND = BROKER_URL\n\n# BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 86400*7}\n# \n# CELERY_DEFAULT_QUEUE = \"default_klbb\" # 默认的队列,如果一个消息不符合其他的队列就会放在默认队列里面\n# \n# CELERY_QUEUES = {\n# \"default_klbb\": { # 这是上面指定的默认队列\n# \"exchange\": \"default_klbb\",\n# \"exchange_type\": \"direct\",\n# \"routing_key\": \"default_klbb\"\n# },\n# \"topicqueue\": { # 这是一个topic队列 凡是topictest开头的routing key都会被放到这个队列\n# 
\"routing_key\": \"topictest.#\",\n# \"exchange\": \"topic_exchange\",\n# \"exchange_type\": \"topic\",\n# },\n# \"test\": { # test和test2是2个fanout队列,注意他们的exchange相同\n# \"exchange\": \"broadcast_tasks\",\n# \"exchange_type\": \"fanout\",\n# \"binding_key\": \"broadcast_tasks\",\n# }\n# }\n# \n# class MyRouter(object):\n# \n# def route_for_task(self, task, args=None, kwargs=None):\n# \n# if task.startswith('topictest'):\n# return {\n# 'queue': 'topicqueue',\n# }\n# # 我的dongwm.tasks文件里面有2个任务都是test开头\n# elif task.startswith('klbb.tasks.test'):\n# return {\n# \"exchange\": \"broadcast_tasks\",\n# }\n# # 剩下的其实就会被放到默认队列\n# else:\n# return None\n# \n# # CELERY_ROUTES本来也可以用一个大的含有多个字典的字典,但是不如直接对它做一个名称统配\n# CELERY_ROUTES = (MyRouter(), )\n\n" }, { "alpha_fraction": 0.6412556171417236, "alphanum_fraction": 0.7055306434631348, "avg_line_length": 24.730770111083984, "blob_id": "e840f84837a02cbaa80e88ee40ef9d2de43732cf", "content_id": "244a965885acbc7554f2584ff295d5db01a2908a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 101, "num_lines": 26, "path": "/migrations/versions/345ee23bca8_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 345ee23bca8\nRevises: 480dd7e7caac\nCreate Date: 2015-12-09 17:48:46.918282\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '345ee23bca8'\ndown_revision = '480dd7e7caac'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('trial_apply', sa.Column('status', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('trial_apply', 'status')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6190780997276306, "alphanum_fraction": 0.6549295783042908, "avg_line_length": 32.23404312133789, "blob_id": "5f0847e11379320237bc77295a247f5dab404b6c", "content_id": "c103ffc6e0a620c15facd81b531b0f13289c3b57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 63, "num_lines": 47, "path": "/migrations/versions/3d1f1303d3e0_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3d1f1303d3e0\nRevises: 37878b76721\nCreate Date: 2016-03-03 09:31:04.874943\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3d1f1303d3e0'\ndown_revision = '37878b76721'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('rd_qrcode_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('qrcode_id', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['qrcode_id'], ['qrcode.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('rd_user_qrcode',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('qrcode_id', sa.Integer(), nullable=True),\n sa.Column('follow_count', sa.Integer(), nullable=True),\n sa.Column('reg_count', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['qrcode_id'], ['qrcode.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('user_id', 'qrcode_id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto 
generated by Alembic - please adjust! ###\n op.drop_table('rd_user_qrcode')\n op.drop_table('rd_qrcode_user')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5956385135650635, "alphanum_fraction": 0.5987029671669006, "avg_line_length": 38.367977142333984, "blob_id": "1bbe0e752ca42e27f4ff05ea940273142dd80128", "content_id": "25fc9bfec7430d6eb37c95adf2c8ee9946575fba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14492, "license_type": "no_license", "max_line_length": 130, "num_lines": 356, "path": "/user/redpack.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nfrom flask import request\nfrom flask import redirect\nfrom flask import render_template\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom models import db\nfrom models import RedpackQuestion\nfrom models import RedpackUserQuestion\nfrom models import RedpackPay\nfrom models import RedpackPayUser\n\nfrom util.utils import jsonify_response\nfrom util.utils import random_str\nfrom util.utils import str_to_int_list\nfrom util.utils import random_redpack_price\nfrom util.utils import keep_fields_from_list\nfrom util.utils import random_redpack_price\nfrom util.decorators import wechat_loggin_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.validators import FloatField\nfrom util.validators import IntChoiceField\nfrom util.sign import sign_user\nfrom util.sign import set_cookie\nfrom util.sign import del_cookie\nfrom ops.bulks import fetch_user_refs\nfrom ops.bulks import fetch_qrcodeuser_refs\nfrom ops.log import LogService\nfrom ops.order import OrderService\nfrom ops.redpack import RedpackService\nfrom ops.promote import PromoteService\n\nfrom thirdparty.wechat import exchange_code_for_token\nfrom thirdparty.wx_pay import 
Notify_pub\nfrom thirdparty.wx_pay import get_redpack_pay_params\nfrom constants import REDPACK_PAY_STATUS\nfrom constants import PAY_METHOD\nfrom constants import ResponseCode\nfrom ops.tasks import send_redpack_after_pay\nfrom settings import DEFAULT_IMAGE\n\n\n@wechat_loggin_dec(required=False, need_openid=True, validator=None)\ndef redpack_index():\n ''' '''\n user = RedpackService.get_qruser_by_openid(request.open_id)\n has_followed = False\n avatar = ''\n question_count = 0\n if user:\n has_followed = True if user.nickname else False\n avatar = user.headimgurl\n question_count = RedpackService.count_user_question(user.id)\n if not has_followed: PromoteService.set_user_sex.delay(request.open_id)\n else:\n qrcode_id = None\n open_id = request.open_id\n PromoteService.log_qr_user(qrcode_id, open_id, -1)\n PromoteService.set_user_sex.delay(open_id)\n need_login = question_count>=5 and not request.user_id\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_followed' : has_followed,\n 'question_count': question_count,\n 'avatar' : avatar or DEFAULT_IMAGE,\n 'need_login' : need_login\n }\n return render_template('user/redpack_index.html', **result)\n\n\ndef new_question():\n ''' '''\n return render_template('user/create_question.html')\n\n\ndef question_list():\n ''' '''\n where = RedpackQuestion.status==1\n fields = ['id', 'content']\n has_more, infos = RedpackService.get_paged_redpack_questions(where=where, limit=50, fields=fields)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : infos\n }\n return jsonify_response(result)\n\n\nnew_question_post_validator = Inputs(\n {\n 'question_id' : Optional(IdField(msg='请选择问题')),\n 'question' : TextField(min_length=1, max_length=100, msg='请输入问题内容'),\n 'answer' : TextField(min_length=1, max_length=100, msg='请输入答案内容'),\n 'is_random' : IntChoiceField(choices=[0,1], msg='是否随机奖励'),\n 'is_custom' : IntChoiceField(choices=[0,1], msg='是否自定义问题'),\n 'price' : 
Optional(FloatField(msg='请输入红包金额')),\n }\n)\n@wechat_loggin_dec(required=False, need_openid=True, validator=new_question_post_validator)\ndef new_question_post():\n ''' 创建收红包问答 '''\n question_id = request.valid_data.get('question_id') or None\n question = request.valid_data.get('question')\n answer = request.valid_data.get('answer')\n is_random = request.valid_data.get('is_random')\n is_custom = request.valid_data.get('is_custom')\n price = request.valid_data.get('price') or None\n question = (question or '').replace('xianpeng', '王前发')\n question = (question or '').replace('显鹏', '王前发')\n if not is_random: assert price and 1<=price<=200, '请输入红包金额,范围在1~200'\n else: price = random_redpack_price()\n\n if not is_custom: assert question, '请输入问题内容'\n\n user = RedpackService.get_qruser_by_openid(request.open_id)\n\n assert user and user.nickname, '请先关注我们的微信'\n\n if RedpackService.count_user_question(user.id)>5: assert request.user_id, '想收取更多红包,请先注册成我们的用户吧'\n question_id = RedpackService.create_user_question(user.id, question_id, question, answer, is_custom, is_random, price)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'user_question_id' : question_id,\n 'msg' : '创建成功'\n }\n return jsonify_response(result)\n\n\n\nmy_questions_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='请输入问题内容')),\n }\n)\n@wechat_loggin_dec(required=False, need_openid=True, validator=my_questions_validator)\ndef my_questions():\n ''' 我的收红包纪录列表 '''\n offset = request.valid_data.get('offset')\n user = RedpackService.get_qruser_by_openid(request.open_id)\n assert user and user.nickname, '请先关注我们的微信'\n\n where = RedpackUserQuestion.qr_user_id==user.id\n has_more, questions = RedpackService.get_paged_user_question(where=where, offset=offset)\n\n all_viewers = []\n for question in questions:\n latest_viewers = []\n if question['view_count']:\n where = RedpackPayUser.user_question_id==question['id']\n _, latest_viewers = 
RedpackService.get_question_viewers(where=where, limit=2)\n question['latest_viewers'] = latest_viewers\n all_viewers.extend(latest_viewers)\n if question['question_id']:\n mff_question = RedpackService.get_question_by_id(question['question_id'])\n question['question']= mff_question['content']\n fetch_qrcodeuser_refs(all_viewers, fields=['id', 'nickname', 'headimgurl'])\n for i in all_viewers:\n i['qr_user']['nickname'] = i['qr_user'].get('nickname') or '好友'\n\n offset = ''\n if questions:\n offset = str(questions[-1]['id'])\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'infos' : questions,\n 'has_more' : has_more,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\nquestion_detail_validator = Inputs(\n {\n 'user_question_id' : IdField(msg='问题id'),\n }\n)\n@wechat_loggin_dec(required=False, need_openid=True, validator=question_detail_validator, app=True)\ndef question_detail():\n ''' 问题详情 '''\n user_question_id = request.valid_data.get('user_question_id')\n user = RedpackService.get_qruser_by_openid(request.open_id)\n\n has_viewed = RedpackService.get_user_question_viewer(user_question_id, user.id) if user else False\n question = RedpackService.get_user_question_by_id(user_question_id)\n assert question, '问题不存在'\n creator = PromoteService.get_qrcodeuser_by_id(question['qr_user_id'])\n if question['question_id']:\n mff_question = RedpackService.get_question_by_id(question['question_id'])\n question['question']= mff_question['content']\n where = RedpackPayUser.user_question_id==user_question_id\n has_more, infos = RedpackService.get_question_viewers(where=where)\n fetch_qrcodeuser_refs(infos, fields=['id', 'nickname', 'headimgurl'])\n for i in infos:\n i['qr_user']['nickname'] = i['qr_user']['nickname'] or '好友'\n is_myself = question['qr_user_id']==user.id if user else False\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'is_myself' : is_myself,\n 'infos' : infos,\n 'creator' : creator,\n 'has_viewed' : has_viewed,\n 
'question' : question\n }\n return render_template('user/redpack_detail.html', **result)\n return jsonify_response(result)\n\n\nredpack_pay_validator = Inputs(\n {\n 'user_question_id' : IdField(msg='问题id'),\n }\n)\n@wechat_loggin_dec(required=False, need_openid=True, validator=redpack_pay_validator, app=True)\ndef redpack_pay():\n ''' 问题详情 '''\n user = RedpackService.get_qruser_by_openid(request.open_id)\n if not user:\n if request.open_id:\n PromoteService.log_qr_user(None, request.open_id, -1)\n user = RedpackService.get_qruser_by_openid(request.open_id)\n else:\n assert False, '请先关注我们的微信公众号美分分'\n user_question_id = request.valid_data.get('user_question_id')\n question = RedpackService.get_user_question_by_id(user_question_id)\n assert question, '问题不存在'\n has_viewed = RedpackService.get_user_question_viewer(user_question_id, user.id) if user else False\n assert not has_viewed, '您已查看过此问题'\n if question['is_random']: question['price'] = random_redpack_price()\n order_no = OrderService.create_no()\n RedpackService.add_pay(user.id, user_question_id, order_no, question['price'])\n wx_pay_params, err = get_redpack_pay_params(request.open_id, question['price'], order_no);\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'question' : question,\n 'params' : wx_pay_params\n }\n return jsonify_response(result)\n\n\nquestion_viewers_validator = Inputs(\n {\n 'user_question_id' : IdField(msg='问题id'),\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='请输入问题内容')),\n }\n)\n@wechat_loggin_dec(required=False, need_openid=True, validator=question_viewers_validator, app=True)\ndef question_viewers():\n ''' 问题红包查看人列表 '''\n user_question_id = request.valid_data.get('user_question_id')\n offset = request.valid_data.get('offset')\n where = RedpackPayUser.user_question_id==user_question_id\n has_more, infos = RedpackService.get_question_viewers(where=where, offset=offset)\n\n offset = ''\n if infos:\n offset = str(infos[-1]['id'])\n 
fetch_qrcodeuser_refs(infos, fields=['id', 'nickname', 'headimgurl'])\n for i in infos:\n i['qr_user']['nickname'] = i['qr_user']['nickname'] or '好友'\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : infos,\n 'offset' : offset\n }\n return jsonify_response(result)\n\n\n\n\ndef wx_redpack_callback():\n ''' 微信红包支付回调 '''\n xml = request.data\n LogService.log_pay_callback(PAY_METHOD.WECHAT_WEB, xml)\n notify = Notify_pub()\n rs = notify.check_sign(xml)\n re = {}\n if not rs:\n re['return_code'] = 'FAIL'\n re['return_msg'] = '签名失败'\n return notify.arrayToXml(re)\n\n data = notify.get_data()\n result_code = data['result_code']\n order_no = str(data['out_trade_no'])\n total_fee = data['total_fee']\n transaction_id = data['transaction_id']\n\n pay = RedpackService.get_pay_by_orderno(order_no)\n if not pay:\n re['return_code'] = 'FAIL'\n re['return_msg'] = '订单不存在:'+order_no\n return notify.arrayToXml(re)\n\n total_price = float(total_fee)/100\n order_price = float(pay.price)\n if order_price != total_price and (os.environ.get('APP_ENV')=='production'):\n print order_price, total_price, '金额不匹配'\n re['return_code'] = 'FAIL'\n re['return_msg'] = '金额不匹配'\n return notify.arrayToXml(re)\n\n msg = ''\n if (pay.status==REDPACK_PAY_STATUS.PAY_SUCCESS):\n re = {'return_code':'SUCCESS','return_msg':'ok'}\n return notify.arrayToXml(re)\n if result_code.upper() == 'FAIL':\n re['return_code'] = 'FAIL'\n redpack_error_action(pay)\n elif result_code.upper()=='SUCCESS':\n re['return_code'] = 'SUCCESS'\n redpack_success_action(pay, transaction_id=transaction_id)\n else:\n print 'wxpay_notify:',result_code\n re['return_code'] = 'SUCCESS'\n msg = '未知返回码'\n\n re['return_msg'] = msg\n return notify.arrayToXml(re)\n\n\ndef redpack_error_action(pay):\n pass\n\n\ndef redpack_success_action(pay, **kw):\n where = and_(\n RedpackPay.id==pay.id,\n RedpackPay.status.in_([REDPACK_PAY_STATUS.TO_PAY, REDPACK_PAY_STATUS.NEW]),\n )\n kw['status'] = 
REDPACK_PAY_STATUS.PAY_SUCCESS\n count = RedpackService.update_redpack_pay(where, **kw)\n print 'redpack pay success'\n if count:\n RedpackService.add_redpack_user(pay.id, pay.qr_user_id, pay.user_question_id, pay.price)\n RedpackService.incr_question_view_count(pay.user_question_id)\n user_question = RedpackService.get_user_question_by_id(pay.user_question_id)\n creator_id = user_question['qr_user_id']\n user = PromoteService.get_qrcodeuser_by_id(pay.qr_user_id)\n creator = PromoteService.get_qrcodeuser_by_id(creator_id)\n RedpackService.incr_user_question_money(pay.user_question_id, pay.price)\n send_redpack_after_pay.delay(creator.open_id, pay.price, user.nickname or '')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6465739011764526, "alphanum_fraction": 0.6676970720291138, "avg_line_length": 37.05882263183594, "blob_id": "01f1d19435e6776136a47a795d70760570ca2ac5", "content_id": "944e925f96c4c0100614a2680f34105ec98258a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1941, "license_type": "no_license", "max_line_length": 105, "num_lines": 51, "path": "/migrations/versions/6a2ac421f56_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 6a2ac421f56\nRevises: 18e20ed0da8d\nCreate Date: 2016-02-18 10:53:08.382571\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6a2ac421f56'\ndown_revision = '18e20ed0da8d'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('user_device',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('device_id', sa.String(length=50), nullable=True),\n sa.Column('push_token', sa.String(length=50), nullable=True),\n sa.Column('os_version', sa.String(length=10), nullable=True),\n sa.Column('app_version', sa.String(length=10), nullable=True),\n sa.Column('device_name', sa.String(length=100), nullable=True),\n sa.Column('cat', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('update_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('device_id')\n )\n op.create_table('user_device_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('device_id', sa.String(length=50), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_device_log_device_id'), 'user_device_log', ['device_id'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_user_device_log_device_id'), table_name='user_device_log')\n op.drop_table('user_device_log')\n op.drop_table('user_device')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.4429347813129425, "alphanum_fraction": 0.5380434989929199, "avg_line_length": 25.35714340209961, "blob_id": "662b0d8ef0de130994e22873d916618c22ea63db", "content_id": "1b909fb6e48bf1bcd5ed572d4da54dc0b3d9d201", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 65, "num_lines": 14, "path": "/setting/production.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\n\n\nDOMAIN = '.meifenfen.com'\nSERVER_NAME = 'www.meifenfen.com'\n\nCACHE_DB_NO = 0\n\nLOG_FILE_NAME = '/data/api/production/api_logger.log'\nLOG_PORT = 8010\n\nWECHAT_APP_ID = 'wx284c24dbdca7b377'\nWECHAT_APP_SECRET = 'df80d4d3213883e804eca31137c1de1f'" }, { "alpha_fraction": 0.627811849117279, "alphanum_fraction": 0.6554192304611206, "avg_line_length": 26.94285774230957, "blob_id": "4ce14ef530da579d6c7fc56dd5bc97720251f92d", "content_id": "8432e2395e11544a7be239feb5971d48898fe841", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 978, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": "/migrations/versions/11da3b568bd2_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 11da3b568bd2\nRevises: 5853d4187f15\nCreate Date: 2016-01-07 17:16:51.367630\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '11da3b568bd2'\ndown_revision = '5853d4187f15'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('daily_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('daily_id', sa.Integer(), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['daily_id'], ['daily_coupon.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('daily_id', 'user_id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('daily_user')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6227272748947144, "alphanum_fraction": 0.6613636612892151, "avg_line_length": 28.33333396911621, "blob_id": "e1bedd46247a11c3e1a7f4bc82d4ef7418a02750", "content_id": "8a268d1359948833a65f3e85c057ccf932dabcb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 880, "license_type": "no_license", "max_line_length": 84, "num_lines": 30, "path": "/migrations/versions/4ba650ba661f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4ba650ba661f\nRevises: 3bed4aa05b5a\nCreate Date: 2015-12-07 10:15:09.015480\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4ba650ba661f'\ndown_revision = '3bed4aa05b5a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('qr_code_user', sa.Column('sex', sa.Integer(), nullable=True))\n op.add_column('qr_code_user', sa.Column('user_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'qr_code_user', 'user', ['user_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'qr_code_user', type_='foreignkey')\n op.drop_column('qr_code_user', 'user_id')\n op.drop_column('qr_code_user', 'sex')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6187306642532349, "alphanum_fraction": 0.6195046305656433, "avg_line_length": 32.11281967163086, "blob_id": "b5ea6ed08f97b21649f275adadef83000f5df51f", "content_id": "2aa2baf2b5d9aad471257a1ace598f6981e97b9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6486, "license_type": "no_license", "max_line_length": 122, "num_lines": 195, "path": "/ops/redpack.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom sqlalchemy import func\nfrom util.sqlerr import SQL_DUPLICATE\nfrom util.sqlerr import SQL_DUPLICATE_PHONE\nfrom util.utils import convert_locaton\nfrom util.utils import dt_obj\n\nfrom models import db\nfrom models import RedpackQuestion\nfrom models import RedpackUserQuestion\nfrom models import RedpackPay\nfrom models import QrCodeUser\nfrom models import RedpackPayUser\nfrom ops.utils import get_items\nfrom ops.utils import get_page\nfrom ops.utils import count_items\nfrom ops.cache import RoomDesignVoteCounter\nfrom thirdparty.qn import upload_img\nfrom settings import celery\n\n\n\nclass RedpackService(object):\n ''' '''\n\n @staticmethod\n def get_qruser_by_openid(open_id):\n ''' '''\n return QrCodeUser.query.filter(QrCodeUser.open_id==open_id).first()\n\n @staticmethod\n def create_question(content):\n ''' '''\n question = RedpackQuestion(content=content)\n db.session.add(question)\n db.session.commit()\n return question.id\n\n @staticmethod\n def create_user_question(qr_user_id, question_id, question, answer, is_custom, is_random, price):\n ''' '''\n question = RedpackUserQuestion(\n qr_user_id=qr_user_id, question_id=question_id, question=question, answer=answer, is_custom=is_custom,\n 
is_random=is_random, price=price)\n db.session.add(question)\n db.session.commit()\n return question.id\n\n @staticmethod\n def get_user_question_by_id(user_question_id):\n question = RedpackUserQuestion.query.filter(RedpackUserQuestion.id==user_question_id).first()\n if question: return question.as_dict()\n\n @staticmethod\n def get_question_by_id(question_id):\n question = RedpackQuestion.query.filter(RedpackQuestion.id==question_id).first()\n if question: return question.as_dict()\n\n @staticmethod\n def count_user_question(qr_user_id):\n where = RedpackUserQuestion.qr_user_id==qr_user_id\n return count_items(RedpackUserQuestion, where)\n\n @staticmethod\n def count_redpack(where=None):\n return count_items(RedpackPayUser, where)\n\n @staticmethod\n def get_paged_user_question(**kw):\n return get_page(RedpackUserQuestion, {}, **kw)\n\n @staticmethod\n def get_questions_by_ids(question_ids, **kw):\n where = RedpackQuestion.id.in_(question_ids)\n _, infos = RedpackService.get_paged_redpack_questions(where=where, limit=len(question_ids))\n return infos\n\n @staticmethod\n def get_pay_by_orderno(order_no):\n ''' '''\n return RedpackPay.query.filter(RedpackPay.order_no==order_no).first()\n\n @staticmethod\n def update_redpack_pay(where, **kw):\n ''' '''\n count = RedpackPay.query.filter(where).update(kw, synchronize_session=False)\n db.session.commit()\n return count\n\n @staticmethod\n def add_redpack_user(pay_id, qr_user_id, user_question_id, price):\n user = RedpackPayUser(pay_id=pay_id, qr_user_id=qr_user_id, user_question_id=user_question_id, price=price)\n db.session.add(user)\n db.session.commit()\n return user.id\n\n @staticmethod\n def incr_question_view_count(user_question_id):\n ''' '''\n where = RedpackUserQuestion.id==user_question_id\n data = {\n 'view_count': RedpackUserQuestion.view_count+1\n }\n count = RedpackUserQuestion.query.filter(where).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def get_question_viewers(**kw):\n ''' 
问题答案发红包查看用户列表 '''\n return get_page(RedpackPayUser, {}, **kw)\n\n @staticmethod\n def add_pay(qr_user_id, user_question_id, order_no, price):\n pay = RedpackPay(\n qr_user_id=qr_user_id,\n user_question_id=user_question_id,\n order_no=order_no,\n price=price\n )\n db.session.add(pay)\n db.session.commit()\n return pay.id\n\n @staticmethod\n def get_user_question_viewer(user_question_id, qr_user_id):\n query = and_(\n RedpackPayUser.user_question_id==user_question_id,\n RedpackPayUser.qr_user_id==qr_user_id\n )\n return RedpackPayUser.query.filter(query).first()\n\n @staticmethod\n def update_redpack_status(question_id, status):\n ''' '''\n query = and_(\n RedpackQuestion.id==question_id\n )\n data = {\n 'status':status \n }\n count = RedpackQuestion.query.filter(query).update(data, synchronize_session=False)\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_redpack_questions(**kw):\n ''' '''\n return get_page(RedpackQuestion, {}, **kw)\n\n @staticmethod\n def count_redpack_question():\n return count_items(RedpackQuestion, None)\n @staticmethod\n def count_redpack_user_question(where=None):\n return count_items(RedpackUserQuestion, where)\n\n @staticmethod\n def get_question_viewers(**kw):\n return get_page(RedpackPayUser, {}, **kw)\n\n @staticmethod\n def get_qr_user_by_ids(qr_user_ids, **kw):\n ''' '''\n where = QrCodeUser.id.in_(qr_user_ids)\n _, infos = get_page(QrCodeUser, {}, where=where, **kw)\n return infos\n\n @staticmethod\n def total_money():\n total = db.session.query(\n func.sum(RedpackPayUser.price)\n ).scalar() or 0\n return float(total)\n\n @staticmethod\n def total_money_group_by_question(question_ids=None):\n query = db.session.query(RedpackPayUser.user_question_id, func.sum(RedpackPayUser.price))\n if question_ids:\n where = RedpackPayUser.user_question_id.in_(question_ids)\n query = query.filter(where)\n result = query.group_by(RedpackPayUser.user_question_id).all()\n return dict([(i[0], float(i[1]))for i in result])\n\n 
@staticmethod\n def incr_user_question_money(user_question_id, price):\n data = {\n 'money': RedpackUserQuestion.money+price\n }\n count = RedpackUserQuestion.query.filter(RedpackUserQuestion.id==user_question_id).update(data)\n db.session.commit()\n return count\n\n\n\n" }, { "alpha_fraction": 0.6280992031097412, "alphanum_fraction": 0.676033079624176, "avg_line_length": 22.269229888916016, "blob_id": "5a0ca5e1c633cec0839994cd595f9afd11ae1ca0", "content_id": "50684f09888093d7731c2b50b8af5b406559bd1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 85, "num_lines": 26, "path": "/migrations/versions/9969fdf8fc9_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 9969fdf8fc9\nRevises: 1b538e70897\nCreate Date: 2016-03-07 16:32:03.339046\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '9969fdf8fc9'\ndown_revision = '1b538e70897'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('order', sa.Column('remark', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('order', 'remark')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6165289282798767, "alphanum_fraction": 0.6793388724327087, "avg_line_length": 22.269229888916016, "blob_id": "41ee24b090bf5e6aaeb4e9139fe4c044322e4a01", "content_id": "a999c19990a2c9051a3d3e722c58bea5ac9f218b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 89, "num_lines": 26, "path": "/migrations/versions/51187e1e4dbc_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 51187e1e4dbc\nRevises: 51aaf1e0ecdd\nCreate Date: 2015-11-16 10:27:52.435910\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '51187e1e4dbc'\ndown_revision = '51aaf1e0ecdd'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('hospital', sa.Column('photos', sa.String(length=1000), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('hospital', 'photos')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6105769276618958, "alphanum_fraction": 0.6858974099159241, "avg_line_length": 23, "blob_id": "8c68f9ccc7a0bf12843089e491fc29a98d8008c0", "content_id": "925eafb790d86b8d15e742de2edca5eeb4a73b01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "no_license", "max_line_length": 93, "num_lines": 26, "path": "/migrations/versions/348398960582_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 348398960582\nRevises: 5329d119ee5f\nCreate Date: 2015-11-05 11:06:06.364444\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '348398960582'\ndown_revision = '5329d119ee5f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('order', sa.Column('transaction_id', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('order', 'transaction_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6259863376617432, "alphanum_fraction": 0.6549184918403625, "avg_line_length": 28.671875, "blob_id": "1752b158f8342163e22ba8227716a359c80f5346", "content_id": "faf225b785ae40e05b7e3988cd45ac5f19ab3a4f", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1915, "license_type": "no_license", "max_line_length": 102, "num_lines": 64, "path": "/thirdparty/push/apple_push.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport logging\nlogging.basicConfig()\nfrom apnsclient import *\nimport OpenSSL\nOpenSSL.SSL.SSLv3_METHOD = OpenSSL.SSL.TLSv1_METHOD\n\nCURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\npush_name = 'push_production'\npem_file = 'ck_production.pem'\ncon = Session().new_connection(push_name, cert_file=CURRENT_DIR+\"/\"+pem_file, passphrase='2016')\nsrv = APNs(con)\n\n# Persistent connection for intensive messaging.\n# Keep reference to session instance in some class static/global variable,\n# otherwise it will be garbage collected and all connections will be closed.\n#session = Session()\n#con = session.get_connection(\"push_sandbox\", cert_file=\"ck.pem\")\n\n\ndef send_message(tokens, alert, badge=1, sound='bingbong.aiff', extra=None):\n message = Message(tokens, alert, badge=badge, sound=sound, extra=extra)\n print 'apns', alert, tokens, sound, extra\n try:\n res = srv.send(message)\n except:\n import traceback\n traceback.print_exc()\n print \"Can't connect to APNs, looks like network is down\"\n else:\n for token, reason in res.failed.items():\n code, errmsg = reason\n print \"Device failed: {0}, reason: {1}\".format(token, errmsg)\n\n for code, errmsg in res.errors:\n print \"Error: {}\".format(errmsg)\n \n # Check if there are tokens that can be retried\n if res.needs_retry():\n retry_message = res.retry()\n\ntry:\n from settings 
import celery\n send_message = celery.task(send_message)\nexcept:\n pass\n\n\n\ndef send_push(token):\n ''' '''\n token = token.replace(' ', '')\n alert = \"您有一条新消息\"\n badge = 1\n sound='bingbong.aiff'\n send_message((token,), alert, badge, sound, extra={'a':2})\n\n\nif __name__=='__main__':\n token = \"bf33007c b3141c26 54658090 f7f57c51 210b7375 2985f237 6311aded 1b8d9b28\".replace(' ', '')\n send_push(token)\n\n\n" }, { "alpha_fraction": 0.6354680061340332, "alphanum_fraction": 0.674876868724823, "avg_line_length": 28, "blob_id": "78efdd9a0cf9e3cf84ab5601f0344d0bb68188f4", "content_id": "9e45b93575a9236a575bafa68c4a1b6126044d71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 812, "license_type": "no_license", "max_line_length": 118, "num_lines": 28, "path": "/migrations/versions/3a64535997fb_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3a64535997fb\nRevises: 3d20dc8132b4\nCreate Date: 2015-12-09 16:06:25.045848\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3a64535997fb'\ndown_revision = '3d20dc8132b4'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(u'trial_ibfk_2', 'trial', type_='foreignkey')\n op.drop_column('trial', 'user_id')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('trial', sa.Column('user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False))\n op.create_foreign_key(u'trial_ibfk_2', 'trial', 'user', ['user_id'], ['id'])\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.8103092908859253, "alphanum_fraction": 0.830515444278717, "avg_line_length": 41.920352935791016, "blob_id": "c2be1ee2bd8e301e5a0df51722e25d057dba0173", "content_id": "6cbd3c19220f19df22af5f21d6082010f9f83f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 12432, "license_type": "no_license", "max_line_length": 244, "num_lines": 113, "path": "/static/user/SXXY.html", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta charset=\"utf-8\">\n\t\t<meta name=\"viewport\" content=\"width=device-width,initial-scale=1,minimum-scale=1,maximum-scale=1,user-scalable=no\" />\n\t\t<title>美分分授信协议书</title>\n\t\t<style>\n\t\t*{\n\t\t\tpadding:0;\n\t\t\tmargin: 0;\n\t\t\tlist-style: none;\n\t\t}\n\t\t\tbody{\n\t\t\t\tbackground:#F4F4F4;\n\t\t\t\tfont-size:14px;\n\t\t\t\tfont-family: \"微软雅黑\";\n\t\t\t\tpadding: 10px;\n\t\t\t}\n\t\t\th1{\n\t\t\t\ttext-align: center;\n\t\t\t\tfont-weight: normal;\n\t\t\t\tfont-size:1.4rem;\n\t\t\t}\n\t\t\tdiv{\n\t\t\t\ttext-indent: 20px;margin-top:10px;\n\t\t\t}\n\t\t\tpre{\n\t\t\t\tword-wrap: break-word\n\t\t\t}\n\t\t</style>\n<script>\nvar _hmt = _hmt || [];\n(function() {\n var hm = document.createElement(\"script\");\n hm.src = \"//hm.baidu.com/hm.js?61b678558a59e95c85431d0243d592ef\";\n var s = document.getElementsByTagName(\"script\")[0]; \n s.parentNode.insertBefore(hm, s);\n})();\n</script>\n\n\t</head>\n\t\n\t<body>\n\t\t<h1>美分分授信协议书</h1>\n<pre>\t\t\t\n美分分授信协议书\n甲方: 【系统填充】 \n身份证号: 【系统填充】 \n美分分网站用户名: 【系统填充】 
\n乙方:美分期网络科技(上海)有限公司\n甲方是在美分分网站(www.meifenfen.com)、美分分移动端应用程序APP(以下合称“美分分网站”)上注册的会员,在中华人民共和国境内为完全民事行为能力人。乙方是一家在中国上海市依照中华人民共和国法律法规合法成立并有效存续的有限责任公司,是美分分网站的所有人及运营方。\n甲方因在美分分网站购买服务的需要向乙方申请额度授信,在自愿、平等、互利、诚实信用的基础上,签订本《美分分授信协议书》(以下简称“本协议”),当事双方同意由乙方向甲方提供授信额度,并达成以下各项条款,以资共同遵守。本协议是由甲方和乙方于【系统填充】年【系统填充】月【系统填充】日在上海签订。\n一、额度申请\n1.甲方须为年满十八周岁并具备完全民事行为能力及良好信用的正规统招全日制在校专科学历(包括在读专科)或以上的在校男/女大学生并且甲方应按乙方要求提供真实完整、准确有效的资料及信息,包括但不限于个人身份信息(包括但不限于身份证、中华人民共和国护照)、学生身份证明(包括但不限于学生证,在读证明或学历证明)、个人彩色证件照片等资料。若甲方不具备前述额度申请条件或提供无效的或者不真实的虚假信息,乙方应立即停止对甲方进行额度授信,由此遭受的损失以及其他任何不良后果的,由甲方或其监护人自行承担。\n3.甲方在申请额度过程中中所提供的信息内容(包括但不限于手机号码、通讯地址、电子邮件等资料)如有变更或失效的,应立即联系乙方办理变更手续,否则甲方将自行承担由于未及时变更个人信息所造成的后果、损失及法律责任。\n4.乙方有权向信贷征信业监督管理部门批准建立的征信信息数据库及其他渠道查询、处理、留存和使用其个人信息和信用状况相关的资料,以确定甲方的信用状况和身份信息。乙方查询获得的信用报告可用于额度审批、异议核查等合法的用途,对于超出授权查询范围的一切后果及法律责任由乙方承担。乙方承诺对上述查询到的甲方资料保密。\n5.乙方有权向信贷征信业监督管理部门批准建立的征信信息数据库报送甲方的信用状况、身份信息等相关资料;乙方有权根据司法机关或其它行政机关要求处理上述资料,或依照法律法规规定的程序及要求,将上述资料提供给社会征信机构。在法律规定的范围内,乙方亦有权将上述资料用于乙方自行或委托第三方进行案件调查,债务追索等情况。如发生甲方的不良信用信息等报送情形且依法需通知甲方的,乙方可以通过电话、短信、电子邮件、对账单、催收单等方式通知甲方,甲方联系方式以其提交并存留在乙方的有效联络信息为准。\n6.甲方上述授权视同同意乙方通过各种合法渠道获取并报送甲方的个人信用信息。乙方将对获取并报送的涉及甲方隐私的信息审慎合法使用并保密。甲方可能因此承担由于自身或其他原因造成信贷记录不良而导致的不良后果和影响。\n二、双方责任和权利\n(一)甲方责任和权利\n1、甲方申请额度获得审批后,可以于授信额度的有效期内,即【】年【】月【】日至【】年【】月【】日之间,在总额度范围内一次或多次向乙方申请使用额度并在美分分网站进行分期消费。\n2、甲方在申请并使用额度后应在约定的每期最晚付款日前按照规定将每期应付款足额支付给乙方,否则,即视为逾期付款,甲方应当向乙方支付按照如下公式计算的违约金:当期应付款全额*1%*违约天数。\n3、甲方可在授信额度的有效期前,提前部分或全部支付所申请使用的额度。\n4、甲方申请获得的额度将由美分分网站直接支付给甲方指定的服务方,甲方可在该服务方处获得相应服务。 \n(二)乙方责任和权利 \n1、乙方有权根据甲方通过美分分网站提交申请额度的资料后决定是否授予甲方消费额度以及允许甲方使用的额度大小。\n2、甲方申请额度获得审批后,在授信额度有效期内,乙方应当允许甲方在美分分网站进行一次或多次在额度范围内的分期消费,但甲方每一次消费都应当事先向乙方申请并征得乙方同意(一般情况下,乙方依照甲方的申请主动向甲方指定的服务方支付额度视为乙方对甲方申请的同意),乙方亦有权利拒绝甲方的申请;甲方分批申请使用的总额度,累计不得超过乙方向甲方授予的总消费额度。\n3、经甲方申请并获得的额度可以由乙方指定的第三方提供。\n4、乙方应在现有技术基础上维护美分分网站的正常运行,并努力提升和改进技术,更好的为用户提供平台服务。 \n5、乙方应对甲方在使用平台服务中所遇到的有关问题及反映的情况,及时作出回复。 \n6、乙方应当依照法律法规规定为甲方提供平台服务,并对美分分网站平台服务承担相应的基础保障义务,但无法对由于信息网络设备维护、互联网连接故障,电脑、通讯或其他系统的故障,电力故障,罢工,暴乱,火灾,洪水,风暴,爆炸,战争,政府行为,司法行政机关的命令或因第三方原因而给甲方造成的损害结果承担责任。\n三、双方陈述和保证\n(一)甲方的陈述与保证 
\n1、甲方具有完全民事行为能力,具有签署并履行本协议的资格和能力。\n2、甲方知悉、理解、同意本协议的全部内容,签署本协议是甲方的真实意思表示。 \n3、本协议构成对甲方的合法、有效和有约束力的义务,该义务依本协议之条款可强制执行。 \n4、甲方向乙方提供的所有文件及信息是真实、准确、完整、有效的。\n5、甲方向乙方申请的额度仅限于在美分分网站上使用,并不会被用于其他任何非法目的。\n6、甲方授权乙方及/或第三方平台、第三方机构办理本合同项下对甲方的授信均为甲方真实意思表示,乙方及/或第三方平台、第三方机构因对甲方授信而产生的法律后果由甲方承担。 \n(二)乙方的陈述与保证 \n1、乙方是按照中华人民共和国法律法规合法注册成立并有效存续的一家公司。 \n2、乙方知悉、理解、同意本协议的全部内容,签署本协议是乙方的真实意思表示。 \n3、本协议构成对乙方的合法、有效和有约束力的义务。 \n四、违约责任\n1、甲乙双方均应严格履行本协议项下义务,一方不履行其在本协议项下义务或者履行本协议项下义务不符合约定的,守约方有权要求违约方按照法律规定及本协议的约定承担继续履行、采取补救措施、支付违约金或者赔偿损失等违约责任。 \n2、如果甲方出现以下任一情形:(1)逾期付款超过30天;(2)连续逾期付款三次以上(含三次);(3)累计逾期达五次以上(含五次);(4)乙方发现甲方出现逃避本协议项下义务、拒绝与乙方沟通、恶意拖欠还款及平台服务费等危害本协议执行的情形,乙方有权根据情节做出以下任一决定:\n(1)全部、部分调减、中止或终止甲方的授信额度;\n(2)全部、部分中止或终止乙方原准备向甲方发放但尚未发放的额度;\n(3)宣布甲方按照本协议项下尚未到期的借款全部或部分立即到期;\n(4)终止或解除本授信协议;\n(5)将甲方上述违约失信的相关信息在美分分网站上公布、并上报记入国家及地方公民征信系统;\n (6)不再接受甲方任何的额度申请并对甲方在美分分网站的账号予以注销收回;\n(7)将甲方个人信息向媒体、所在学校、公安机关、检察机关及其他相关第三方披露,由此产生的甲方不良信用记录或其他对甲方声誉、信誉等不利影响及后果由甲方自己承担,乙方对此不承担任何法律责任;\n(8)其他乙方认为可以采取的必要措施。 \n3、如果甲方发生违约,产生无法按时归还向乙方借款的情形时,甲方在此确认并同意乙方或其聘请的任何第三方有权以直接上门或网络、电子邮件、电话、短讯或其他任何合理途径或方式向甲方发出提示或进行追索,甲方不得提出任何异议或提出任何诉讼,由此产生的一切费用均由甲方承担。 \n4、违约方应承担守约方因违约方的违约行为而产生的全部费用和损失,包括但不限于调查费用、诉讼费、公告费、执行费、律师费等。 \n五、适用法律与争议解决\n1、本协议的签订、履行、终止、解释和争议解决受中华人民共和国法律管辖。 \n2、因本协议引发的或与本协议有关的争议,由甲、乙双方协商解决,也可由有关部门调解;协商或调解不成的,由乙方所在地上海市杨浦区人民法院管辖。 \n六、、其他事项\n1、如果本协议有一条或多条规定根据任何法律或法规在任何方面被裁定为无效、不合法或不可强制执行,本协议其余规定的有效性、合法性或可强制执行性也不应在任何方面受到影响或损害。双方应通过诚意磋商,争取以法律许可以及双方期望的最大限度内有效的规定取代该等无效、不合法或不可强制执行的规定,而该等有效的规定所产生的经济效果应尽可能与该些无效、不合法或不可强制执行的规定所产生的经济效果相似。\n2、未经乙方书面同意,甲方不得将本协议中的全部或部分权利、义务转让给第三方。\n3、乙方若由于自身需要,向第三方委托或转让履行本协议中全部或部分的权利或义务,包括但不限于对第三方转让乙方对甲方拥有的债权。乙方仅需就上述事宜对甲方进行通知,无需征得甲方的同意。甲方对本协议的签署,即视为乙方已对上述的本协议约定中可能产生的债权转让履行了通知义务。\n4、在不影响本协议约定的情形下,本协议对双方及各自依法产生的承继和受让人均具有法律约束力。 \n5、对本协议作出的任何修改和补充均应为书面形式。甲乙双方另行签署的与本协议有关的修改协议和补充协议应是本协议不可分割的组成部分,并应具有与本协议同等的法律效力。 \n6、本协议自用户勾选“已阅读并同意”并点击“提交申请”按钮之日起生效,至双方在本协议项下的全部义务履行完毕之日终止。非经双方协商一致或依照本协议约定,任何一方不得解除本协议。 \n甲方:(签章) \n【系统填充】年【系统填充】月【系统填充】日 \n乙方:(签章)美分期网络科技(上海)有限公司 \n【系统填充】年【系统填充】月【系统填充】日 \n</pre>\n\t</body>\n</html>\n" }, { 
"alpha_fraction": 0.5613440871238708, "alphanum_fraction": 0.5688981413841248, "avg_line_length": 29.715999603271484, "blob_id": "63e368fbcf2e3f8fb1ea813f7161ea7ed2c2c4b2", "content_id": "080dbe480d7e294c806345a05ace0646cce330e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8126, "license_type": "permissive", "max_line_length": 365, "num_lines": 250, "path": "/static/user/js/detail.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "var para = Common.UrlGet();\nvar state;\n$.ajax({\n\txhrFields: {\n\t\twithCredentials: true\n\t},\n\turl: 'http://' + getHostName() + '/user/item_detail?item_id=' + para.item_id,\n\ttype: 'post',\n\tdataType: 'json',\n\tsuccess: function(data) {\n\t\tvar infos = data.comments.infos;\n\t\tvar hospital = data.hospital;\n\t\tvar item = data.item;\n\t\tvar pay_choices = data.pay_choices;\n\t\twx.ready(function() {\n\n\t\t\twx.onMenuShareTimeline({\n\n\t\t\t\ttitle: item.title, // 分享标题\n\n\t\t\t\tlink: 'http://' + getHostName() + '/static/user/detail.html?item_id=' + item.id, // 分享链接\n\n\t\t\t\timgUrl: item.photo_list[0], // 分享图标\n\n\t\t\t\tsuccess: function() {\n\n\t\t\t\t\tconsole.log('success')\n\n\t\t\t\t\t// 用户确认分享后执行的回调函数\n\n\t\t\t\t},\n\n\t\t\t\tcancel: function() {\n\n\t\t\t\t\tconsole.log('cancel')\n\n\t\t\t\t\t// 用户取消分享后执行的回调函数\n\n\t\t\t\t}\n\n\t\t\t});\n\n\t\t\twx.onMenuShareAppMessage({\n\t\t\t\ttitle: item.title, // 分享标题\n\t\t\t\tlink: 'http://' + getHostName() + '/static/user/detail.html?item_id=' + item.id, // 分享链接\n\t\t\t\timgUrl: item.photo_list[0], // 分享图标 \n\t\t\t\tdesc: 'www.meifenfen.com', // 分享描述\n\t\t\t\tsuccess: function() {\n\n\t\t\t\t\tconsole.log('success')\n\n\t\t\t\t\t// 用户确认分享后执行的回调函数\n\n\t\t\t\t},\n\n\t\t\t\tcancel: function() {\n\n\t\t\t\t\tconsole.log('cancel')\n\n\t\t\t\t\t// 
用户取消分享后执行的回调函数\n\n\t\t\t\t}\n\n\t\t\t});\n\n\t\t});\n\n\t\t//\t\tif(data.credit_amount<item.price){\n\t\t//\t\t\t$('.shoufu').show();\n\t\t//\t\t\t$('.check-stages').css('margin-bottom','0')\n\t\t//\t\t\t$('.check-stages:after').css('height','0')\n\t\t//\t\t}\n\t\tif (data.has_fav) {\n\t\t\tstate = 0;\n\t\t\t$('#wish').html('移除心愿单');\n\t\t} else {\n\t\t\tstate = 1;\n\t\t\t$('#wish').html('放入心愿单');\n\t\t}\n\t\t//假用户可以评价\n\t\tif (data.can_comment) {\n\t\t\t$('#fake-user-comment').click(function() {\n\t\t\t\twindow.location = '/static/user/judge-edit.html' + location.search\n\t\t\t})\n\n\t\t}\n\t\t$('#item_note > pre').html(item.note);\n\t\t$('#item_use_time > pre').html(item.use_time);\n\t\t$('#phone').html('咨询电话:' + hospital.phone)\n\t\t$('#phone').on('click', function() {\n\t\t\tlocation = \"tel:\" + hospital.phone;\n\t\t})\n\t\t$('.hospital-link').attr('hospital-id', data.hospital.id)\n\t\t$('.hospital-link').click(function() {\n\t\t\twindow.location = '/user/hospital_detail?hospital_id=' + data.hospital.id;\n\t\t})\n\t\tfor (var i = 0; i < item.photo_list.length - 1; i++) {\n\t\t\tvar e = $($('.swiper-slide')[0]);\n\t\t\te.clone().insertAfter(e);\n\t\t}\n\t\tfor (var i = 0; i < item.photo_list.length - 1; i++) {\n\t\t\tvar e = $($('.swiper-pagination-bullet')[0]);\n\t\t\te.clone().insertAfter(e);\n\t\t}\n\t\tfor (var i = 0; i < item.photo_list.length; i++) {\n\t\t\t$('.img').eq(i).attr('src', item.photo_list[i]);\n\t\t\t$('.img').eq(i).attr('data', item.photo_list[i]);\n\t\t}; //替换banner背景图片\n\t\tvar swiper = new Swiper('.swiper-container', {\n\t\t\tpagination: '.swiper-pagination',\n\t\t\tpaginationClickable: true,\n\t\t\tautoplayDisableOnInteraction: false,\n\t\t\tautoplay: 3000,\n\t\t\tloop: true,\n\t\t});\n\t\twindow.item_photos = item.photo_list;\n\t\t$('.img').click(wx_img_preview);\n\t\t$('#hospitalName').html(hospital.name);\n\t\t$('#hospitalTag').find('.aptitude').remove(); //先删除医院标签在动态添加\n\t\tfor (var j = 0; j < hospital.tag_list.length; j++) 
{\n\t\t\tvar hospitaList = $('<span class=\"aptitude color-blue size-sm\">' + hospital.tag_list[j] + '</span>');\n\t\t\t$('#hospitalTag').append(hospitaList);\n\t\t}\n\t\t$('.user-judge').remove()\n\n\t\t$('#comment').find('a').html('用户评价(' + data.comments.total + ')');\n\t\tif (data.comments.total == 0) {\n\t\t\t$('.noJudge').show();\n\t\t}\n\t\t//动态修改医院信息\n\t\tfor (var k = 0; k < infos.length; k++) {\n\t\t\tif (k > 0) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar str = '<div class=\"user-judge bg-white\"><div class=\"fl\"><img src=\"' + infos[k].user.avatar + '\"/></div><div class=\"intro-cont\"><div class=\"fr\"><span class=\"color-grey p-name\">' + infos[k].user.name + '</span>';\n\t\t\tfor (var x = 0; x < 5; x++) {\n\t\t\t\tif (x < infos[k].rate)\n\t\t\t\t\tstr += '<i class=\"iconfont color-gold star\">&#xe602; </i>';\n\t\t\t\telse\n\t\t\t\t\tstr += '<i class=\"iconfont color-grey star\">&#xe602; </i>';\n\t\t\t}\n\t\t\tvar photo_str = '';\n\t\t\tfor (var q = 0; q < infos[k].photo_list.length; q++) {\n\t\t\t\tphoto_str += '<span class=\"show-par\"><img class=\"show-pic item-comment-img\" src=\"' + infos[k].photo_list[q] + '\"/></span>'\n\t\t\t}\n\t\t\tvar additional = '';\n\t\t\tif (infos[k].is_re_comment) {\n\t\t\t\tadditional = '<span style=\"vertical-align:1px;margin-left: 5px;\">追加评价</span>'\n\t\t\t}\n\t\t\tstr += '<h5 class=\"color-black mui-ellipsis-2 lower-line-height\">' + infos[k].content + '</h5>' + photo_str + '<p class=\"size-sm\"><i class=\"iconfont\">&#xe63a;</i><span>' + infos[k].create_time + '</span>' + additional + '</p></div></div></div>';\n\t\t\t$('#comment').after(str);\n\t\t} //评论信息加载\n\t\t$('.item-comment-img').unbind('click');\n\t\t$('.item-comment-img').click(item_comment_img_preview);\n\t\t//主体title部分\n\t\t$('#mainTitle').html(item.title);\n\t\t$('#mainTitle').html(item.title);\n\t\t$('#yyname').html(hospital.name);\n\t\t$('#price').html(item.price);\n\t\t$('#oldPrice').html(item.orig_price);\n\t\t// $('#payment').html('需首付 '+data.need_pay+' 
元')\n\t\t// if (data.need_pay==0) {\n\t\t// \t$('#payDetails').css('display','none')\n\n\t\t// };\n\t\t$('#payDetails').html('(首付金额=总价-你的可用额度' + data.total_period_amount + '元)')\n\t\t$('.text-cont').remove()\n\t\tfor (var j = pay_choices.length - 1; j >= 0; j--) {\n\t\t\tif (pay_choices[j].disable) {\n\t\t\t\tpay_choices[j].disable = 0;\n\t\t\t} else {\n\t\t\t\tpay_choices[j].disable = 1;\n\t\t\t}\n\t\t\tif (pay_choices[j].id == 0) {\n\t\t\t\tpay_choices[j].period_fee = '(直购)';\n\t\t\t\tvar payNode = '<div class=\"text-cont\" choice-id=' + pay_choices[j].id + ' need=' + pay_choices[j].need_pay + ' period=' + pay_choices[j].credit_used + '><p>¥' + pay_choices[j].period_total + '</p><p>' + pay_choices[j].period_fee + '</p><i class=\"iconfont\">&#xe614;</i></div>';\n\t\t\t} else if (pay_choices[j].period_fee == 0) {\n\t\t\t\tpay_choices[j].period_fee = '(无服务费)';\n\t\t\t\tvar payNode = '<div class=\"text-cont\" choice-id=' + pay_choices[j].id + ' need=' + pay_choices[j].need_pay + ' period=' + pay_choices[j].credit_used + ' option=' + pay_choices[j].disable + '><p>' + \"¥\" + pay_choices[j].period_total + \"x\" + pay_choices[j].period_count + \"期\" + '</p><p>' + pay_choices[j].period_fee + '</p><i class=\"iconfont\">&#xe614;</i></div>';\n\t\t\t} else {\n\t\t\t\tpay_choices[j].period_fee = '(含每期服务费¥' + pay_choices[j].period_fee + ')';\n\t\t\t\tvar payNode = '<div class=\"text-cont\" choice-id=' + pay_choices[j].id + ' need=' + pay_choices[j].need_pay + ' period=' + pay_choices[j].credit_used + ' option=' + pay_choices[j].disable + '><p>' + \"¥\" + pay_choices[j].period_total + \"x\" + pay_choices[j].period_count + \"期\" + '</p><p>' + pay_choices[j].period_fee + '</p><i class=\"iconfont\">&#xe614;</i></div>';\n\t\t\t}\n\n\n\t\t\t$('#option').after(payNode);\n\t\t}\n\n\t},\n\terror: function() {\n\n\t}\n});\n// $(document).on('tap','.text-cont',function(){\n\n// })\n$('#buy').on('click', function(event) {\n\tif (!getCookie('sign_user')) {\n\t\tlocation.href = 
'/static/user/login.html?next=' + location.href;\n\t\treturn;\n\t}\n\tevent.preventDefault();\n\tvar active = $('.text-cont.active').attr('choice-id');\n\tvar option = $('.text-cont.active').attr('option');\n\tif (active) {\n\t\tif (option == 0) {\n\t\t\talert('选择分期期数需小于现在到毕业前六个月的月数');\n\t\t\treturn;\n\t\t}\n\t\tlocation.href = '/static/user/submit-order.html?period_choice_id=' + active + '&item_id=' + para.item_id;\n\t} else {\n\t\talert('请选择分期')\n\t}\n})\n$('#wish').on('click', function() {\n\tif (!getCookie('sign_user')) {\n\t\tlocation.href = '/static/user/login.html?next=' + location.href;\n\t\treturn;\n\t}\n\t$.ajax({\n\t\txhrFields: {\n\t\t\twithCredentials: true\n\t\t},\n\t\ttype: \"post\",\n\t\turl: \"http://\" + getHostName() + \"/user/fav_item/?\" + token,\n\t\tdataType: 'json',\n\t\tdata: {\n\t\t\titem_id: para.item_id,\n\t\t\tstatus: state\n\t\t},\n\t\tsuccess: function(data) {\n\t\t\tif (data.code == 0) {\n\t\t\t\tif (state == 0) {\n\t\t\t\t\t$('#wish').html('放入心愿单');\n\t\t\t\t\talert(data.msg)\n\t\t\t\t\tstate = 1;\n\t\t\t\t} else {\n\t\t\t\t\t$('#wish').html('移除心愿单');\n\t\t\t\t\talert(data.msg)\n\t\t\t\t\tstate = 0;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\talert(data.msg)\n\t\t\t}\n\t\t},\n\t\terror: function() {\n\n\t\t}\n\t});\n})" }, { "alpha_fraction": 0.44191452860832214, "alphanum_fraction": 0.44458121061325073, "avg_line_length": 36.68041229248047, "blob_id": "41891200069657a13610b86639486dcf3746e3ea", "content_id": "e59f378e26d2670cd2db62f008d16055526cfdc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14903, "license_type": "no_license", "max_line_length": 100, "num_lines": 388, "path": "/util/decorators.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport re\nimport time\nimport json\nfrom functools import wraps\n\nfrom flask import request\nfrom flask import Response\nfrom flask import redirect\nfrom flask import make_response\nfrom 
util.utils import union_dict\nfrom util.utils import jsonify_response\nfrom util.utils import jsonify \nfrom util.sign import extract_user_id\nfrom util.sign import get_cookie\nfrom util.sign import sign_user\nfrom util.sign import check_token\nfrom util.sign import del_cookie\nfrom util.sign import check_hospital_token\nfrom util.sign import check_promote_token\nfrom thirdparty.wechat import get_user_snsapi_base_url\nfrom constants import ResponseCode\nfrom udp_server import send_msg\n\n\ndef get_err_response(msg):\n ''' 异常响应模版 '''\n return jsonify_response(\n {\n \"code\": ResponseCode.SERVER_ERROR,\n \"msg\": msg\n },\n with_response=True\n )\n\np = re.compile('.*?\"code\": (?P<code>\\d+)')\n\ndef is_err_response(result):\n ''' '''\n result = p.search(result or '')\n if result:\n a = result.groupdict() or {}\n if a.get('code')!='0':\n return True\n\n\ndef udp_log(resp=None):\n ''' udp日志 '''\n timestamp = time.time()\n data = {\n 'timestamp' : timestamp,\n 'remote_addr' : request.remote_addr,\n 'url' : request.path,\n 'user_id' : getattr(request, 'user_id', None),\n 'data' : getattr(request, 'valid_data', None),\n 'args' : request.args,\n 'form' : request.form,\n 'cookies' : request.cookies.get('sign_user'),\n 'cookies' : request.cookies.get('sign_user'),\n 'open_id' : getattr(request, 'open_id', None),\n 'user_agent' : request.headers.get('User-Agent'),\n 'resp' : resp\n }\n data_line = jsonify(data)\n send_msg(data_line)\n\n\ndef admin_udp_log():\n ''' admin udp日志 '''\n timestamp = time.time()\n data = {\n 'timestamp' : timestamp,\n 'remote_addr' : request.remote_addr,\n 'url' : request.path,\n 'data' : getattr(request, 'raw_data', None),\n 'args' : request.args,\n 'form' : request.form,\n 'token' : request.cookies.get('token'),\n 'user_agent' : request.headers.get('User-Agent'),\n 'log_cat' : 1, #1管理端\n }\n data_line = jsonify(data)\n send_msg(data_line)\n\n\n'''\n只有在支付页面才需要open_id\n'''\ndef wechat_loggin_dec(required=True, next='', need_openid=False, 
validator=None, app=True):\n ''' need_openid 如果是在微信浏览器里面 则跳转到静默登录获取openid '''\n def _wechat_loggin_dec(func):\n @wraps(func)\n def _func(*args, **kw):\n has_err = True\n try:\n result = None\n request.is_app = app\n order_id = str(request.args.get('order_id') or '')\n oauth_url = get_user_snsapi_base_url(state=request.url)\n req_open_id = get_cookie('open_id')\n req_user_sign = get_cookie('sign_user')\n req_user_id = extract_user_id(req_user_sign)\n if need_openid and not(req_open_id) and 'meifenfen.com' in request.url:\n print 'redirect oauth'\n print oauth_url\n result= make_response(redirect(oauth_url))\n return result\n request.valid_data = None\n if validator:\n raw_get_data = request.args.to_dict()\n raw_post_data = request.form.to_dict()\n raw_data = union_dict(raw_get_data, raw_post_data)\n err, valid_data = validator.validate(raw_data)\n assert not err, err\n request.valid_data = valid_data\n request.raw_data = raw_data\n req_user_sign = req_user_sign or valid_data.get('sign_user') or ''\n if not req_user_sign:#开发调试 兼容app\n req_user_sign = request.args.get('sign_user') or request.form.get('sign_user')\n req_user_id = extract_user_id(req_user_sign)\n if req_user_sign: req_user_id = extract_user_id(req_user_sign)\n print 'req_user_sign:', req_user_sign\n if not(app) and not(req_open_id):\n result = redirect(oauth_url)\n return result\n if required and not(req_user_id and req_user_sign==sign_user(req_user_id)):\n if not app:\n result = redirect('/user/login?next='+(next or request.url))\n else:\n result = jsonify_response(\n {\n 'code': ResponseCode.NEED_LOGIN,\n 'msg': '请登录'\n }, with_response=True\n )\n del_cookie(result, 'sign_user')\n return result\n request.open_id = req_open_id\n user_id = req_user_id\n if user_id and user_id.isdigit() and req_user_sign==sign_user(req_user_id):\n request.user_id = int(user_id)\n else:\n request.user_id = None\n result = func(*args, **kw)\n has_err = False\n return result\n except ValueError as e:\n import traceback\n 
traceback.print_exc()\n result = get_err_response(getattr(e, 'message', '服务器异常'))\n except AssertionError as e:\n import traceback\n traceback.print_exc()\n result = get_err_response(getattr(e, 'message', '服务器异常'))\n except Exception as e:\n import traceback\n traceback.print_exc()\n result = get_err_response('服务器异常')\n else:\n return result\n finally:\n try:\n resp = None\n data = None\n if isinstance(result, (list,tuple)):\n data = result[0]\n elif isinstance(result, Response):\n try:\n data = getattr(result, 'data', None)\n except:\n pass\n if has_err:# or is_err_response(data):\n resp=data\n udp_log(resp)\n return result\n except Exception as e:\n import traceback\n traceback.print_exc()\n return get_err_response('服务器异常')\n return _func\n return _wechat_loggin_dec\n\n\ndef admin_json_dec(required=True, validator=None, roles=[]):\n def _admin_json_dec(func):\n @wraps(func)\n def _func(*args, **kw):\n from ops.admin import AdminService\n try:\n request.valid_data = None\n if validator:\n raw_get_data = request.args.to_dict()\n raw_post_data = request.form.to_dict()\n raw_req_data = json.loads(request.data) if request.data else {}\n raw_data = union_dict(raw_get_data, raw_post_data, raw_req_data)\n err, valid_data = validator.validate(raw_data)\n assert not err, err\n request.valid_data = valid_data\n admin_name = None\n is_valid = False\n if get_cookie('token'):\n is_valid, admin_name = check_token(get_cookie('token'))\n if required and not(get_cookie('token') and is_valid):\n response = jsonify_response(\n {\n \"code\": -1,\n \"msg\": '请登录'\n }\n )\n return response\n if required:\n admin = AdminService.get_admin(admin_name)\n request.admin = admin\n request.name = admin_name\n if not admin or (roles and admin.cat not in roles):\n return jsonify_response(\n {\n \"code\": -2,\n \"msg\": '没有相关权限'\n }\n )\n result = func(*args, **kw) or ''\n except ValueError as e:\n import traceback\n traceback.print_exc()\n return jsonify_response(\n {\n \"code\": 10000,\n 
\"msg\": getattr(e, 'msg', '服务器异常')\n }\n )\n except AssertionError as e:\n print 'assert'\n import traceback\n traceback.print_exc()\n return jsonify_response(\n {\n \"code\": 10000,\n \"msg\": getattr(e, 'message', '') or '服务器异常'\n }\n )\n except Exception as e:\n import traceback\n traceback.print_exc()\n return get_err_response('服务器异常')\n else:\n return result\n finally:\n try:\n admin_udp_log()\n except Exception as e:\n pass\n return _func\n return _admin_json_dec\n\n\n\ndef hospital_dec(required=True, validator=None):\n def _hospital_dec(func):\n @wraps(func)\n def _func(*args, **kw):\n try:\n request.valid_data = None\n if validator:\n raw_get_data = request.args.to_dict()\n raw_post_data = request.form.to_dict()\n raw_req_data = json.loads(request.data) if request.data else {}\n print raw_req_data, 'raw_req_data'\n raw_data = union_dict(raw_get_data, raw_post_data, raw_req_data)\n err, valid_data = validator.validate(raw_data)\n print err, valid_data\n assert not err, err\n request.valid_data = valid_data\n print request.url\n print request.path\n print get_cookie('sign'), 'sign'\n is_valid, name = check_hospital_token(get_cookie('sign'))\n if required and not(get_cookie('sign') and is_valid):\n return redirect('/hospital/login/')\n return jsonify_response(\n {\n \"code\": -1,\n \"msg\": '请登录'\n }\n )\n request.name = name\n result = func(*args, **kw) or ''\n except ValueError as e:\n import traceback\n traceback.print_exc()\n return jsonify_response(\n {\n \"code\": 10000,\n \"msg\": getattr(e, 'msg', '服务器异常')\n }\n )\n except AssertionError as e:\n print 'assert'\n import traceback\n traceback.print_exc()\n return jsonify_response(\n {\n \"code\": 10000,\n \"msg\": getattr(e, 'message', '') or '服务器异常'\n }\n )\n except Exception as e:\n import traceback\n traceback.print_exc()\n return get_err_response('服务器异常')\n else:\n return result\n return _func\n return _hospital_dec\n\n\n\ndef promote_dec(required=True, validator=None):\n ''' 推广登录 '''\n def 
_promote_dec(func):\n @wraps(func)\n def _func(*args, **kw):\n try:\n request.valid_data = None\n if validator:\n raw_get_data = request.args.to_dict()\n raw_post_data = request.form.to_dict()\n raw_req_data = json.loads(request.data) if request.data else {}\n print raw_req_data, 'raw_req_data'\n raw_data = union_dict(raw_get_data, raw_post_data, raw_req_data)\n err, valid_data = validator.validate(raw_data)\n print err, valid_data\n assert not err, err\n request.valid_data = valid_data\n print request.url\n print request.path\n print get_cookie('promote_sign'), 'promote_sign'\n is_valid, name = check_promote_token(get_cookie('promote_sign'))\n if required and not(get_cookie('promote_sign') and is_valid):\n return redirect('/promote/login/')\n return jsonify_response(\n {\n \"code\": -1,\n \"msg\": '请登录'\n }\n )\n request.name = name\n result = func(*args, **kw) or ''\n except ValueError as e:\n import traceback\n traceback.print_exc()\n return jsonify_response(\n {\n \"code\": 10000,\n \"msg\": getattr(e, 'msg', '服务器异常')\n }\n )\n except AssertionError as e:\n print 'assert'\n import traceback\n traceback.print_exc()\n return jsonify_response(\n {\n \"code\": 10000,\n \"msg\": getattr(e, 'message', '') or '服务器异常'\n }\n )\n except Exception as e:\n import traceback\n traceback.print_exc()\n return get_err_response('服务器异常')\n else:\n return result\n return _func\n return _promote_dec\n\n\n\ndef dev_dec(func):\n ''' 调试debug '''\n @wraps(func)\n def _inner(*args, **kw):\n try:\n return func(*args,**kw)\n except Exception as e:\n import traceback\n traceback.print_exc()\n return _inner\n\n\n\n\n\n" }, { "alpha_fraction": 0.5956644415855408, "alphanum_fraction": 0.6456173658370972, "avg_line_length": 32.1875, "blob_id": "9ea3b40fa26296fe01185cea6cd242153854a64c", "content_id": "5b8aab447602f34b8e6f939542a701dbb07a9b0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1147, "license_type": "no_license", 
"max_line_length": 75, "num_lines": 32, "path": "/ops/tasks.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom settings import celery\n\nfrom thirdparty.wx_pay import send_redpack\nfrom thirdparty.wx_pay import send_draw_cash\nfrom thirdparty.sms import send_sms\n\nfrom ops.redpack import RedpackService\nfrom ops.cache import cache\n\[email protected]\ndef send_redpack_after_pay(open_id, price, name):\n ''' 用户查看问题支付后发红包给收红包用户 '''\n result = send_redpack(open_id, price, name)\n total = RedpackService.total_money()\n thousands_count = int(total) / 1000\n total_key = 'total_redpack_send_money'\n if result['result_code'] == 'FAIL':\n send_sms('18801794295', '{}失败{}'.format(total, result['err_code']))\n if thousands_count: #每超过1000发短信给所有运营\n if cache.sadd(total_key, thousands_count):\n print 'send sms '\n send_sms('18801794295', '已使用{}元'.format(total))\n send_sms('18621955395', '已使用{}元'.format(total))\n send_sms('18750552673', '已使用{}元'.format(total))\n\n\n\[email protected]\ndef send_user_draw_cash(open_id, price):\n ''' '''\n return send_draw_cash(open_id, price)" }, { "alpha_fraction": 0.6282608509063721, "alphanum_fraction": 0.6597825884819031, "avg_line_length": 26.058822631835938, "blob_id": "2fb430c230bc0df242f908a9eecd857fcdbb8635", "content_id": "85dc2ac6840b39e0719063f4af2789d3d7db5597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 920, "license_type": "no_license", "max_line_length": 67, "num_lines": 34, "path": "/migrations/versions/3ecdf504db70_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3ecdf504db70\nRevises: 1cf4dd01482b\nCreate Date: 2015-11-11 10:13:33.887260\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3ecdf504db70'\ndown_revision = '1cf4dd01482b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please 
adjust! ###\n op.create_table('recommend_subcat',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('sub_cat_id', sa.Integer(), nullable=True),\n sa.Column('sort_order', sa.Integer(), nullable=True),\n sa.Column('icon', sa.String(length=300), nullable=True),\n sa.ForeignKeyConstraint(['sub_cat_id'], ['item_sub_cat.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('sub_cat_id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('recommend_subcat')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.44573643803596497, "alphanum_fraction": 0.4593023359775543, "avg_line_length": 23.619047164916992, "blob_id": "04c20f25f5c1e5b67c6722f5c3b0f107dba8fc38", "content_id": "3c7c8b5bbc72f8a0096ebd2bb38ae39135152687", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 516, "license_type": "permissive", "max_line_length": 72, "num_lines": 21, "path": "/static/mobile/js/huodejihui.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "jQuery(document).ready(function($) {\n\t\n $('.foot2').click(function(event) {\n \t \n \t $('.footer1').hide();\n \t $('.footer2').show();\n $(this).addClass('current').siblings().removeClass('current');\n });\n $('.foot1').click(function(event) {\n \t \n \t $('.footer1').show();\n \t $('.footer2').hide();\n $(this).addClass('current').siblings().removeClass('current');\n });\n\n $('.bg1').click(function(event) {\n $('.share').hide();\n \n });\n \n});" }, { "alpha_fraction": 0.6275861859321594, "alphanum_fraction": 0.6689655184745789, "avg_line_length": 24.89285659790039, "blob_id": "5257fb277a867f36296fe8ed62dd3bf760901c75", "content_id": "850a566f1ac82689bf9c18c0e97da0a3f73cb338", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 725, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, 
"path": "/migrations/versions/fe9b95bfea6_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: fe9b95bfea6\nRevises: 4eefa5b6eb51\nCreate Date: 2015-11-28 15:09:32.579622\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'fe9b95bfea6'\ndown_revision = '4eefa5b6eb51'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('note', sa.String(length=500), nullable=True))\n op.add_column('item', sa.Column('use_time', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('item', 'use_time')\n op.drop_column('item', 'note')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5770810842514038, "alphanum_fraction": 0.5794594883918762, "avg_line_length": 29.80666732788086, "blob_id": "55d97033bc17691653b417edf8bc1d6a44db8a67", "content_id": "64adadf2a4b51b23735420df1e767895a9b75677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4709, "license_type": "no_license", "max_line_length": 88, "num_lines": 150, "path": "/ops/user.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\n\nfrom util.sqlerr import SQL_DUPLICATE_NAME\nfrom util.sqlerr import SQL_DUPLICATE_PHONE\n\nfrom models import db\nfrom models import User\nfrom models import Wechat\nfrom models import CreditApply\nfrom models import UserAdvice\nfrom models import EditNameLog\nfrom ops.utils import get_items\nfrom ops.utils import get_page\nfrom ops.utils import count_items\n\n\nclass UserService(object):\n\n @staticmethod\n def create_user(name, phone, passwd):\n ''' 创建用户 '''\n try:\n user = User(name=name, phone=phone, passwd=passwd)\n db.session.add(user)\n db.session.commit()\n return user.id\n except Exception 
as e:\n db.session.rollback()\n if SQL_DUPLICATE_NAME.search(str(e)):\n assert False, '用户名已存在'\n elif SQL_DUPLICATE_PHONE.search(str(e)):\n assert False, '手机号码已存在'\n else:\n import traceback\n traceback.print_exc()\n raise(e)\n\n @staticmethod\n def update_user(user_id, **kw):\n count = User.query.filter(User.id==user_id).update(kw)\n db.session.commit()\n\n @staticmethod\n def get_users_by_ids(user_ids, **kw):\n return get_items(User, user_ids, **kw)\n\n @staticmethod\n def update_passwd(phone, passwd):\n count = User.query.filter(User.phone==phone).update({'passwd':passwd})\n db.session.commit()\n return count\n\n @staticmethod\n def get_user_by_phone(phone):\n user = User.query.filter(User.phone==phone).first()\n return user\n\n @staticmethod\n def advice(user_id, content, contact):\n advice = UserAdvice(user_id=user_id, content=content, contact=contact)\n db.session.add(advice)\n db.session.commit()\n return advice.id\n @staticmethod\n def get_advice_dict_by_id(advice_id):\n advice = UserAdvice.query.filter(UserAdvice.id==advice_id).first()\n if advice: return advice.as_dict()\n @staticmethod\n def get_paged_user_advices(**kw):\n return get_page(UserAdvice, {}, **kw)\n @staticmethod\n def count_advices(where=None):\n return count_items(UserAdvice, where)\n\n @staticmethod\n def get_userwechat_by_openid(open_id):\n wechat = Wechat.query.filter(Wechat.open_id==open_id).first()\n return wechat\n\n @staticmethod\n def add_wechat(open_id):\n try:\n wechat = Wechat(open_id=open_id, status=0)\n db.session.add(wechat)\n db.session.commit()\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n\n @staticmethod\n def update_wechat_user(open_id, user_id):\n ''' 登录 '''\n query = Wechat.open_id==open_id\n count = Wechat.query.filter(query).update({'status':1, 'user_id':user_id})\n db.session.commit()\n return count\n\n @staticmethod\n def logout_wechat_user(open_id):\n ''' 退出登录 '''\n query = Wechat.open_id==open_id\n count = 
Wechat.query.filter(query).update({'status':-1})\n db.session.commit()\n return count\n\n @staticmethod\n def get_user_by_id(user_id):\n ''' '''\n user = User.query.filter(User.id==user_id).first()\n return user\n\n @staticmethod\n def get_credit_applies_by_ids(item_ids, **kw):\n where = CreditApply.user_id.in_(item_ids)\n return get_page(CreditApply, {}, limit=10000, where=where, **kw)[1]\n\n @staticmethod\n def get_paged_user(**kw):\n return get_page(User, {}, **kw)\n\n @staticmethod\n def count_user(where=None):\n return count_items(User, where=where)\n\n @staticmethod\n def update_name(user_id, name):\n ''' 修改名字 '''\n log = UserService.get_edit_name_log(user_id)\n if os.environ.get('APP_ENV')=='production':\n assert not log, '名字只能修改一次'\n try:\n count = User.query.filter(User.id==user_id).update({'name':name})\n db.session.commit()\n return count\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE_NAME.search(str(e)):\n assert 0, '用户名字已存在'\n\n @staticmethod\n def get_edit_name_log(user_id):\n return EditNameLog.query.filter(EditNameLog.user_id==user_id).first()\n\n @staticmethod\n def add_edit_name_log(user_id):\n log = EditNameLog(user_id=user_id)\n db.session.add(log)\n db.session.commit()\n\n\n\n\n" }, { "alpha_fraction": 0.5637382864952087, "alphanum_fraction": 0.5679515600204468, "avg_line_length": 34.45185089111328, "blob_id": "26b112ec79fd9c25dd17ce56a0d605b7c274a0e1", "content_id": "33cdbf2674b8aee0a5f2111ec5203b574d2d384c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29491, "license_type": "no_license", "max_line_length": 127, "num_lines": 810, "path": "/hospital/views.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport math\nfrom collections import defaultdict\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\n\nfrom flask import request\nfrom flask import redirect\nfrom flask import Blueprint\nfrom flask import 
render_template\nfrom flask import make_response\n\nfrom util.utils import jsonify_response\nfrom util.utils import template_response\nfrom util.utils import abbreviated_pages\nfrom util.sign import gen_hospital_token\nfrom util.sign import del_cookie\nfrom util.sign import set_cookie\nfrom util.decorators import hospital_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.validators import IntChoiceField\nfrom util.validators import MobileField\nfrom models import db\nfrom models import Order\nfrom models import User\nfrom models import ItemCat\nfrom models import Item\nfrom models import ItemSubCat\nfrom models import ServiceCode\nfrom ops.hospital import HospitalService\nfrom ops.cache import HospitalInvalidUserPasswdCache\nfrom ops.data import DataService\nfrom ops.item import ItemService\nfrom ops.comment import CommentService\nfrom ops.user import UserService\nfrom ops.order import OrderService\nfrom ops.room_design import RoomDesignService\nfrom ops.promote import PromoteService\nfrom ops.bulks import fetch_credit_refs\nfrom ops.bulks import fetch_user_refs\nfrom ops.bulks import fetch_item_refs\nfrom ops.bulks import fetch_servicecode_refrence\nfrom ops.bulks import fetch_apply_refs\nfrom constants import ResponseCode\nfrom constants import ORDER_STATUS\nfrom constants import SERVICE_STATUS\nfrom settings import CONTACT\n\n\n\n\ndef set_order_status(order, comment=None, servicecode=None):\n ''' 根据服务码状态 是否已评论重新订单状态'''\n if not order['credit_verified']:\n order['status'] = ORDER_STATUS.VERIFYING\n elif order['status']==ORDER_STATUS.PAY_SUCCESS:\n if servicecode['status'] == 1:\n order['status'] = ORDER_STATUS.BOOKED\n elif servicecode['status'] == 2:\n order['status'] = ORDER_STATUS.CONFIRMED\n elif order['status'] == ORDER_STATUS.FINISH and not comment:\n order['status'] = 
ORDER_STATUS.TO_COMMENT\n\n\n\nindex_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='分页')),\n 'cat' : Optional(IntChoiceField(choices=[1,2,3,4], msg='订单类型')),\n 'phone' : Optional(MobileField(msg='客户手机号')),\n }\n )\n@hospital_dec(required=True, validator=index_validator)\ndef index():\n ''' 订单列表 '''\n offset = request.valid_data.get('offset')\n cat = request.valid_data.get('cat') or 1\n phone = request.valid_data.get('phone')\n if not phone: assert cat, '订单类型'\n if not cat: assert phone, '客户手机号'\n\n user = HospitalService.get_user_by_name(request.name)\n hospital_id = user.hospital_id\n\n conditions = []\n query_one = and_(\n Order.hospital_id==hospital_id,\n Order.credit_verified==1,\n Order.status.in_([ORDER_STATUS.PAY_SUCCESS, ORDER_STATUS.FINISH, ORDER_STATUS.CANCELED])\n )\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.STANDBY).subquery()\n query_two = and_(\n Order.hospital_id==hospital_id,\n Order.credit_verified==1,\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.id.in_(sub_query)\n )\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.BOOKED).subquery()\n conditions = [\n Order.hospital_id==hospital_id,\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.credit_verified==1,\n Order.id.in_(sub_query)\n ]\n query_three = and_(*conditions)\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.VERIFYED).subquery()\n conditions[:] = [\n Order.hospital_id==hospital_id,\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.credit_verified==1,\n Order.id.in_(sub_query)\n ]\n query_four = and_(*conditions)\n\n counters = {}\n\n condition_query = {\n 1: query_one,\n 2: query_two,\n 3: query_three,\n 4: query_four\n }\n for i in [1,2,3,4]:\n counters[i] = OrderService.count_order(condition_query[i])\n where = condition_query[cat]\n if phone:\n subquery = 
db.session.query(User.id).filter(User.phone==phone).subquery()\n where = and_(\n Order.hospital_id==hospital_id,\n Order.user_id.in_(subquery),\n Order.credit_verified==1,\n Order.status.in_([\n ORDER_STATUS.FINISH, ORDER_STATUS.PAY_SUCCESS\n , ORDER_STATUS.CANCELED\n ]\n )\n )\n has_more, orders = OrderService.get_paged_orders(where=where, offset=offset)\n\n order_ids = [i['id'] for i in orders]\n\n order_id_service_code_map = OrderService.get_service_codes_by_order_ids(order_ids)\n for order in orders:\n if order['status']==ORDER_STATUS.PAY_SUCCESS:\n order['label'] = '待预约'\n elif order['status']==ORDER_STATUS.BOOKED and not order_id_service_code_map.get(order['id']):\n order['label'] = '待验证'\n elif order['status']==ORDER_STATUS.BOOKED and order_id_service_code_map.get(order['id']):\n order['label'] = '待完成'\n else:\n order['label'] = '全部'\n\n offset = str(orders[-1]['id']) if orders else ''\n\n data = defaultdict(list)\n data[cat] = orders\n\n fields = ['name','id_no', 'user_id']\n fetch_credit_refs(orders, dest_key='credit', fields=fields, keep_id=True)\n fetch_user_refs(orders, keep_id=True)\n fields = ['id', 'title', 'price', 'orig_price', 'image', 'hospital_id']\n fetch_item_refs(orders, fields=fields, keep_id=True)\n fetch_servicecode_refrence(orders, 'id', dest_key='service_code_dict', keep_id=True)\n\n fetch_apply_refs(orders, dest_key='trial_apply', keep_id=True)\n\n order_item_map = {order['id']:order['item_id'] for order in orders}\n comments = CommentService.get_comments_by_item_ids(order_item_map.values())\n item_comment_map = {i['item_id']:i['id'] for i in comments}\n for order in orders:\n set_order_status(order, comment=item_comment_map.get(order['item_id']), servicecode=order['service_code_dict'])\n \n top_nav = {\n cat: 'mui-active'\n }\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'offset' : offset,\n 'counters' : counters,\n 'phone' : phone,\n 'data' : data,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : orders,\n 'top_nav' : top_nav,\n 
}\n\n template_name = 'hospital/index.html'\n if phone:\n template_name = 'hospital/index.html'\n if request.args.get('json'):\n return jsonify_response(result)\n return render_template(template_name, **result)\n\n\npaged_orders_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='分页')),\n 'cat' : Optional(IntChoiceField(choices=[1,2,3,4], msg='订单类型')),\n 'phone' : Optional(MobileField(msg='客户手机号')),\n }\n )\n@hospital_dec(required=True, validator=paged_orders_validator)\ndef get_paged_orders():\n ''' 分页订单列表 '''\n offset = request.valid_data.get('offset')\n cat = request.valid_data.get('cat') or 1\n phone = request.valid_data.get('phone')\n if not phone: assert cat, '订单类型'\n if not cat: assert phone, '客户手机号'\n\n user = HospitalService.get_user_by_name(request.name)\n hospital_id = user.hospital_id\n\n conditions = []\n query_one = and_(\n Order.hospital_id==hospital_id,\n Order.credit_verified==1,\n Order.status.in_([ORDER_STATUS.PAY_SUCCESS, ORDER_STATUS.FINISH, ORDER_STATUS.CANCELED])\n )\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.STANDBY).subquery()\n query_two = and_(\n Order.hospital_id==hospital_id,\n Order.credit_verified==1,\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.id.in_(sub_query)\n )\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.BOOKED).subquery()\n conditions = [\n Order.hospital_id==hospital_id,\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.credit_verified==1,\n Order.id.in_(sub_query)\n ]\n query_three = and_(*conditions)\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.VERIFYED).subquery()\n conditions[:] = [\n Order.hospital_id==hospital_id,\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n Order.credit_verified==1,\n Order.id.in_(sub_query)\n ]\n query_four = and_(*conditions)\n\n counters = {}\n\n condition_query = {\n 1: query_one,\n 2: query_two,\n 3: 
query_three,\n 4: query_four\n }\n for i in [1,2,3,4]:\n counters[i] = OrderService.count_order(condition_query[i])\n where = condition_query[cat]\n if phone:\n subquery = db.session.query(User.id).filter(User.phone==phone).subquery()\n where = and_(\n Order.hospital_id==hospital_id,\n Order.user_id.in_(subquery),\n Order.credit_verified==1,\n Order.status.in_([\n ORDER_STATUS.FINISH, ORDER_STATUS.PAY_SUCCESS\n , ORDER_STATUS.CANCELED\n ]\n )\n )\n has_more, orders = OrderService.get_paged_orders(where=where, offset=offset)\n\n order_ids = [i['id'] for i in orders]\n\n order_id_service_code_map = OrderService.get_service_codes_by_order_ids(order_ids)\n for order in orders:\n if order['status']==ORDER_STATUS.PAY_SUCCESS:\n order['label'] = '待预约'\n elif order['status']==ORDER_STATUS.BOOKED and not order_id_service_code_map.get(order['id']):\n order['label'] = '待验证'\n elif order['status']==ORDER_STATUS.BOOKED and order_id_service_code_map.get(order['id']):\n order['label'] = '待完成'\n else:\n order['label'] = '全部'\n\n offset = str(orders[-1]['id']) if orders else ''\n\n data = defaultdict(list)\n data[cat] = orders\n\n fields = ['name','id_no', 'user_id']\n fetch_credit_refs(orders, dest_key='credit', fields=fields, keep_id=True)\n fetch_user_refs(orders)\n fields = ['id', 'title', 'price', 'orig_price', 'image', 'hospital_id']\n fetch_item_refs(orders, fields=fields, keep_id=True)\n fetch_servicecode_refrence(orders, 'id', dest_key='service_code_dict', keep_id=True)\n\n order_item_map = {order['id']:order['item_id'] for order in orders}\n comments = CommentService.get_comments_by_item_ids(order_item_map.values())\n item_comment_map = {i['item_id']:i['id'] for i in comments}\n for order in orders:\n set_order_status(order, comment=item_comment_map.get(order['item_id']), servicecode=order['service_code_dict'])\n \n template = ''\n for order in orders:\n template += render_template('hospital/entry.html', info=order)\n\n result = {\n 'code': ResponseCode.SUCCESS,\n 'msg' : 
'',\n 'has_more': has_more,\n 'infos': template,\n 'offset': offset\n }\n return jsonify_response(result)\n\n\ndef login():\n ''' 医院端登录 '''\n return render_template('hospital/hospital_login.html')\n\n\nlogin_post_validator = Inputs(\n {\n 'name' : TextField(min_length=1, max_length=100, msg='请输入账号'),\n 'passwd' : TextField(min_length=1, max_length=100, msg='请输入密码')\n }\n )\n@hospital_dec(required=False, validator=login_post_validator)\ndef login_post():\n name = request.valid_data.get('name')\n passwd = request.valid_data.get('passwd')\n count = HospitalInvalidUserPasswdCache.incr(name)\n assert count<10, '今日密码错误次数超限,如需帮助,请联系美分分客服{}'.format(CONTACT)\n if HospitalService.check_user(name, passwd):\n response = jsonify_response({'code':ResponseCode.SUCCESS}, with_response=True)\n token = gen_hospital_token(name)\n set_cookie(response, 'hospital_name', name, 86400*30)\n set_cookie(response, 'sign', token, 86400*30)\n HospitalInvalidUserPasswdCache.incr(name, -1)\n return response\n assert 0, '用户名或密码错误'\n\n\ndef logout():\n try:\n response = redirect('/hospital/')\n del_cookie(response, 'sign')\n return response\n except:\n import traceback\n traceback.print_exc()\n return 'server error'\n\n\n\nbook_surgery_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id'),\n 'book_time' : TextField(min_length=1, max_length=100, msg='预约时间')\n }\n )\n@hospital_dec(required=True, validator=book_surgery_validator)\ndef book_surgery():\n ''' 接受用户预约 '''\n order_id = request.valid_data.get('order_id')\n book_time = request.valid_data.get('book_time')\n\n count = OrderService.book_surgery(order_id, book_time)\n\n msg = '预约成功' if count else '预约失败'\n result = {\n 'msg' : msg,\n 'code' : ResponseCode.SUCCESS\n }\n return jsonify_response(result)\n\n\nconfirm_surgery_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id'),\n 'service_code' : TextField(min_length=1, max_length=100, msg='服务码'),\n }\n )\n@hospital_dec(required=True, validator=confirm_surgery_validator)\ndef 
confirm_surgery():\n ''' 验证服务码 确认手术 '''\n service_code = request.valid_data.get('service_code')\n order_id = request.valid_data.get('order_id')\n count = OrderService.verify_servicecode(order_id, service_code)\n assert count, '确认手术失败'\n msg = '确认手术成功' if count else '确认手术失败'\n result = {\n 'msg' : msg,\n 'code' : ResponseCode.SUCCESS\n }\n return jsonify_response(result)\n\n\ncancel_book_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id'),\n }\n )\n@hospital_dec(required=True, validator=cancel_book_validator)\ndef cancel_book():\n ''' 取消预约 '''\n order_id = request.valid_data.get('order_id')\n count = OrderService.cancel_book(order_id)\n msg = '取消预约成功' if count else '没有找到预约记录或已取消预约'\n\n result = {\n 'msg' : msg,\n 'code' : ResponseCode.SUCCESS\n }\n return jsonify_response(result)\n\n\n\ncancel_surgery_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id'),\n }\n )\n@hospital_dec(required=True, validator=cancel_surgery_validator)\ndef cancel_surgery():\n ''' 取消手术 '''\n order_id = request.valid_data.get('order_id')\n count = OrderService.cancel_surgery(order_id)\n msg = '取消手术成功' if count else '订单不存在或已取消'\n\n result = {\n 'msg' : msg,\n 'code' : ResponseCode.SUCCESS\n }\n return jsonify_response(result)\n\n\nfinish_order_validator = Inputs(\n {\n 'order_id' : IdField(msg='订单id'),\n }\n )\n@hospital_dec(required=True, validator=finish_order_validator)\ndef finish_order():\n order_id = request.valid_data.get('order_id')\n order = OrderService.get_order_by_id(order_id)\n where = Order.status==ORDER_STATUS.PAY_SUCCESS\n count = OrderService.update_order_status(order_id, ORDER_STATUS.FINISH, where=where)\n if count and order.credit_choice_id:\n RoomDesignService.add_user_vote_privilege(order.user_id, 2)\n #PromoteService.add_rd_draw_count(order.user_id, 3)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '手术已完成'\n }\n return jsonify_response(result)\n\n\n@hospital_dec(required=True)\ndef get_hospital_cats():\n ''' 获取医院分类 '''\n user = 
HospitalService.get_user_by_name(request.name)\n hospital_id = user.hospital_id\n\n sub_cat_ids_count = HospitalService.get_hospital_sub_cat_ids_and_count(hospital_id)\n\n sub_cat_ids = sub_cat_ids_count.keys()\n\n sub_cat_count_map = {i:len(v) for i,v in sub_cat_ids_count.items()}\n\n sub_cat_list = ItemService.get_subcats_by_ids(sub_cat_ids)\n\n cat_ids = []\n for i in sub_cat_list:\n cat_ids.extend((i['cat_id_list']))\n\n for i in sub_cat_list:\n i['count'] = sub_cat_count_map.get(i['id'], 0)\n\n print sub_cat_ids, 'sub_cat_ids', sub_cat_count_map\n cat_list = ItemService.get_cats_by_ids(cat_ids)\n\n for cat in cat_list:\n sub_cats = [i for i in sub_cat_list if cat['id'] in i['cat_id_list']]\n cat['sub_cats'] = sub_cats\n cat['count'] = sum(i['count'] for i in sub_cats)\n\n total = sum(i['count'] for i in cat_list)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'cat_list' : cat_list,\n 'total' : total\n }\n return jsonify_response(result)\n\n\n\norder_list_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='分页')),\n 'cat' : Optional(IntChoiceField(choices=[1,2,3,4], msg='订单类型')),\n 'phone' : Optional(MobileField(msg='客户手机号')),\n }\n )\n@hospital_dec(required=True, validator=order_list_validator)\ndef get_orders():\n ''' 订单列表 '''\n offset = request.valid_data.get('offset')\n cat = request.valid_data.get('cat')\n phone = request.valid_data.get('phone')\n if not phone: assert cat, '订单类型'\n if not cat: assert phone, '客户手机号'\n\n user = HospitalService.get_user_by_name(request.name)\n hospital_id = user.hospital_id\n\n conditions = []\n if cat==2: #待预约\n conditions.append(\n Order.status==ORDER_STATUS.PAY_SUCCESS,\n )\n elif cat==3: #待验证\n sub_query = db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.STANDBY).subquery()\n conditions[:] = [\n Order.status==ORDER_STATUS.BOOKED,\n Order.id.in_(sub_query)\n ]\n elif cat==4: #待完成\n sub_query = 
db.session.query(ServiceCode.order_id).filter(ServiceCode.status==SERVICE_STATUS.VERIFYED).subquery()\n conditions[:] = [\n Order.status==ORDER_STATUS.BOOKED,\n Order.id.in_(sub_query)\n ]\n if phone: #按客户手机号搜索订单\n sub_query = db.session.query(User.id).filter(User.phone==phone).subquery()\n conditions[:] = [ Order.user_id.in_(sub_query) ]\n\n conditions.append(Order.hospital_id==hospital_id)\n where = and_(*conditions)\n has_more, orders = OrderService.get_paged_orders(where=where, offset=offset)\n offset = str(orders[-1]['id']) if orders else ''\n\n fetch_credit_refs(orders, dest_key='credit', keep_id=True)\n fetch_user_refs(orders)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'offset' : offset,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : orders,\n }\n return jsonify_response(result)\n\n\ndef change_passwd():\n return render_template('hospital/change_passwd.html')\n\n\nchange_passwd_validator = Inputs(\n {\n 'passwd' : TextField(min_length=0, max_length=100, msg='原密码'),\n 'new_passwd' : TextField(min_length=0, max_length=100, msg='新密码')\n }\n )\n@hospital_dec(validator=change_passwd_validator)\ndef change_passwd_post():\n ''' 修改密码 '''\n passwd = request.valid_data.get('passwd')\n new_passwd = request.valid_data.get('new_passwd')\n user = HospitalService.get_user_by_name(request.name)\n assert user and user.passwd==passwd, '原密码不正确'\n count = HospitalService.change_passwd(request.name, new_passwd)\n\n msg = '修改成功'\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : msg\n }\n return jsonify_response(result)\n\n\n\n\n\n\nsearch_order_validator = Inputs(\n {\n 'offset' : Optional(TextField(min_length=0, max_length=100, msg='分页')),\n 'phone' : Optional(MobileField(msg='客户手机号')),\n }\n )\n@hospital_dec(required=True, validator=search_order_validator)\ndef search_order_list():\n ''' 订单列表 '''\n offset = request.valid_data.get('offset')\n phone = request.valid_data.get('phone')\n if not phone: assert phone, '客户手机号'\n\n user = 
HospitalService.get_user_by_name(request.name)\n hospital_id = user.hospital_id\n\n conditions = []\n if phone: #按客户手机号搜索订单\n sub_query = db.session.query(User.id).filter(User.phone==phone).subquery()\n conditions.append(Order.user_id.in_(sub_query))\n conditions.append(Order.hospital_id==hospital_id)\n conditions.append(Order.credit_verified==1)\n where = and_(*conditions)\n has_more, orders = OrderService.get_paged_orders(where=where, offset=offset)\n\n order_ids = [i['id'] for i in orders]\n\n order_id_service_code_map = OrderService.get_service_codes_by_order_ids(order_ids)\n for order in orders:\n if order['status']==ORDER_STATUS.PAY_SUCCESS:\n order['label'] = '待预约'\n elif order['status']==ORDER_STATUS.BOOKED and not order_id_service_code_map.get(order['id']):\n order['label'] = '待验证'\n elif order['status']==ORDER_STATUS.BOOKED and order_id_service_code_map.get(order['id']):\n order['label'] = '待完成'\n else:\n order['label'] = '全部'\n\n offset = str(orders[-1]['id']) if orders else ''\n\n data = defaultdict(list)\n fetch_credit_refs(orders, dest_key='credit', keep_id=True)\n fetch_user_refs(orders)\n fetch_item_refs(orders)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'offset' : offset,\n 'phone' : phone,\n 'data' : data,\n 'msg' : '',\n 'has_more' : has_more,\n 'infos' : orders,\n }\n\n template_name = 'hospital/order_search.html'\n #return jsonify_response(result)\n return render_template(template_name, **result)\n\n@hospital_dec()\ndef home():\n user = HospitalService.get_user_by_name(request.name)\n hospital = ItemService.get_hospital_dict_by_id(user.hospital_id)\n return render_template('hospital/home.html',\n user=user,\n hospital=hospital\n )\n\n@hospital_dec()\ndef cat():\n ''' 获取医院分类 '''\n user = HospitalService.get_user_by_name(request.name)\n hospital_id = user.hospital_id\n sub_cat_ids_count = HospitalService.get_hospital_sub_cat_ids_and_count(hospital_id)\n\n sub_cat_ids = sub_cat_ids_count.keys()\n\n sub_cat_count_map = {i:len(v) for i,v in 
sub_cat_ids_count.items()}\n\n sub_cat_list = ItemService.get_subcats_by_ids(sub_cat_ids)\n cat_ids = []\n for i in sub_cat_list:\n cat_ids.extend((i['cat_id_list']))\n\n for i in sub_cat_list:\n i['count'] = sub_cat_count_map.get(i['id'], 0)\n\n print sub_cat_ids, 'sub_cat_ids', sub_cat_count_map\n cat_list = ItemService.get_cats_by_ids(cat_ids)\n\n for cat in cat_list:\n sub_cats = [i for i in sub_cat_list if cat['id'] in i['cat_id_list']]\n cat['sub_cats'] = sub_cats\n cat_sub_cat_item_ids = set()\n for i in sub_cats:\n cat_sub_cat_item_ids = cat_sub_cat_item_ids.union(sub_cat_ids_count[i['id']])\n cat['count'] = len(cat_sub_cat_item_ids)\n\n total = sum(i['count'] for i in cat_list)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'cat_list' : cat_list,\n 'total' : total\n }\n return render_template('hospital/cat.html',\n **result\n )\n\n\ndef reset_passwd():\n ''' 重置密码 '''\n return render_template('hospital/reset_passwd.html')\n\n\nreset_passwd_post_validator = Inputs(\n {\n 'passwd' : TextField(min_length=0, max_length=100, msg='密码'),\n 'new_passwd' : TextField(min_length=0, max_length=100, msg='新密码'),\n }\n )\n@hospital_dec(validator=reset_passwd_post_validator)\ndef reset_passwd_post():\n ''' 重置密码 '''\n passwd = request.valid_data.get('passwd')\n new_passwd = request.valid_data.get('new_passwd')\n\n print passwd\n print new_passwd\n user = HospitalService.get_user_by_name(request.name)\n\n assert user and user.passwd == passwd, '原密码错误'\n HospitalService.change_passwd(request.name, new_passwd)\n\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '修改成功'\n }\n return jsonify_response(result)\n\n\n\n@hospital_dec()\ndef cat_items():\n ''' 分类商品列表 '''\n cat_id = request.args.get('cat_id')\n sub_cat_id = request.args.get('sub_cat_id')\n keyword = request.args.get('keyword') or ''\n\n user = HospitalService.get_user_by_name(request.name)\n conditions = []\n where = None\n cat = None\n subcat = None\n if cat_id:\n cat = 
ItemService.get_cat_dict_by_id(cat_id)\n if sub_cat_id:\n subcat = ItemService.get_subcat_dict_by_id(sub_cat_id)\n cat = ItemService.get_cat_dict_by_id(subcat['cat_id'])\n if cat_id:\n query = or_(\n ItemSubCat.cat_ids==cat_id,\n ItemSubCat.cat_ids.like('%,{}'.format(cat_id)),\n ItemSubCat.cat_ids.like('%,{},%'.format(cat_id)),\n ItemSubCat.cat_ids.like('{},%'.format(cat_id))\n )\n result = db.session.query(ItemSubCat).filter(query).all()\n print result\n sub_cat_ids = [i.id for i in result]\n where = or_()\n for the_sub_cat_id in sub_cat_ids:\n the_query = or_(\n Item.sub_cat_ids==the_sub_cat_id,\n Item.sub_cat_ids.like('%,{}'.format(the_sub_cat_id)),\n Item.sub_cat_ids.like('%,{},%'.format(the_sub_cat_id)),\n Item.sub_cat_ids.like('{},%'.format(the_sub_cat_id))\n )\n where.append(the_query)\n elif sub_cat_id:\n query = or_(\n Item.sub_cat_ids==sub_cat_id,\n Item.sub_cat_ids.like('%,{}'.format(sub_cat_id)),\n Item.sub_cat_ids.like('%,{},%'.format(sub_cat_id)),\n Item.sub_cat_ids.like('{},%'.format(sub_cat_id))\n )\n where = query\n if keyword:\n where = or_(\n Item.id==keyword,\n and_(\n Item.title.like('%{}%'.format(keyword)),\n Item.hospital_id==user.hospital_id\n )\n )\n if where is not None:\n where = and_(\n where,\n Item.hospital_id==user.hospital_id\n )\n else:\n where = Item.hospital_id==user.hospital_id\n has_more, items = ItemService.get_paged_items(where=where)\n\n item_ids = [i['id'] for i in items]\n activity_items = ItemService.get_activity_items_by_item_ids(item_ids)\n item_price_map = {i.item_id:i.price for i in activity_items}\n for item in items:#活动价\n if item_price_map.get(item['id']):\n item['price'] = item_price_map.get(item['id'])\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'keyword' : keyword,\n 'infos' : items,\n 'has_more' : has_more,\n 'cat' : cat,\n 'subcat' : subcat,\n 'msg' : ''\n }\n #return jsonify_response(result)\n return render_template('hospital/cat_items.html', **result)\n\n\n\n" }, { "alpha_fraction": 0.6549618244171143, 
"alphanum_fraction": 0.6870229244232178, "avg_line_length": 24.19230842590332, "blob_id": "540d5fef88b9de0e4092467d74ade58c869e9f59", "content_id": "d78e83bc337e5dc2e2ebae7a02a82f704196fa21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 97, "num_lines": 26, "path": "/migrations/versions/4deed7860ffe_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4deed7860ffe\nRevises: 17354ab277c6\nCreate Date: 2015-12-19 11:40:41.995891\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4deed7860ffe'\ndown_revision = '17354ab277c6'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('admin_user', sa.Column('cat', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('admin_user', 'cat')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6215780973434448, "alphanum_fraction": 0.6827697157859802, "avg_line_length": 22.884614944458008, "blob_id": "cd811c94282aeed4c1a682283abbacf5541070cb", "content_id": "a22e979d7f81689ee32868fd69caace8f9323c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 87, "num_lines": 26, "path": "/migrations/versions/37321f4a22b3_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 37321f4a22b3\nRevises: 4deed7860ffe\nCreate Date: 2015-12-20 09:45:00.700298\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '37321f4a22b3'\ndown_revision = '4deed7860ffe'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_apply', sa.Column('has_supply', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('credit_apply', 'has_supply')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6208791136741638, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 29.33333396911621, "blob_id": "ee7da174624e7dd3f5152e133f7f7b25b0d9f5e7", "content_id": "c4eddc2545bcb1e347d115d6b3a34c7bfbdb2d02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 96, "num_lines": 30, "path": "/migrations/versions/4161c03ed1e6_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4161c03ed1e6\nRevises: 2eb48ce629a0\nCreate Date: 2015-12-17 19:35:22.464397\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4161c03ed1e6'\ndown_revision = '2eb48ce629a0'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('qr_code_user', sa.Column('city', sa.String(length=100), nullable=True))\n op.add_column('qr_code_user', sa.Column('headimgurl', sa.String(length=300), nullable=True))\n op.add_column('qr_code_user', sa.Column('nickname', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('qr_code_user', 'nickname')\n op.drop_column('qr_code_user', 'headimgurl')\n op.drop_column('qr_code_user', 'city')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6157249212265015, "alphanum_fraction": 0.6210340261459351, "avg_line_length": 27.630434036254883, "blob_id": "86e04c51492aa75878e0b3db95943837d2a07b9c", "content_id": "3e00150cbeb99a175ac502d2f2e25849a6991a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8095, "license_type": "no_license", "max_line_length": 85, "num_lines": 276, "path": "/ops/cache.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# -*- coding: utf-8 -*-\nimport json\nimport math\nimport random\nimport time\nfrom datetime import datetime\n\nimport redis\n\nfrom util.utils import get_today_timestamp\nfrom settings import REDIS_HOST\nfrom settings import REDIS_PORT\nfrom settings import CACHE_DB_NO\n\n\ncache = redis.Redis(REDIS_HOST, REDIS_PORT, CACHE_DB_NO)\n\nsms_cache = redis.Redis(REDIS_HOST, REDIS_PORT, 1)\n\ncommon_cache = redis.Redis(REDIS_HOST, REDIS_PORT, 10)\n\n\ncurrent_time = lambda:int(time.time())\ndef today_remain_seconds():\n now = datetime.now()\n year, month, day = now.year, now.month, now.day\n now_second = time.mktime(now.timetuple())\n cut_now = datetime(year, month, day)\n cut_now_second = time.mktime(cut_now.timetuple())\n return 86400 - int(now_second-cut_now_second)\n\n\nclass SmsCache:\n sms_key = 'sms_vcode:{0}'\n sms_total_key = 'sms_vcode_count:{0}'\n\n @classmethod\n def set_vcode(cls, phone, vcode, expire=600):\n key = cls.sms_key.format(phone)\n return sms_cache.set(key, vcode, expire)\n @classmethod\n def get_vcode(cls, phone):\n key = cls.sms_key.format(phone)\n return sms_cache.get(key)\n @classmethod\n def expire_vcode(cls, phone):\n key = cls.sms_key.format(phone)\n return sms_cache.expire(key, 0)\n \n @classmethod\n def get_sent_count(cls, phone):\n key = 
cls.sms_total_key.format(phone)\n return int(sms_cache.get(key) or 0)\n\n @classmethod\n def incr_sent_count(cls, phone, ex=None):\n key = cls.sms_total_key.format(phone)\n count = sms_cache.incr(key)\n if not ex: ex = today_remain_seconds()\n sms_cache.expire(key, ex)\n return count\n\n @classmethod\n def decr_sent_count(cls, phone, ex=None):\n key = cls.sms_total_key.format(phone)\n count = sms_cache.decr(key)\n if not ex: ex = today_remain_seconds()\n sms_cache.expire(key, ex)\n return count\n\n @classmethod\n def clear_sent_count(cls, phone):\n key = cls.sms_total_key.format(phone)\n return sms_cache.expire(key, 1)\n\n\n\nclass WechatTokenCache(object):\n cache_key = 'wechat_access_token_cache_key'\n cache = common_cache\n\n @classmethod\n def set(cls, access_token, ex=None):\n return cls.cache.set(cls.cache_key, access_token, ex=ex)\n\n @classmethod\n def get(cls):\n return cls.cache.get(cls.cache_key)\n\n\nclass ChsiCache(object):\n ''' 学信网session缓存 '''\n cache_key = 'chsi_session_user_id_cache'\n cache = cache\n\n @classmethod\n def make_key(cls, user_id):\n return cls.cache_key+str(user_id)\n\n @classmethod\n def set(cls, user_id, session_pickle):\n cache_key = cls.make_key(user_id)\n return cls.cache.set(cache_key, session_pickle)\n\n @classmethod\n def get(cls, user_id):\n cache_key = cls.make_key(user_id)\n return cls.cache.get(cache_key)\n\n\nclass TodayInvalidCounter(object):\n @classmethod\n def make_key(cls, phone, timestamp):\n return cls.cache_key.format(phone) + '_' + str(timestamp)\n\n @classmethod\n def incr(cls, phone, amount=1, timestamp=None):\n if not timestamp: timestamp = get_today_timestamp()\n key = cls.make_key(phone, timestamp=timestamp)\n count = cls.cache.incr(key, amount)\n return int(count)\n\n @classmethod\n def clear_today_counter(cls, phone):\n ''' '''\n key = cls.make_key(phone, get_today_timestamp())\n return cls.cache.delete(key)\n\n\nclass InvalidUserPasswdCache(TodayInvalidCounter):\n ''' 每日用户名错误次数计数 '''\n cache = cache\n 
cache_key = 'invalid_user_passwd_counter_{}'\n\n\nclass InvalidUserSignupVcodeCache(TodayInvalidCounter):\n ''' 每日注册验证码错误次数计数 '''\n cache = cache\n cache_key = 'invalid_user_signup_vcode_counter_{}'\n\n\nclass InvalidUserResetVcodeCache(TodayInvalidCounter):\n ''' 每日重置密码验证码错误次数计数 '''\n cache = cache\n cache_key = 'invalid_user_reset_vcode_counter_{}'\n\n\nclass AdminInvalidUserPasswdCache(TodayInvalidCounter):\n ''' 管理员每日用户名错误次数计数 '''\n cache = cache\n cache_key = 'invalid_admin_user_passwd_counter_{}'\n\n\nclass HospitalInvalidUserPasswdCache(TodayInvalidCounter):\n ''' 医院端管理员每日用户名错误次数计数 '''\n cache = cache\n cache_key = 'invalid_hospital_admin_user_passwd_counter_{}'\n\n\n\nAPI_VERSION = '1.0'\n\nclass AppVersion(object):\n ''' 设置app版本 '''\n cache = cache\n force_key = 'app_version_force_update_{}'.format(API_VERSION)\n download_key = 'app_version_android_download_link_{}'.format(API_VERSION)\n title_key = 'app_version_title_{}'.format(API_VERSION)\n content_key = 'app_version_content_{}'.format(API_VERSION)\n @classmethod\n def make_key(cls, client_type):\n return 'app_version_{}_{}'.format(API_VERSION, client_type)\n\n @classmethod\n def set_version(cls, client_type, version_no):\n key = cls.make_key(client_type)\n return cls.cache.set(key, version_no)\n\n @classmethod\n def get_version(cls, client_type):\n key = cls.make_key(client_type)\n version = cls.cache.get(key) or 0\n return float(version)\n\n @classmethod\n def set_force_update(cls, is_force):\n val = '1' if is_force else ''\n return cls.cache.set(cls.force_key, val)\n @classmethod\n def get_force_update(cls):\n return bool(cls.cache.get(cls.force_key))\n\n @classmethod\n def set_download_link(cls, link):\n return cls.cache.set(cls.download_key, link)\n @classmethod\n def get_download_link(cls):\n return cls.cache.get(cls.download_key) or ''\n\n @classmethod\n def set_title(cls, title):\n return cls.cache.set(cls.title_key, title)\n @classmethod\n def get_title(cls):\n return 
cls.cache.get(cls.title_key)\n\n @classmethod\n def set_content(cls, content):\n return cls.cache.set(cls.content_key, content)\n @classmethod\n def get_content(cls):\n return cls.cache.get(cls.content_key) \n\n\n\nclass RoomDesignVoteCounter(object):\n cache = cache\n cache_key = 'roomdesign_vote_counter'\n apply_no_key= 'roomdesign_apply_no_key'\n score_key = 'roomdesign_score_key'\n\n @classmethod\n def incr(cls, room_id, amount=1):\n ''' '''\n return int(math.floor(cls.cache.zincrby(cls.cache_key, room_id, amount)))\n\n @classmethod\n def rank(cls, room_id):\n score = cls.cache.zscore(cls.cache_key, room_id)\n score = int(score or 0)\n print score, 'score'\n return cls.cache.zrevrank(cls.score_key, score) + 1\n\n @classmethod\n def init(cls, room_id):\n differ = 1-0.9999999**room_id\n return int(cls.cache.zincrby(cls.cache_key, room_id, differ))\n\n @classmethod\n def get_vote_by_rank(cls, rank):\n ''' 由排名获得票数 '''\n result = cls.cache.zrevrange(cls.score_key, rank-1, rank-1, withscores=True)\n if result: return int(math.floor(result[0][1]))\n\n @classmethod\n def incr_apply_no(cls, count=1):\n ''' 生成参赛编号'''\n return str(cls.cache.incr(cls.apply_no_key, count))\n\n @classmethod\n def get_paged_rank_room_ids(cls, start, end):\n ''' '''\n return map(int, cache.zrevrange(cls.cache_key,start, end))\n\n @classmethod\n def reset(cls):\n cls.cache.delete(cls.cache_key)\n cls.cache.delete(cls.score_key)\n cls.cache.delete(cls.apply_no_key)\n\n @classmethod\n def remove_score(cls, score):\n ''' '''\n return cls.cache.zrem(cls.score_key, score)\n @classmethod\n def exists_score(cls, score):\n ''' '''\n result = cls.cache.zrangebyscore(cls.cache_key, score, score+1, withscores=1)\n for i,j in result:\n if int(j)==score: return True\n\n @classmethod\n def add_score(cls, score):\n ''' '''\n return cls.cache.zadd(cls.score_key, score, score)\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6215953230857849, "alphanum_fraction": 0.661478579044342, "avg_line_length": 
28.371429443359375, "blob_id": "6c6d0a1285cd4d86dc9e7e289c0ea347b4314751", "content_id": "4399fe3f314bc15514eddd83e66c03673bea1350", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 97, "num_lines": 35, "path": "/migrations/versions/46669801ed5f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 46669801ed5f\nRevises: 314740bf50b9\nCreate Date: 2015-12-04 15:39:42.974215\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '46669801ed5f'\ndown_revision = '314740bf50b9'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('qr_code_user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('open_id', sa.String(length=50), nullable=True),\n sa.Column('scene_id', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('open_id')\n )\n op.create_index(op.f('ix_qr_code_user_scene_id'), 'qr_code_user', ['scene_id'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_qr_code_user_scene_id'), table_name='qr_code_user')\n op.drop_table('qr_code_user')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5990491509437561, "alphanum_fraction": 0.6909667253494263, "avg_line_length": 23.269229888916016, "blob_id": "9b6736bffe6f1a332dbd7df9546bf2f9c74e5556", "content_id": "ea3439ab023f7f2137c87a60254cef5476bf9312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 90, "num_lines": 26, "path": "/migrations/versions/4806ede530e3_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4806ede530e3\nRevises: 503198b95377\nCreate Date: 2015-11-23 11:34:07.511517\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4806ede530e3'\ndown_revision = '503198b95377'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item_comment', sa.Column('is_re_comment', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('item_comment', 'is_re_comment')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5904731750488281, "alphanum_fraction": 0.5911582112312317, "avg_line_length": 33.92265319824219, "blob_id": "b81e2a4a12a24f8a23c5178a1ecde10fa3703ee2", "content_id": "6cc4ddab7aa7f4ff10b67ea3c8adb3827462a76b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19122, "license_type": "no_license", "max_line_length": 141, "num_lines": 543, "path": "/ops/item.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom collections import defaultdict\n\nfrom sqlalchemy import func\nfrom sqlalchemy import and_\nfrom sqlalchemy.sql import exists\nfrom models import db\nfrom models import Item\nfrom models import Hospital\nfrom models import ItemCat\nfrom models import ItemSubCat\nfrom models import RecommendItem\nfrom models import RecommendSubcat\nfrom models import RecommendHospital\nfrom models import ItemFav\nfrom models import Activity\nfrom models import ActivityItem\nfrom util.utils import dt_obj\nfrom util.utils import format_price\nfrom util.sqlerr import SQL_DUPLICATE_NAME\nfrom ops.utils import get_page\nfrom ops.utils import get_items\nfrom ops.utils import count_items\n\n\nclass ItemService(object):\n\n @staticmethod\n def create_item(title, hospital_id, sub_cat_id, sub_cat_ids, price, orig_price, \\\n item_no, support_choices, photos, surgery_desc, doctor_desc, image, has_fee, direct_buy, \\\n use_time, note):\n ''' 创建商品 '''\n item = Item(\n title=title,\n item_no=item_no,\n hospital_id=hospital_id,\n sub_cat_id=sub_cat_id,\n price=price,\n orig_price=orig_price,\n support_choices=support_choices,\n photos=photos,\n doctor_desc=doctor_desc,\n image=image,\n surgery_desc=surgery_desc,\n direct_buy=direct_buy,\n has_fee=has_fee,\n use_time=use_time,\n note=note\n )\n db.session.add(item)\n db.session.commit()\n return item.id\n\n @staticmethod\n def 
update_item(item_id, **kw):\n count = Item.query.filter(Item.id==item_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def offline_item(item_id):\n ActivityItem.query.filter(ActivityItem.item_id==item_id).delete()\n RecommendItem.query.filter(RecommendItem.item_id==item_id).delete()\n db.session.commit()\n\n @staticmethod\n def offline_subcat(subcat_id):\n RecommendSubcat.query.filter(RecommendSubcat.sub_cat_id==subcat_id).delete()\n db.session.commit()\n\n @staticmethod\n def set_hospital_status(hospital_id, status):\n count = Hospital.query.filter(Hospital.id==hospital_id).update({'status':status})\n db.session.commit()\n return count\n @staticmethod\n def set_hospital_item_status(where, item_status):\n count = Item.query.filter(where).update({'status':item_status})\n db.session.commit()\n\n @staticmethod\n def update_hospital(item_id, **kw):\n count = Hospital.query.filter(Hospital.id==item_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def create_hospital(**kw):\n hospital = Hospital(**kw)\n db.session.add(hospital)\n db.session.commit()\n return hospital.id\n\n @staticmethod\n def create_cat(name):\n try:\n cat = ItemCat(name=name)\n db.session.add(cat)\n db.session.commit()\n return cat.id\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE_NAME.search(str(e)):\n assert 0, '分类名已存在'\n\n @staticmethod\n def update_cat(cat_id, **kw):\n count = ItemCat.query.filter(ItemCat.id==cat_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def update_subcat(sub_cat_id, **kw):\n count = ItemSubCat.query.filter(ItemSubCat.id==sub_cat_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def create_sub_cat(cat_id, name, icon, desc, cat_ids):\n try:\n sub_cat = ItemSubCat(name=name, cat_id=cat_id, cat_ids=cat_ids, icon=icon, desc=desc)\n db.session.add(sub_cat)\n db.session.commit()\n return sub_cat.id\n except Exception as e:\n db.session.rollback()\n if 
SQL_DUPLICATE_NAME.search(str(e)):\n assert 0, '小类名已存在'\n\n @staticmethod\n def get_the_paged_items(**kw):\n return get_page(Item, {}, **kw)\n\n @staticmethod\n def get_paged_items(**kw):\n\n current_time = dt_obj.now()\n query = and_(\n Activity.start_time<current_time,\n Activity.end_time>=current_time\n )\n activity = Activity.query.filter(query).first()\n activity_id = None\n if activity:\n activity_id = activity.id\n join = None#ActivityItem.activity_id==activity_id\n has_more, items, extras = get_page(Item, {}, extra=ActivityItem, **kw)\n item_activity_price_map = {i.item_id:float(i.price) for i in extras}\n for i in items:\n if item_activity_price_map.get(i['id']):\n #i['old_price'] = i['price']\n i['price'] = int(item_activity_price_map.get(i['id']))\n return has_more, items\n\n @staticmethod\n def get_item_cats():\n return ItemCat.query.order_by(ItemCat.sort_order.asc()).all()\n\n @staticmethod\n def get_item_subcats(**kw):\n condition = {}\n where = ItemSubCat.status==1\n kw.setdefault('where', where)\n kw.setdefault('no_limit', True)\n kw.setdefault('fields', ['id', 'name', 'icon', 'cat_id', 'cat_id_list'])\n has_more, infos = get_page(ItemSubCat, condition, **kw)\n return infos\n\n @staticmethod\n def get_item_dict_by_id(item_id, fields=None):\n ''' '''\n item = Item.query.filter(Item.id==item_id).first()\n if item:\n item_dict = item.as_dict()\n if fields:\n for k, v in item_dict.items():\n if k not in fields: item_dict.pop(k, '')\n return item_dict\n\n @staticmethod\n def get_cat_dict_by_id(cat_id, fields=None):\n ''' '''\n item = ItemCat.query.filter(ItemCat.id==cat_id).first()\n if item:\n item_dict = item.as_dict()\n if fields:\n for k, v in item_dict.items():\n if k not in fields: item_dict.pop(k, '')\n return item_dict\n\n @staticmethod\n def get_subcat_dict_by_id(sub_cat_id, fields=None):\n ''' '''\n item = ItemSubCat.query.filter(ItemSubCat.id==sub_cat_id).first()\n if item:\n item_dict = item.as_dict()\n if fields:\n for k, v in 
item_dict.items():\n if k not in fields: item_dict.pop(k, '')\n return item_dict\n\n @staticmethod\n def get_item_cat_choices():\n items = ItemCat.query.all()\n sub_items = ItemSubCat.query.all()\n result = []\n for cat in items:\n name = cat.name\n cat_id = cat.id\n cat_subs= [i for i in sub_items if i.cat_id==cat_id]\n tmp = {\n 'id' : cat_id,\n 'name' : name,\n 'sub_cats': [{'id':i.id, 'name':i.name} for i in cat_subs]\n }\n result.append(tmp)\n return result\n\n @staticmethod\n def fav_item(user_id, item_id):\n ''' 添加商品到心愿单 '''\n try:\n fav = ItemFav(user_id=user_id, item_id=item_id)\n db.session.add(fav)\n db.session.commit()\n return fav.id\n except Exception as e:\n db.session.rollback()\n\n @staticmethod\n def unfav_item(user_id, item_id):\n ''' 从心愿单移除商品 '''\n query = and_(\n ItemFav.user_id==user_id,\n ItemFav.item_id==item_id\n )\n count = ItemFav.query.filter(query).delete()\n db.session.commit()\n return count\n\n @staticmethod\n def has_fav(item_id, user_id):\n query = and_(\n ItemFav.user_id==user_id,\n ItemFav.item_id==item_id\n )\n return ItemFav.query.filter(query).first()\n\n @staticmethod\n def get_paged_cats(**kw):\n return get_page(ItemCat, {}, **kw)\n\n @staticmethod\n def get_paged_sub_cats(**kw):\n return get_page(ItemSubCat, {}, **kw)\n\n @staticmethod\n def get_paged_hospitals(**kw):\n return get_page(Hospital, {}, **kw)\n\n @staticmethod\n def get_paged_fav_items(**kw):\n return get_page(ItemFav, {}, **kw)\n\n @staticmethod\n def get_items_by_ids(item_ids, **kw):\n return get_items(Item, item_ids, **kw)\n\n @staticmethod\n def get_hospitals_by_ids(item_ids, **kw):\n return get_items(Hospital, item_ids, **kw)\n\n @staticmethod\n def get_cats_by_ids(item_ids, **kw):\n return get_items(ItemCat, item_ids, **kw)\n\n @staticmethod\n def get_subcats_by_ids(item_ids, **kw):\n return get_items(ItemSubCat, item_ids, **kw)\n\n @staticmethod\n def get_hospital_dict_by_id(hospital_id, **kw):\n items = get_items(Hospital, (hospital_id, ), **kw)\n if 
items: return items[0]\n\n @staticmethod\n def count_items(where=None):\n query = db.session.query(func.count(Item.id))\n if where is not None: query = query.filter(where)\n return query.scalar()\n\n @staticmethod\n def count_hospitals(where=None):\n return count_items(Hospital, where=where)\n\n @staticmethod\n def set_subcat_status(subcat_id, status):\n count = ItemSubCat.query.filter(ItemSubCat.id==subcat_id).update({'status':status})\n db.session.commit()\n return count\n\n @staticmethod\n def count_sub_cat_items(hospital_id=None):\n return db.session.query(Item.sub_cat_id, func.count(Item.id)).filter(Item.hospital_id==hospital_id).group_by(Item.sub_cat_id).all()\n\n @staticmethod\n def add_recommend_item(item_id, sort_order, image, desc):\n recommend = RecommendItem(item_id=item_id, sort_order=sort_order, image=image, desc=desc)\n db.session.add(recommend)\n db.session.commit()\n\n @staticmethod\n def update_recommend_item(item_id, **kw):\n count = RecommendItem.query.filter(RecommendItem.item_id==item_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def rm_recommend_item(item_id):\n count = RecommendItem.query.filter(RecommendItem.item_id==item_id).delete()\n db.session.commit()\n return count\n @staticmethod\n def rm_recommend_hospital(item_id):\n count = RecommendHospital.query.filter(RecommendHospital.hospital_id==item_id).delete()\n db.session.commit()\n return count\n\n @staticmethod\n def exists_recommend_item_ids(item_ids):\n rows = db.session.query(RecommendItem).filter(RecommendItem.item_id.in_(item_ids)).all()\n return {i.item_id:i.sort_order for i in rows}\n @staticmethod\n def exists_recommend_hospital_ids(item_ids):\n rows = db.session.query(RecommendHospital).filter(RecommendHospital.hospital_id.in_(item_ids)).all()\n return {i.hospital_id:i.sort_order for i in rows}\n\n @staticmethod\n def exists_recommend_subcat_map(subcat_ids):\n rows = 
db.session.query(RecommendSubcat).filter(RecommendSubcat.sub_cat_id.in_(subcat_ids)).all()\n return {i.sub_cat_id:i.sort_order for i in rows}\n\n @staticmethod\n def top_recommend_item(item_id):\n ''' 推荐置顶 '''\n item = RecommendItem.query.filter(RecommendItem.item_id==item_id).first()\n if not item: return\n\n query = and_(\n RecommendItem.item_id!=item_id,\n RecommendItem.sort_order<=item.sort_order\n )\n count = RecommendItem.query.filter(query) \\\n .update({'sort_order':RecommendItem.sort_order+1})\n RecommendItem.query.filter(RecommendItem.item_id==item_id).update({'sort_order':0})\n db.session.commit()\n return count\n\n @staticmethod\n def top_recommend_subcat(sub_cat_id):\n ''' 推荐子分类置顶 '''\n item = RecommendSubcat.query.filter(RecommendSubcat.sub_cat_id==sub_cat_id).first()\n if not item: return\n\n query = and_(\n RecommendSubcat.sub_cat_id!=sub_cat_id,\n RecommendSubcat.sort_order<=item.sort_order\n )\n count = RecommendSubcat.query.filter(query) \\\n .update({'sort_order':RecommendSubcat.sort_order+1})\n RecommendSubcat.query.filter(RecommendSubcat.sub_cat_id==sub_cat_id).update({'sort_order':0})\n db.session.commit()\n return count\n\n @staticmethod\n def exists_recommend_subcat_ids(subcat_ids):\n rows = db.session.query(RecommendSubcat.sub_cat_id).filter(RecommendSubcat.sub_cat_id.in_(subcat_ids)).all()\n return [i.item_id for i in rows]\n\n\n @staticmethod\n def add_recommend_subcat(sub_cat_id, sort_order, icon):\n recommend = RecommendSubcat(sub_cat_id=sub_cat_id, sort_order=sort_order, icon=icon)\n db.session.add(recommend)\n db.session.commit()\n\n @staticmethod\n def update_recommend_subcat(sub_cat_id, **kw):\n count = RecommendSubcat.query.filter(RecommendSubcat.sub_cat_id==sub_cat_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def add_recommend_hospital(hospital_id, sort_order, color, tag):\n recommend = RecommendHospital(hospital_id=hospital_id, sort_order=sort_order, color=color, tag=tag)\n db.session.add(recommend)\n 
db.session.commit()\n @staticmethod\n def update_recommend_hospital(hospital_id, **kw):\n count = RecommendHospital.query.filter(RecommendHospital.hospital_id==hospital_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def rm_recommend_subcat(sub_cat_id):\n count = RecommendSubcat.query.filter(RecommendSubcat.sub_cat_id==sub_cat_id).delete()\n db.session.commit()\n return count\n\n @staticmethod\n def get_item_recommend(item_id):\n recommend = RecommendItem.query.filter(RecommendItem.item_id==item_id).first()\n if recommend: return recommend.as_dict()\n\n @staticmethod\n def get_subcat_recommend(sub_cat_id):\n recommend = RecommendSubcat.query.filter(RecommendSubcat.sub_cat_id==sub_cat_id).first()\n if recommend: return recommend.as_dict()\n\n @staticmethod\n def get_hospital_recommend(hospital_id):\n recommend = RecommendHospital.query.filter(RecommendHospital.hospital_id==hospital_id).first()\n if recommend: return recommend.as_dict()\n\n @staticmethod\n def get_item_activity(item_id, activity_id=None):\n query = and_()\n query.append(ActivityItem.item_id==item_id)\n if activity_id: query.append(ActivityItem.activity_id==activity_id)\n activity = ActivityItem.query.filter(query).first()\n if activity: return activity.as_dict()\n\n @staticmethod\n def add_activity_item(item_id, sort_order, activity_id, price, image):\n activity = ActivityItem(item_id=item_id, sort_order=sort_order, activity_id=activity_id, price=price, image=image)\n db.session.add(activity)\n db.session.commit()\n\n @staticmethod\n def update_activity_item(item_id, **kw):\n count = ActivityItem.query.filter(ActivityItem.item_id==item_id).update(kw)\n db.session.commit()\n return count\n\n @staticmethod\n def get_paged_recommend_subcats(**kw):\n return get_page(RecommendSubcat, **kw)\n\n @staticmethod\n def get_paged_recommend_hospitals(**kw):\n return get_page(RecommendHospital, **kw)\n\n @staticmethod\n def get_paged_activity_items(**kw):\n return get_page(ActivityItem, 
**kw)\n\n @staticmethod\n def get_paged_recommend_items(**kw):\n return get_page(RecommendItem, **kw)\n\n @staticmethod\n def check_exist_order(sort_order):\n query = and_(\n RecommendItem.sort_order==sort_order\n )\n return db.session.query(RecommendItem).filter(query).first()\n\n @staticmethod\n def check_exist_subcat_order(sort_order):\n query = and_(\n RecommendSubcat.sort_order==sort_order\n )\n return db.session.query(RecommendSubcat).filter(query).first()\n\n @staticmethod\n def check_exist_hospital_order(sort_order):\n query = and_(\n RecommendHospital.sort_order==sort_order\n )\n return db.session.query(RecommendHospital).filter(query).first()\n\n @staticmethod\n def incr_item_count(item_id, amount=1):\n ''' 商品已售数加1 医院已售数加1 '''\n item = Item.query.filter(Item.id==item_id).first()\n count = Item.query.filter(Item.id==item_id).update({'sold_count':Item.sold_count+amount}, synchronize_session=False)\n if item:\n Hospital.query.filter(Hospital.id==item.hospital_id).update({'sold_count':Hospital.sold_count+amount}, synchronize_session=False)\n db.session.commit()\n\n @staticmethod\n def get_activity_items_by_item_ids(item_ids):\n return ActivityItem.query.filter(ActivityItem.item_id.in_(item_ids)).all()\n\n @staticmethod\n def count_hospital_items(hospital_ids):\n ''' 医院商品计数 '''\n query = and_(\n Item.status==1,\n Item.hospital_id.in_(hospital_ids)\n )\n result = db.session.query(\n Item.hospital_id,func.count(Item.id)).filter(query).group_by(Item.hospital_id).all()\n return dict(result)\n\n @staticmethod\n def get_hospital_item_cats(hospital_ids):\n rows = db.session.query(\n Item.sub_cat_ids, Item.hospital_id\n ).filter(Item.hospital_id.in_(hospital_ids)).all()\n data = defaultdict(set)\n for row in rows:\n sub_cat_ids, hospital_id = row\n sub_cat_id_list = map(int, sub_cat_ids.split(','))\n for i in sub_cat_id_list: data[hospital_id].add(i)\n return data\n\n @staticmethod\n def get_sub_cat_id_name(sub_cat_ids, all_sub_cats=None, all_cats=None):\n ''' 
子分类id列表对应分类列表 '''\n sub_cats = filter(lambda i:i['id'] in sub_cat_ids, all_sub_cats)\n cat_ids = [i['cat_id'] for i in sub_cats]\n cats = filter(lambda i:i['id'] in cat_ids, all_cats)\n return cats\n\n @staticmethod\n def set_item_cat_order(cat_id, sort_order):\n count = ItemCat.query.filter(ItemCat.id==cat_id).update(\n {'sort_order':sort_order},\n synchronize_session=False\n )\n db.session.commit()\n\n @staticmethod\n def get_activity_prices(item_ids, activity_id=None):\n ''' '''\n query = and_(\n ActivityItem.item_id.in_(item_ids),\n ActivityItem.activity_id==activity_id\n )\n items = ActivityItem.query.filter(query).all()\n price_map = {i.item_id:format_price(i.price) for i in items}\n return price_map\n\n\n \n\n\n\n\n" }, { "alpha_fraction": 0.6219708919525146, "alphanum_fraction": 0.6833602786064148, "avg_line_length": 22.80769157409668, "blob_id": "bc66ee42dde571ba311e4476601a0d0c8601bda2", "content_id": "9ff1c9b1f335b205ffa5c8b27043b109193d39c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "no_license", "max_line_length": 90, "num_lines": 26, "path": "/migrations/versions/2ea902b5c7a3_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2ea902b5c7a3\nRevises: 5a62ecc7316e\nCreate Date: 2016-01-14 11:11:21.248504\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2ea902b5c7a3'\ndown_revision = '5a62ecc7316e'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('beauty_entry', sa.Column('icon', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('beauty_entry', 'icon')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.49484536051750183, "alphanum_fraction": 0.6907216310501099, "avg_line_length": 15.560976028442383, "blob_id": "daed1c45aaeaa41c52038ee29284a2ed10bfdcda", "content_id": "3720b5a436e6e5871741c12a9cd4bf8ed4a08bda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 679, "license_type": "no_license", "max_line_length": 23, "num_lines": 41, "path": "/requirements.txt", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "alembic==0.8.2\namqp==1.4.7\nanyjson==0.3.3\nappnope==0.1.0\nbilliard==3.3.0.20\ncelery==3.1.18\ndecorator==4.0.4\nFlask==0.10.1\nFlask-Inputs==0.1.0\nFlask-Migrate==1.6.0\nFlask-Script==2.0.5\nFlask-SQLAlchemy==2.0\ngnureadline==6.3.3\nipython==4.0.0\nipython-genutils==0.1.0\nitsdangerous==0.24\nJinja2==2.8\nkombu==3.0.28\nlxml==3.4.4\nMako==1.0.2\nMarkupSafe==0.23\nMySQL-python==1.2.5\npath.py==8.1.2\npexpect==4.0.1\npickleshare==0.5\nptyprocess==0.5\npycurl==7.19.5.3\npython-editor==0.4\npython-weixin==0.0.2\npytz==2015.6\nqiniu==7.0.5\nredis==2.10.3\nrequests==2.8.1\nsimplegeneric==0.8.1\nsix==1.10.0\nSQLAlchemy==1.0.8\ntraitlets==4.0.0\nuWSGI==2.0.11.2\nwechat-sdk==0.5.8\nWerkzeug==0.10.4\nWTForms==2.0.2\n" }, { "alpha_fraction": 0.5192837715148926, "alphanum_fraction": 0.555096447467804, "avg_line_length": 47.33333206176758, "blob_id": "b549df9729930c3612ed36d62b50a3613ad53414", "content_id": "65b3f64dc1849432fca1d66072cabfb292186a9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1466, "license_type": "no_license", "max_line_length": 170, "num_lines": 30, "path": "/static/js/baidumap.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "//经纬度转城市名\nfunction lnglatToLocation(latitude, longitude, callback) {\n 
console.log('http://api.map.baidu.com/geocoder/v2/?ak=74136f02e6474fb72f3000e449e93c97&callback=renderReverse&location='+latitude+','+longitude+'&output=json&pois=1')\n $.ajax({ \n url: 'http://api.map.baidu.com/geocoder/v2/?ak=74136f02e6474fb72f3000e449e93c97&callback=renderReverse&location='+latitude+','+longitude+'&output=json&pois=1', \n type: \"get\", \n dataType: \"jsonp\", \n jsonp: \"callback\", \n success: function (data) { \n console.log(data);\n var province = data.result.addressComponent.province; \n var city_name = (data.result.addressComponent.city); \n var district = data.result.addressComponent.district; \n var street = data.result.addressComponent.street; \n var street_number = data.result.addressComponent.street_number; \n var formatted_address = data.result.formatted_address;\n var city_code = data.result.cityCode;\n //alert(city_code);\n var data = { \n latitude: latitude, \n longitude: longitude, \n city_code: city_code,\n city_name: city_name\n };\n if (typeof callback == \"function\") { \n callback(data); \n }\n } \n }); \n};\n\n\n" }, { "alpha_fraction": 0.6415384411811829, "alphanum_fraction": 0.6907692551612854, "avg_line_length": 24, "blob_id": "f1b23de977789597097da83b52f7f8ca3952c5d9", "content_id": "0b6f1176689d2a47e0e5753d98a461949ea5ff8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/migrations/versions/21e38b1473b2_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 21e38b1473b2\nRevises: 2c01f9e048f7\nCreate Date: 2015-11-07 14:35:55.667526\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '21e38b1473b2'\ndown_revision = '2c01f9e048f7'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('order', sa.Column('refund', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('order', 'refund')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.504273533821106, "alphanum_fraction": 0.5470085740089417, "avg_line_length": 18.66666603088379, "blob_id": "680c1c9baa729b3bfd2c53c9bc1c1cc778065658", "content_id": "f428f930844af819481a6cdb8fcb4f4b70bd8795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/setting/local.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\n\n\nLOG_FILE_NAME = '/data/api/local/api_logger.log'\nLOG_PORT = 8008" }, { "alpha_fraction": 0.6379059553146362, "alphanum_fraction": 0.6674745678901672, "avg_line_length": 35.83928680419922, "blob_id": "cf1dc7d5afa0c7688ea927cd0e1be0ab9e8aa837", "content_id": "c27f44e2b1fae111cdc40b8be8efe7e6a0b059d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 110, "num_lines": 56, "path": "/migrations/versions/18e507e87862_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 18e507e87862\nRevises: 3c12ca43b1ba\nCreate Date: 2015-12-31 09:52:12.246206\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '18e507e87862'\ndown_revision = '3c12ca43b1ba'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport models\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('beauty_entry',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=100), nullable=True),\n sa.Column('image', sa.String(length=100), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('recommend_beauty_item',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('item_id', sa.Integer(), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('daily_coupon',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('coupon_id', sa.Integer(), nullable=False),\n sa.Column('start_time', sa.DateTime(), nullable=True),\n sa.Column('end_time', sa.DateTime(), nullable=True),\n sa.Column('total', sa.Integer(), nullable=True),\n sa.Column('sent', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['coupon_id'], ['coupon.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column(u'coupon', sa.Column('need', models.MoneyField(precision=10, scale=2), nullable=False))\n op.add_column(u'user_coupon', sa.Column('need', models.MoneyField(precision=10, scale=2), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column(u'user_coupon', 'need')\n op.drop_column(u'coupon', 'need')\n op.drop_table('daily_coupon')\n op.drop_table('recommend_beauty_item')\n op.drop_table('beauty_entry')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5170789361000061, "alphanum_fraction": 0.5429917573928833, "avg_line_length": 21.972972869873047, "blob_id": "d861d465b6fff21ca2013f119fc2540f495103b1", "content_id": "bf7e367f37130412070826b72d463adb3d498fbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 89, "num_lines": 37, "path": "/util/drawgift.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport random\nimport bisect\n\n\n\n\nclass DrawGift(object):\n ''' 抽奖 '''\n def __init__(self, items): \n weights = [w for _,w in items] \n self.goods = [x for x,_ in items] \n self.total = sum(weights) \n self.acc = list(self.accumulate(weights)) \n \n def accumulate(self, weights):#累和.如accumulate([10,40,50])->[10,50,100] \n cur = 0 \n for w in weights: \n cur = cur+w \n yield cur \n \n def __call__(self):\n if not self.total: return None\n return self.goods[bisect.bisect_right(self.acc , random.uniform(0, self.total))] \n\n\n\ndef draw_prize(prize_list):\n ''' 抽奖 '''\n wr = DrawGift(prize_list) \n return wr()\n\n\ndef test_drawprize():\n prize_list = [('iphone', 10), ('ipad', 40), ('itouch', 50)]\n print draw_prize(prize_list)" }, { "alpha_fraction": 0.6196404099464417, "alphanum_fraction": 0.658367931842804, "avg_line_length": 24.821428298950195, "blob_id": "03060b8d335ecc052b8d9d3e116320cfcc9470b7", "content_id": "54a3e604e15757693341d25cb904804c1fb0d738", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 723, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/migrations/versions/765d6f2d606_.py", "repo_name": "qsq-dm/mff", 
"src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 765d6f2d606\nRevises: 4bd7029c14b5\nCreate Date: 2015-11-20 14:50:33.668652\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '765d6f2d606'\ndown_revision = '4bd7029c14b5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('hospital', sa.Column('city_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'hospital', 'city', ['city_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'hospital', type_='foreignkey')\n op.drop_column('hospital', 'city_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6417061686515808, "alphanum_fraction": 0.6815165877342224, "avg_line_length": 29.14285659790039, "blob_id": "618a2ddc69fb2a78f76cc3d2e1db99d93e21de0f", "content_id": "1acb18de83b51b144170f8734de6a0196b64463c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 121, "num_lines": 35, "path": "/migrations/versions/1cf4dd01482b_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 1cf4dd01482b\nRevises: 32a72ba0ce03\nCreate Date: 2015-11-10 11:09:59.892332\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1cf4dd01482b'\ndown_revision = '32a72ba0ce03'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('recommend_item',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('item_id', sa.Integer(), nullable=True),\n sa.Column('sort_order', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('item_id')\n )\n op.drop_column(u'item', 'is_recommend')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column(u'item', sa.Column('is_recommend', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))\n op.drop_table('recommend_item')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6557177901268005, "alphanum_fraction": 0.6739659309387207, "avg_line_length": 40.099998474121094, "blob_id": "a161eada50d807cdf85295e3fbcc9b7a3b2e01eb", "content_id": "ff758bc754ed4f6a5fd931809fcad92394cf2253", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1644, "license_type": "no_license", "max_line_length": 119, "num_lines": 40, "path": "/migrations/versions/45f8da7eae76_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 45f8da7eae76\nRevises: 3c706c57fa0\nCreate Date: 2015-12-12 10:45:36.131728\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '45f8da7eae76'\ndown_revision = '3c706c57fa0'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('coupon', sa.Column('cat_id', sa.Integer(), nullable=True))\n op.add_column('coupon', sa.Column('coupon_cat', mysql.TINYINT(display_width=1), nullable=False))\n op.create_foreign_key(None, 'coupon', 'item_cat', ['cat_id'], ['id'])\n op.drop_column('coupon', 'cat')\n op.add_column('user_coupon', sa.Column('cat_id', sa.Integer(), nullable=True))\n op.add_column('user_coupon', sa.Column('coupon_cat', mysql.TINYINT(display_width=1), nullable=False))\n op.create_foreign_key(None, 'user_coupon', 'item_cat', ['cat_id'], ['id'])\n op.drop_column('user_coupon', 'cat')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user_coupon', sa.Column('cat', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n op.drop_constraint(None, 'user_coupon', type_='foreignkey')\n op.drop_column('user_coupon', 'coupon_cat')\n op.drop_column('user_coupon', 'cat_id')\n op.add_column('coupon', sa.Column('cat', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n op.drop_constraint(None, 'coupon', type_='foreignkey')\n op.drop_column('coupon', 'coupon_cat')\n op.drop_column('coupon', 'cat_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6410120129585266, "alphanum_fraction": 0.6415445804595947, "avg_line_length": 33.081817626953125, "blob_id": "eb1f0b3833f87ecd265ec609a64b92151d85c740", "content_id": "0a22e283ab216ed12b28dd71e489d1509dcd2d46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3807, "license_type": "no_license", "max_line_length": 77, "num_lines": 110, "path": "/user/draw_money.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import redirect\nfrom flask import render_template\n\nfrom sqlalchemy import and_\nfrom sqlalchemy import or_\nfrom models import db\nfrom models import School\nfrom models 
import RoomDesignDetail\nfrom util.utils import jsonify_response\nfrom util.utils import random_str\nfrom util.utils import str_to_int_list\nfrom util.utils import comma_str_to_list\nfrom util.decorators import wechat_loggin_dec\nfrom util.validators import Optional\nfrom util.validators import Inputs\nfrom util.validators import MobileField\nfrom util.validators import TextField\nfrom util.validators import IdField\nfrom util.validators import IntChoiceField\nfrom util.sign import sign_user\nfrom util.sign import set_cookie\nfrom util.sign import del_cookie\nfrom util.drawgift import draw_prize\nfrom ops.bulks import fetch_user_refs\nfrom ops.item import ItemService\nfrom ops.data import DataService\nfrom ops.user import UserService\nfrom ops.redpack import RedpackService\nfrom ops.promote import PromoteService\nfrom ops.cache import RoomDesignVoteCounter\nfrom ops.room_design import RoomDesignService\nfrom constants import ResponseCode\nfrom thirdparty.sms import send_sms\nfrom thirdparty.sms import gen_vcode\nfrom thirdparty.wechat import exchange_code_for_token\nfrom settings import MAX_TODAY_PASSWD_ATTEMPT\nfrom settings import MAX_TODAY_VCODE_ATTEMPT\nfrom settings import CONTACT\nfrom constants import VOTE_COUNT_SOURCE_MAP\n\n\n\n\n\n@wechat_loggin_dec(required=False)\ndef draw_index():\n ''' '''\n if request.user_id and not PromoteService.get_rd_user(request.user_id):\n PromoteService.create_rd_user(request.user_id)\n\n user = RedpackService.get_qruser_by_openid(request.open_id)\n has_followed= bool(user and user.nickname)\n invite_count = PromoteService.get_reg_count(request.user_id)\n priviledges = PromoteService.get_draw_logs(request.user_id)\n draw_count = PromoteService.get_user_can_draw_count(request.user_id)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'has_followed': has_followed,\n 'invite_count': invite_count,\n 'draw_count' : draw_count,\n 'priviledges' : priviledges\n }\n return 
jsonify_response(result)\n\n\n\n@wechat_loggin_dec(required=True, app=True)\ndef draw_money():\n ''' 抽奖 '''\n prize_list = PromoteService.get_prize_left()\n prize_map = dict(prize_list)\n prize_id = draw_prize(prize_list)\n if not prize_id:\n result = {\n 'code': ResponseCode.SERVER_ERROR,\n 'msg': '奖品已被抽完'\n }\n return jsonify_response(result)\n\n prize = PromoteService.get_prize(prize_id)\n current_count = prize_map[prize_id]\n count = PromoteService.incr_draw_used(request.user_id)\n if not count:\n assert 0, '您没有抽奖机会了'\n count = PromoteService.incr_prized(prize_id, current_count)\n if not count:\n remain = PromoteService.get_prize_remain(prize_id)\n if not remain:\n result = {\n 'code': ResponseCode.SERVER_ERROR,\n 'msg': '奖品已被抽完'\n }\n return jsonify_response(result)\n else:\n result = {\n 'code': ResponseCode.SERVER_ERROR,\n 'msg': '服务器忙'\n }\n return jsonify_response(result)\n result = {\n 'code' : ResponseCode.SUCCESS,\n 'msg' : '',\n 'prize_id' : prize_id,\n 'amount' : prize.amount\n }\n return jsonify_response(result)\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6197836399078369, "alphanum_fraction": 0.6955177783966064, "avg_line_length": 23.884614944458008, "blob_id": "705f6a341c80c06332f56bb3114449e721443a4f", "content_id": "5d732b99102bff79b2afe0214f81304440e12a71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 96, "num_lines": 26, "path": "/migrations/versions/34a7370fe40_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 34a7370fe40\nRevises: 273db5f3044f\nCreate Date: 2016-02-01 14:55:03.924475\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '34a7370fe40'\ndown_revision = '273db5f3044f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('redpack_user_question', sa.Column('view_count', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('redpack_user_question', 'view_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6344936490058899, "alphanum_fraction": 0.6819620132446289, "avg_line_length": 23.30769157409668, "blob_id": "ead91b97348f284ac8a9f687a7a382ed3f5e431d", "content_id": "bf7b5fae234e2e0733c88f669a4902cd72468a18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/migrations/versions/59a610b5633d_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 59a610b5633d\nRevises: 3a64535997fb\nCreate Date: 2015-12-09 17:16:46.162195\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '59a610b5633d'\ndown_revision = '3a64535997fb'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('trial', sa.Column('sex', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('trial', 'sex')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.618369996547699, "alphanum_fraction": 0.6934023499488831, "avg_line_length": 26.60714340209961, "blob_id": "2455d87531945e79c221c140f4873e0ce7225d52", "content_id": "7bdd3692c03c8df117799b2f61fd9d91e7bf0382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "no_license", "max_line_length": 96, "num_lines": 28, "path": "/migrations/versions/2a01c5929823_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2a01c5929823\nRevises: 285c9a12f7b4\nCreate Date: 2016-01-27 15:32:02.925482\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2a01c5929823'\ndown_revision = '285c9a12f7b4'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_detail', sa.Column('phone', sa.String(length=30), nullable=True))\n op.create_unique_constraint(None, 'room_design_detail', ['phone'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'room_design_detail', type_='unique')\n op.drop_column('room_design_detail', 'phone')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6495882868766785, "alphanum_fraction": 0.683440089225769, "avg_line_length": 25.560976028442383, "blob_id": "054155ec6ee4c8774197ac3589ae4a66642300a1", "content_id": "6dc68722e1b59e98f136a01ce689e37833b99a12", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 90, "num_lines": 41, "path": "/thirdparty/alipay/config.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "#-*- coding:utf-8 -*-\nfrom settings import SERVER_NAME\n\n\nclass settings:\n # 安全检验码,以数字和字母组成的32位字符\n ALIPAY_KEY = 'knpsiyyep0ertnut5mmt844dkntswotd'\n\n ALIPAY_INPUT_CHARSET = 'utf-8'\n\n # 合作身份者ID,以2088开头的16位纯数字\n ALIPAY_PARTNER = '2088021957827236'\n\n # 签约支付宝账号或卖家支付宝帐户\n ALIPAY_SELLER_EMAIL = '[email protected]'\n\n ALIPAY_SIGN_TYPE = 'RSA'\n\n # 付完款后跳转的页面(同步通知) 要用 http://格式的完整路径,不允许加?id=123这类自定义参数\n ALIPAY_RETURN_URL = 'http://{}/user/finish_pay'.format(SERVER_NAME)\n \n # 交易过程中服务器异步通知的页面 要用 http://格式的完整路径,不允许加?id=123这类自定义参数\n ALIPAY_NOTIFY_URL='http://{}/api/alipay_notify/'.format(SERVER_NAME)\n ALIPAY_REPAYMENT_NOTIFY_URL='http://{}/api/alipay_repayment_notify/'.format(SERVER_NAME)\n\n ALIPAY_SHOW_URL=''\n\n # 网关地址\n #WEB_GATEWAY = 'https://mapi.alipay.com/gateway.do?'\n WEB_GATEWAY = 'http://wappaygw.alipay.com/service/rest.htm?'\n WEB_GATEWAY = 'https://mapi.alipay.com/gateway.do?'\n # 访问模式,根据自己的服务器是否支持ssl访问,若支持请选择https;若不支持请选择http\n ALIPAY_TRANSPORT='https'\n\n\n PRIVATE_KEY = '''-----BEGIN RSA PRIVATE 
KEY-----\nMIICWwIBAAKBgQCg6pjF7bK7dxQnFVIJAWCdOPraLIkPOsFuIvKFYXQTd5PtFSEv\n5zsKwXd4dolTTge7QbabKY8RHPlukyDfM+mogsMkZ1/AdZqRj3xLU0twXWi5UfO9\nhA6bgUcDQM4tsg5QDLOuTc//4YQ/71XSbvbvKznHGD7M6p0JVS8Au7C2fwIDAQAB\nAoGAM0SLCASDPVbjBNC2IkheD7lnsrsdr8x4dC6ONhCmes1OQ+rHeAujs/mPpsGd\nXv0tWWvGUgkbm8gvcBtQDczsVFJwCkDZpC4WOFIH5sBffOmVjunXaSdrcgczZlbJ\nyosUWXw22lBcKUVwd+cv50yGUH9i5fJcJkLwdZ4h9Hqu2wECQQDRcZzH2HDUnelK\nbTrsVukfad4V8ShZVdMJ9b8BLfh3w1gAXY1i2D1q8x39P+kcI/U3pga6xfv2nYS+\nQkpENoyJAkEAxK+I/ES/i96QoNzu87pMrDZElZTjQZ39Ab70eqvD+o03y8r3C6YJ\nEHCZ6C5zeFJTh9bGk/spoerQuGBrkk+4xwJAdypVMb+MMuzF13rek6m/aggqPAHC\nG1IhiQExc9JcFIgogcy4rQyrpTY+UeETGNe8pbTpD0umWGK3LCk7aCRBQQJAM9FT\nM7MhC8Z9MARE5+1jGdPKSeZJ4RWwfG9ElbT/Etl1o7k7UNRTewNPaP4j6cU2wIjz\nFDWNiF0G1CyC6q8aLQJAPdqJTCLlxGAzDtOfAAYrJKycxixOpZZ2z857Xdf8jTYL\nxnzl0xanRphWrLmbqhCPpGc2esxO0HbUq/OodLKWcQ==\n-----END RSA PRIVATE KEY-----'''\n\n\n PUBLIC_KEY = '''-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCg6pjF7bK7dxQnFVIJAWCdOPra\nLIkPOsFuIvKFYXQTd5PtFSEv5zsKwXd4dolTTge7QbabKY8RHPlukyDfM+mogsMk\nZ1/AdZqRj3xLU0twXWi5UfO9hA6bgUcDQM4tsg5QDLOuTc//4YQ/71XSbvbvKznH\nGD7M6p0JVS8Au7C2fwIDAQAB\n-----END PUBLIC KEY-----'''\n \n\n" }, { "alpha_fraction": 0.6223602294921875, "alphanum_fraction": 0.6546583771705627, "avg_line_length": 25.83333396911621, "blob_id": "6ca731234cfbfbf809383a03212740cebfa3c3c2", "content_id": "38126e346d5fdcc8a3564916f6feb196f9c4f2a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 63, "num_lines": 30, "path": "/migrations/versions/4be704e34845_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4be704e34845\nRevises: 325324d21171\nCreate Date: 2015-12-05 14:34:06.582315\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4be704e34845'\ndown_revision = '325324d21171'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom 
sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('promoter', 'create_by',\n existing_type=mysql.INTEGER(display_width=11),\n nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('promoter', 'create_by',\n existing_type=mysql.INTEGER(display_width=11),\n nullable=False)\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.44290658831596375, "alphanum_fraction": 0.5536332130432129, "avg_line_length": 25.272727966308594, "blob_id": "6c4956f05d74a82f7e23fd6e0d5a6a6cba86dcca", "content_id": "f6710cc73ae8c30cbc42b3d9ab5464f979f34f1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 74, "num_lines": 22, "path": "/setting/dev.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\n\n\nDOMAIN = '139.196.6.231'\nSERVER_NAME = '139.196.6.231'\n\n\nBROKER_URL = 'amqp://monitor:[email protected]:5672/dev'\nMAIN_MYSQL_URI = 'mysql://root@localhost/dev?charset=utf8mb4'\n\n\nSECRET_USER_COOKIE = 'df2121280332d4d3213883e804eca31137c1de1f_dev'\nADMIN_COOKIE_KEY = 'ADMIN_COOKIE_KEY_dev' \nHOSPITAL_COOKIE_KEY = 'HOSPITAL_COOKIE_KEY_dev'\n\n\nCACHE_DB_NO = 2\n\n\nLOG_FILE_NAME = '/data/api/dev/api_logger.log'\nLOG_PORT = 8009\n" }, { "alpha_fraction": 0.5736925601959229, "alphanum_fraction": 0.5784469246864319, "avg_line_length": 24.280000686645508, "blob_id": "a80585598eb295b898ee8b95eef29cf32a1bb7db", "content_id": "f43d84f994a1ee004b66bb43f70a5076ab798253", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 631, "license_type": "permissive", "max_line_length": 38, "num_lines": 25, "path": "/static/mobile/js/index.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": 
"jQuery(document).ready(function($) {\n\tfunction show_one () {\n\t\t$('.tcc').addClass('hidden');\n \t$('.tcc1').removeClass('hidden')\n \t$('.bg').removeClass('hidden');\n\t}\n\tfunction show_two() {\n\t\t$('.tcc').addClass('hidden');\n\t\t$('.tcc2').removeClass('hidden');\n\t\t$('.bg').removeClass('hidden');\n\t}\n\tfunction show_three() {\n\t\t$('.tcc').addClass('hidden');\n \t$('.tcc3').removeClass('hidden');\n \t$('.bg').removeClass('hidden');\n\t}\n\tfunction hide_all (argument) {\n\t\t $('.tcc').addClass('hidden');\n\t\t $('.bg').addClass('hidden');\n\t}\n\twindow.show_one = show_one;\n\twindow.show_two = show_two;\n\twindow.show_three = show_three;\n\t\n});" }, { "alpha_fraction": 0.6419558525085449, "alphanum_fraction": 0.6813880205154419, "avg_line_length": 23.384614944458008, "blob_id": "d1220a1da47614a7eda4f64c46cd8ac7f97aa9b5", "content_id": "30200a61a9cd74fba16b4188e22901c4c8291c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 98, "num_lines": 26, "path": "/migrations/versions/3ee27e625d6d_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3ee27e625d6d\nRevises: 43f0dd6800b1\nCreate Date: 2015-11-17 14:53:58.824350\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3ee27e625d6d'\ndown_revision = '43f0dd6800b1'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_apply', sa.Column('stu_education', sa.String(length=20), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('credit_apply', 'stu_education')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.552071213722229, "alphanum_fraction": 0.5659021139144897, "avg_line_length": 33.27934265136719, "blob_id": "b74583384f4c974e5ef1b29f7d5e9b834715c7af", "content_id": "1fef174527ab07c6dff498ed99918a4c3d935b37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 15215, "license_type": "permissive", "max_line_length": 673, "num_lines": 426, "path": "/static/user/js/tooth-beauty.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "var w,h;\nfunction getSrceenWH(){\n\tw = $(window).width();\n\th = $(\"html\").height();\n\t$('#dialogBg').width(w).height(h);\n}\n\nwindow.onresize = function(){ \n\tgetSrceenWH();\n} \n$(window).resize(); \n$(document).ready(function(){\n\tgetSrceenWH();\n//\t\t\t\t顶部菜单颜色与图标变换\n$(\".drop-btn\").on(\"click\",function(){\n//$(\".drop-btn\").removeClass(\"active\").find(\"i\").html(\"&#xe604;\");\n//$(this).addClass(\"active\").find(\"i\").html(\"&#xe603;\");\n\t\n\tif ($(this).hasClass(\"active\") ){\n\t\tconsole.log('deact')\n\t\t$('#drop-list1').addClass('is-dis')\n\t\t$('#right-side-menu').addClass('is-dis')\n\t\t// $('a').css('pointer-events','all')\n\t\t// $('.drop-btn').css('pointer-events','all')\n\t\t//$('li').unbind('click')\n\t\t$('html').css({'overflow':'auto','position':'static'});\t\t\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\".drop-btn\").removeClass(\"active\").find(\"i\").html(\"&#xe604;\");\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t}else{\n\t 
console.log('act')\n\t\t$('#drop-list1').removeClass('is-dis')\n\t\t$('#right-side-menu').removeClass('is-dis')\n\t\t$('html').css({'overflow':'hidden','position':'fixed'});\n\t\t$(\".drop-btn\").removeClass(\"active\").find(\"i\").html(\"&#xe604;\");\n\t\t$(this).addClass(\"active\").find(\"i\").html(\"&#xe603;\");\n\t}\n});\n\n//\t\t\t\t第一个菜单点击事件\t\t\t\t\n$(\"#drop1\").on(\"click\",function(){\n\tif($(this).hasClass('active')){\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#drop-list1\").css(\"top\",\"40px\");\n\t\t$(\"#dialogBg\").fadeIn(300);\n\n\t}else{\n\t \t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t}\n});\n\n\n\n\n//\t\t\t\t第二个菜单点击事件\t\t\t\t\n$(\"#drop2\").on(\"click\",function(){\n\tif($(this).hasClass('active')){\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#drop-list2\").css(\"top\",\"40px\");\n\t\t$(\"#dialogBg\").fadeIn(300);\n\n\t}else{\n\t \t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t}\n});\n//\t\t\t\t第二个下拉菜单点击事件\n\n//\t\t\t\t第三个菜单点击事件\t\t\t\t\n\n$(\"#drop3\").on(\"click\",function(){\n\tif($(this).hasClass('active')){\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#drop-list3\").css(\"top\",\"40px\");\n\t\t$(\"#dialogBg\").fadeIn(300);\n\n\t}else{\n\t \t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t}\n\n});\n//\t\t\t\t第三个下拉菜单点击事件\nwindow.bind_menu_click = function bind_menu_click() {\n\t//\t\t\t\t第一个菜单的左侧子菜单点击事件\n\t$(\".tab-vertical li\").on(\"click\",function(){\n\n\t\t$(\".tab-vertical li\").removeClass(\"active\");\n\t\t$(this).addClass(\"active\");\n\t\tvar cat_id=$(this).attr('cat-id');\n\t\t$('.select-operation li').hide();\n\t\t$('.select-operation .ding-'+cat_id).show()\n\t\t$('#drop-list1').scrollTop(0);\n\t});\n\t$(\"#drop-list3 
li\").on(\"click\",function(){\n\t\t$('html').css({'overflow':'auto','position':'static'});\t\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#drop3\").find(\"span\").html($(this).find(\"span\").html());\n\t\t$(\"#drop-list3 li\").removeClass(\"active\").find(\"i\").html(\"&#xe63e;\");\n\t\t$(this).addClass(\"active\").find(\"i\").html(\"&#xe627;\");\n\t\t//选择背景隐藏\n\t\t$(\"#dialogBg\").fadeOut(300);\t\n\t\t\t//选择的时候请求接口\n\t\tvar city=$('#drop-list3 li.active').attr('data-id');\n\t\tvar sort=$('#drop-list2 li.active').attr('data-id');\n\t\tvar cat=$('.select-operation li.active').attr('data-id');\n\t\toption_sort(sort,cat,city);\n\t\t$(\"#drop3\").click();\n\t});\t\t\t\t\n//\t\t\t\t背景点击事件\n\t$(\"#dialogBg\").on(\"click\",function(event){\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\".drop-btn\").removeClass(\"active\").find(\"i\").html(\"&#xe604;\");\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t\t$('html').css({'overflow':'auto','position':'static'});\t\n\t});\n\t$(\"#drop-list2 li\").on(\"click\",function(){\n\t\t$('html').css({'overflow':'auto','position':'static'});\t\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\t\t$(\"#drop2\").find(\"span\").html($(this).find(\"span\").html());\n\t\t$(\"#drop-list2 li\").removeClass(\"active\").find(\"i\").html(\"&#xe63e;\");\n\t\t$(this).addClass(\"active\").find(\"i\").html(\"&#xe627;\");\n\t\t//选择背景隐藏\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t\t\n\t\t\t//选择的时候请求接口\n\t\tvar city=$('#drop-list3 li.active').attr('data-id');\n\t\tvar sort=$('#drop-list2 li.active').attr('data-id');\n\t\tvar cat=$('.select-operation li.active').attr('data-id');\n\t\toption_sort(sort,cat,city);\n\t\t$(\"#drop2\").click();\n\n\n\n\t});\n\n\t//\t\t\t\t第一个菜单的右侧子菜单点击事件\n\t$(\".select-operation li\").on(\"click\",function(){\n\t\t$('#drop-list1').addClass('is-dis')\n\t\t$('#right-side-menu').addClass('is-dis')\n\t\t$('html').css({'overflow':'auto','position':'static'});\t\n\t\t$(\".select-operation 
li\").removeClass(\"active\").find(\"i\").html(\"&#xe63e;\");\n\t\t$(this).addClass(\"active\").find(\"i\").html(\"&#xe627;\");\n\t\t$(\".drop-list\").css(\"top\",\"-264px\");\n\n\t\t//选中的li里面的文字截图出来\n\t\tvar li_text=$('.select-operation .mui-table-view-cell.active').text();\n\t\tvar l_length=$('.select-operation .mui-table-view-cell.active').text().length;\n\t\tvar results=li_text.substring(0,l_length-1);\n\t\tconsole.log(results);\n\n\t\t$('#drop1').find('span').html(results)\n\t\t$('#drop1').removeClass('active');\n\t\tset_document_title(results);\n\t\t//点击背景隐藏\n\t\t$(\"#dialogBg\").fadeOut(300);\n\t\t\t//选择的时候请求接口\n\t\t\tvar city=$('#drop-list3 li.active').attr('data-id');\n\t\t\tvar sort=$('#drop-list2 li.active').attr('data-id');\n\t\t\tvar cat=$('.select-operation li.active').attr('data-id');\n\t\t\toption_sort(sort,cat,city);\n\t\t\t\t\t//$(\"#drop1\").click();\n\t\t$(\".drop-btn\").removeClass(\"active\").find(\"i\").html(\"&#xe604;\");\n\t});\n}\n\n});\n\n\n\n//头部选择接口\nfunction option_sort(sort,cat,city){\n\t\t\t$.ajax({\n\t\t\ttype:\"get\",\n\t\t\turl:\"http://\"+getHostName()+\"/user/item_list/?sort_type=\"+sort+\"&sub_cat_id=\"+cat+\"&city_id=\"+city,\n\t\t\tdataType:'json',\n\t\t\tsuccess:function(data){\n\t\t\t\t$('.item-list li').remove();\n\t\t\t\t$('.tit').hide();\n\t\t\t\t//没有更多隐藏;\n\t\t\t\tvar infos=data.infos;\n\t\t\t\t\t\thas_more=data.has_more;\n\t\t\t\t\t\toffset=data.offset;\n\t\t\t\t\t\tvar infos=data.infos;\n\t\t\t\t\t\tif (!has_more){\n\t\t\t\t\t\t\t$('.loader-inner').hide();\n\t\t\t\t\t\t};\n\t\t\t\t\t\tif(infos.length==0){\n\t\t\t\t\t\t\t$('.noCoupon').show();\n\t\t\t\t\t\t\t$('.mui-card').hide()\n\t\t\t\t\t\t}else{\n\t\t\t\t\t\t\t$('.mui-card').show();\n\t\t\t\t\t\t\t$('.noCoupon').hide();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor(var i=0;i<infos.length;i++){\n\t\t\t\t\t\t\tvar free='';\n\t\t\t\t\t\t\tif (!infos[i].has_fee) {\n\t\t\t\t\t\t\t\tfree='<img src=\"/static/user/free.png\" class=\"free\" alt=\"\" style=\"position: 
absolute;width:25px;height:25px;top:0;left:0;\" />'\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvar str=$('<li class=\"mui-table-view-cell mui-media no-after-dotted\"><a href=\"/static/user/detail.html?item_id='+infos[i].id+'\"><img class=\"mui-media-object mui-pull-left\" src=\"'+infos[i].image+'\"><div class=\"mui-media-body\">'+infos[i].title+'<p class=\"mui-ellipsis\"><span>'+infos[i].hospital.name+'</span></p><p class=\"mui-ellipsis\">售价:<span class=\"color-red\">¥'+infos[i].price+'</span>医院价:<span class=\"line-throu\">¥'+infos[i].orig_price+'</span></p></div><div class=\"month-pay month-pay-sm\"><div class=\"tit\">月供</div><div class=\"num\"><p class=\"color-red\"><b>¥'+infos[i].period_money+'</b><span>x '+infos[i].period_count+'</span></p></div></div></a>'+free+'</li>');\n\t\t\t\t\t\t\tif(i==infos.length-1){\n\t\t\t\t\t\t\t\tstr.removeClass('no-after-dotted');\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t$('.item-list').append(str);\n\t\t\t\t\t\t}\t\n\t\t\t\t\n\t\t\t},error:function(){\n\t\t\t\t\n\t\t\t}\n\t\t});\n}\n//\t\t\t头部列表加载请求\nfunction get_filters(callback) {\n\t\t\t$.ajax({\n\t\t\t\ttype:\"get\",\n\t\t\t\turl:\"http://\"+getHostName()+\"/user/item_filters/\"+location.search,\n\t\t\t\tdataType:\"json\",\n\t\t\t\tsuccess:function(data){\n\t\t\t\t\t//销量加载\n\t\t\t\t\tif(callback) { callback();}\n\t\t\t\t\tvar order_choices=data.order_choices;\n\t\t\t\t\tvar citys=data.citys;\n\t\t\t\t\tfor(var x=0,xlg=order_choices.length;x<xlg;x++){\t\t\t\t\t\t\n\t\t\t\t\t\tvar order_str='';\n\t\t\t\t\t\tif(data.sort_type_obj.id==order_choices[x].id){\n\t\t\t\t\t\torder_str='<li class=\"mui-table-view-cell active\" data-id=\"'+order_choices[x].id+'\"><span>'+order_choices[x].name+'</span><i class=\"iconfont mui-pull-right\">&#xe627;</i></li>'\n\t\t\t\t\t\t}else{\n\t\t\t\t\t\torder_str='<li class=\"mui-table-view-cell\" data-id=\"'+order_choices[x].id+'\"><span>'+order_choices[x].name+'</span><i class=\"iconfont 
mui-pull-right\">&#xe63e;</i></li>'\n\t\t\t\t\t\t}\n\t\t\t\t\t\t$('#drop-list2').append(order_str);\n\t\t\t\t\t}\n\t\t\t\t\t//城市\n\t\t\t\t\tfor(var j=0;j<citys.length;j++){\n\t\t\t\t\t\tvar city='';\n\t\t\t\t\t\tif(data.city.id==citys[j].id){\n\t\t\t\t\t\tcity='<li class=\"mui-table-view-cell active\" data-id=\"'+citys[j].id+'\"><span>'+citys[j].name+'</span><i class=\"iconfont mui-pull-right\">&#xe627;</i></li>'\n\t\t\t\t\t\t}else{\n\t\t\t\t\t\tcity='<li class=\"mui-table-view-cell\" data-id=\"'+citys[j].id+'\"><span>'+citys[j].name+'</span><i class=\"iconfont mui-pull-right\">&#xe63e;</i></li>'\n\t\t\t\t\t\t}\n\t\t\t\t\t\t$('#drop-list3').append(city)\n\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t// 推荐项目接口\n\t\t\t\t\tvar bg_list=data.all_sub_cats;\n\t\t\t\t\t\n//\t\t\t\t\tfor(var i=0;i<bg_list.length;i++){\n//\t\t\t\t\t\tfor(var x=0;x<bg_list[i].sub_cats.length;x++){\n//\t\t\t\t\t\t\tvar sub=bg_list[i].sub_cats[x]\n//\t\t\t\t\t\t\tvar class_str='';\n//\t\t\t\t\t\t\tfor(var j=0;j<sub.cat_id_list.length;j++){\n//\t\t\t\t\t\t\t\tvar class_count=sub.cat_id_list[j];\n//\t\t\t\t\t\t\t\tclass_str+='ding-'+class_count+' '\n//\t\t\t\t\t\t\t}\n//\t\t\t\t\t\t\tvar node_str='';\n//\t\t\t\t\t\t\tif(data.subcat.id==sub.id){\n//\t\t\t\t\t\t\t\tnode_str='<li class=\"mui-table-view-cell active no-after-dotted ding-'+bg_list[i].id+' '+class_str+'\" data-id=\"'+sub.id+'\">'+sub.name+'<i class=\"iconfont mui-pull-right\">&#xe627;</i></li>'\n//\t\t\t\t\t\t\t}else{\n//\t\t\t\t\t\t\t\tnode_str='<li class=\"mui-table-view-cell no-after-dotted ding-'+bg_list[i].id+' '+class_str+'\" data-id=\"'+sub.id+'\">'+sub.name+'<i class=\"iconfont mui-pull-right\">&#xe63e;</i></li>'\t\n//\t\t\t\t\t\t\t}\n//\t\t\t\t\t\t\t$('.select-operation').append(node_str);\n//\t\t\t\t\t\t}\t\t\t\t\t\t\n//\t\t\t\t\t}\n\t\t\t\t\tfor(var i=0;i<bg_list.length;i++){\n\t\t\t\t\t\tvar class_str='';\n\t\t\t\t\t\tfor(var k=0;k<bg_list[i].cat_id_list.length;k++){\n\t\t\t\t\t\t\tvar 
class_count=bg_list[i].cat_id_list[k];\n\t\t\t\t\t\t\tclass_str+='ding-'+class_count+' '\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvar node_str='';\n\t\t\t\t\tif(data.subcat.id==bg_list[i].id){\n\t\t\t\t\tnode_str='<li class=\"mui-table-view-cell active no-after-dotted '+class_str+'\" data-id=\"'+bg_list[i].id+'\">'+bg_list[i].name+'<i class=\"iconfont mui-pull-right\">&#xe627;</i></li>'\n\n\t\t\t\t\t}else{\n\t\t\t\t\t node_str='<li class=\"mui-table-view-cell no-after-dotted '+class_str+'\" data-id=\"'+bg_list[i].id+'\">'+bg_list[i].name+'<i class=\"iconfont mui-pull-right\">&#xe63e;</i></li>'\n\t\n\t\t\t\t\t}\t\t\t\t\t\n\t\t\t\t\t$('.select-operation').append(node_str);\n\t\t\t\t\t};\n\t\t\t\t\t\n\t\t\t\t\t$('#drop1').find('span').html(data.subcat.name)\n\t\t\t\t\t$('#drop2').find('span').html(data.sort_type_obj.name);\n\t\t\t\t\t$('#drop3').find('span').html(data.city.name);\n\t\t\t\t\tset_document_title(data.subcat.name);\n\t\t\t\t\t\n\t\t\t\t\t//左边的开始菜单\n\t\t\t\t\tvar data=data.data;\n\t\t\t\t\tvar urlCurArr=[];// 当前红色图片;\n\t\t\t\t\tvar urlArr=[];// 灰色图片;\t\n\t\t\t\t\tfor(var x=0;x<data.length;x++){\n//\t\t\t\t\t\tconsole.log(1);\n\t\t\t\t\t\tvar left_str=$('<li cat-id='+data[x].id+'><span></span><p class=\"size-sm color-black\">'+data[x].name+'</p></li>');\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t$('.tab-vertical').append(left_str);\n\t\t\t\t\t\tleft_str.find('span').css({'background-image':\"url(\"+data[x].icon+\")\"});\n\t\t\t\t\t\turlCurArr.push(data[x].icon_active);\n\t\t\t\t\t\turlArr.push(data[x].icon);\n//\t\t\t\t\t\tconsole.log(urlArr);\n\t\t\t\t\t\t\n\t\t\t\t\t};\n\t\t\t\t\t$('.tab-vertical li').on('click',function(){\n\t\t\t\t\t\tvar index=$(this).index();\n\t\t\t\t\t\t$('.tab-vertical 
li').each(function(index,item){\n\t\t\t\t\t\t\t$(item).find('span').css({'background-image':\"url(\"+urlArr[index]+\")\"})\n\t\t\t\t\t\t})\n\t\t\t\t\t\t$(this).find('span').css({'background-image':\"url(\"+urlCurArr[index]+\")\"});\n\t\t\t\t\t\t\n\t\t\t\t\t})\n\t\t\t\t//左边结束\n\t\t\t\t\tbind_menu_click();\n\n\t\t\t\t\t//默认加载进来选择推荐\n\t\t\t\t\t$(\".tab-vertical li\").eq(0).trigger('click');\n\t\t\t\t\t$(\".tab-vertical li\").eq(0).addClass('active')\n\n\t\t\t\t\t\n\t\t\t\t},error:function(){\n\t\t\t\t\t\n\t\t\t\t}\n\t\t\t});\n\n}\n\nwindow.set_document_title = function (title) {\n\n\tdocument.title = title;\n\t// var $body = $('body');\n // // hack在微信等webview中无法修改document.title的情况\n // var $iframe = $('<iframe style=\"height:0;border:0\" src=\"/favicon.ico\" ></iframe>');\n // $iframe.on('load',function() {\n // setTimeout(function() {\n // $iframe.off('load').remove();\n // }, 0);\n // }).appendTo($body);\n}\n//列表接口\nvar has_more;\nvar offset=undefined;\n\t\t\t// 请求接口\n\t\t\tfunction getList(){\n\t\t\t\tif(offset){\n\t\t\t\t\tvar Url=\"http://\"+getHostName()+\"/user/item_list/?\"+jQuery.param(Common.UrlGet())+\"&offset=\"+offset\n\t\t\t\t}else{\n\t\t\t\t\tvar Url=\"http://\"+getHostName()+\"/user/item_list/?\"+jQuery.param(Common.UrlGet());\n\t\t\t\t}\t\t \t\t\t\n\t\t\t\t$.ajax({\n\t\t\t\t\txhrFields: {withCredentials: true},\n\t\t\t\t\ttype:\"get\",\n\t\t\t\t\turl:Url,\n\t\t\t\t\tdataType:'json',\n\t\t\t\t\tdata:{},\n\t\t\t\t\tsuccess:function(data){\n\t\t\t\t\t\thas_more=data.has_more;\n\t\t\t\t\t\toffset=data.offset;\n\t\t\t\t\t\tvar infos=data.infos;\n\t\t\t\t\t\tif (!has_more){\n\t\t\t\t\t\t\t$('.loader-inner').hide();\n\t\t\t\t\t\t};\n\t\t\t\t\t\tif(infos.length==0){\n\t\t\t\t\t\t\t$('.noCoupon').show();\n\t\t\t\t\t\t}else{\n\t\t\t\t\t\t\t$('.mui-card').show();\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor(var i=0;i<infos.length;i++){\n\t\t\t\t\t\t\tvar free='';\n\t\t\t\t\t\t\tif (!infos[i].has_fee) {\n\t\t\t\t\t\t\t\tfree='<img src=\"/static/user/free.png\" class=\"free\" 
alt=\"\" style=\"position: absolute;width:25px;height:25px;top:0;left:0;\" />'\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvar str=$('<li class=\"mui-table-view-cell mui-media no-after-dotted\"><a href=\"/static/user/detail.html?item_id='+infos[i].id+'\"><img class=\"mui-media-object mui-pull-left\" src=\"'+infos[i].image+'\"><div class=\"mui-media-body\">'+infos[i].title+'<p class=\"mui-ellipsis\"><span>'+infos[i].hospital.name+'</span></p><p class=\"mui-ellipsis\">售价:<span class=\"color-red\">¥'+infos[i].price+'</span>医院价:<span class=\"line-throu\">¥'+infos[i].orig_price+'</span></p></div><div class=\"month-pay month-pay-sm\"><div class=\"tit\">月供</div><div class=\"num\"><p class=\"color-red\"><b>¥'+infos[i].period_money+'</b><span>x '+infos[i].period_count+'</span></p></div></div></a>'+free+'</li>');\n\t\t\t\t\t\t\tif(i==infos.length-1){\n\t\t\t\t\t\t\t\tstr.removeClass('no-after-dotted');\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t$('.item-list').append(str);\n\t\t\t\t\t\t}\t\t\t\t\t\t\t\n\t\t\t\t\t},\n\t\t\t\t\terror:function(){\n\t\t\t\t\t\t//alert('网络出现小差,请稍后再试');\n\t\t\t\t\t}\n\t\t\t\t});\t\n}\nget_filters(getList);\n\n\t\t\t//获取滚动条当前的位置 \n\t\t\tfunction getScrollTop() { \n\t\t\t\tvar scrollTop = 0; \n\t\t\t\tif (document.documentElement && document.documentElement.scrollTop) { \n\t\t\t\t\tscrollTop = document.documentElement.scrollTop; \n\t\t\t\t} \n\t\t\t\telse if (document.body) { \n\t\t\t\t\tscrollTop = document.body.scrollTop; \n\t\t\t\t} \n\t\t\t\treturn scrollTop; \n\t\t\t} \n\t\t\t\n\t\t\t//获取当前可是范围的高度 \n\t\t\tfunction getClientHeight() { \n\t\t\t\tvar clientHeight = 0; \n\t\t\t\tif (document.body.clientHeight && document.documentElement.clientHeight) { \n\t\t\t\t\tclientHeight = Math.min(document.body.clientHeight, document.documentElement.clientHeight); \n\t\t\t\t} \n\t\t\t\telse { \n\t\t\t\t\tclientHeight = Math.max(document.body.clientHeight, document.documentElement.clientHeight); \n\t\t\t\t} \n\t\t\t\treturn clientHeight; \n\t\t\t} \n\t\t\t\n\t\t\t//获取文档完整的高度 
\n\t\t\tfunction getScrollHeight() { \n\t\t\t\treturn Math.max(document.body.scrollHeight, document.documentElement.scrollHeight); \n\t\t\t};\n\n\t\t\twindow.onscroll = function () { \n\t\t\t\tif (getScrollTop() + getClientHeight() == getScrollHeight()) { \n\t\t\t\t\tif(has_more){\t\t\t\t\t\t\n\t\t\t\t\t\tsetTimeout(getList(),1000)\t\t\t\t\t\t\n\t\t\t\t\t}else{\n\t\t\t\t\t\t$('.tit').show()\n\t\t\t\t\t}\n\n\t\t\t\t} \n\t\t\t} \n\n\n$(document).ready(function(){\n bind_menu_click();\n $('#drop-list1').addClass('is-dis')\n $('#right-side-menu').addClass('is-dis')\n});\n\n\n" }, { "alpha_fraction": 0.6182519197463989, "alphanum_fraction": 0.682519257068634, "avg_line_length": 26.785715103149414, "blob_id": "f62061c05c25c80d7a2482f4f24f60dd1905aa3c", "content_id": "9f8eaf37e26eb85967a6e667e99907901b69f22d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 778, "license_type": "no_license", "max_line_length": 91, "num_lines": 28, "path": "/migrations/versions/3642e05a5117_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3642e05a5117\nRevises: 37321f4a22b3\nCreate Date: 2015-12-25 14:40:56.163993\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3642e05a5117'\ndown_revision = '37321f4a22b3'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('promoter', sa.Column('dup_count', sa.Integer(), nullable=True))\n op.create_index(op.f('ix_promoter_dup_count'), 'promoter', ['dup_count'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_promoter_dup_count'), table_name='promoter')\n op.drop_column('promoter', 'dup_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6238159537315369, "alphanum_fraction": 0.6562922596931458, "avg_line_length": 25.39285659790039, "blob_id": "b104eeb534c421573706a9ce2516b23d1020b044", "content_id": "10812e9f59b1887b0d80577ea685edb726aca8d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 85, "num_lines": 28, "path": "/migrations/versions/2aa3dc20f05b_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2aa3dc20f05b\nRevises: 53a9d06e37ce\nCreate Date: 2015-10-31 14:18:51.577349\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2aa3dc20f05b'\ndown_revision = '53a9d06e37ce'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('item_no', sa.String(length=100), nullable=True))\n op.create_index(op.f('ix_item_item_no'), 'item', ['item_no'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_item_item_no'), table_name='item')\n op.drop_column('item', 'item_no')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5816448926925659, "alphanum_fraction": 0.5948198437690735, "avg_line_length": 33.0169792175293, "blob_id": "d643840e0c602f84e1f9c0aa7886fd55ead8eba2", "content_id": "08cbacb78dde709853b6646064d9518ec83f9c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20512, "license_type": "no_license", "max_line_length": 129, "num_lines": 589, "path": "/manage.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\n创建管理员\npython manage.py create_admin --name=用户名 --passwd=密码\n\n支付成功\npython manage.py mock_pay --order_id=1\n\n创建假用户\npython manage.py create_fake_user --start=10000009000\n\n检查重复注册用户\npython manage.py find_duplicate_reg\n\n导出推广人员数据\npython manage.py download_csv --promoter_id=1\n\n恢复推广数据\npython manage.py recover_promoter_count\n\n'''\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nfrom flask import Flask\n\nfrom flask.ext.script import Manager\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\nfrom models import app\nfrom models import db\nfrom ops.item import ItemService\nfrom ops.data import DataService\nfrom ops.admin import AdminService\n\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\n\n\[email protected]\ndef create_coupons():\n ''' 创建优惠券 '''\n from ops.coupon import CouponService\n effective = 86400*30\n item_id = None\n sub_cat_id = None\n price = 100\n cat = 0\n #通用优惠券\n print CouponService.create_coupon(cat, '通用代金券', price, effective, item_id, sub_cat_id)\n #分类优惠券\n cat = 6\n print CouponService.create_coupon(cat, '空腔分类代金券', price, effective, item_id, sub_cat_id)\n #子类优惠券 以牙齿为例 sub_cat_id 13\n cat = 6\n sub_cat_id = 13\n print CouponService.create_coupon(cat, '牙齿小类代金券', price, effective, item_id, sub_cat_id)\n #项目优惠券 冷光牙齿美白\n 
item_id = 8\n print CouponService.create_coupon(cat, '指定项目代金券', price, effective, item_id, sub_cat_id)\n\n\[email protected]\ndef create_cats():\n types = [\n {\n 'name':'眼部', 'cats': [{'name': '双眼皮'},{'name': '单眼皮'}]\n },\n {\n 'name':'鼻部', 'cats': [{'name': '扁'},{'name': '平'}]\n },\n {\n 'name':'牙齿', 'cats': [{'name': '白'},{'name': '亮'}]\n },\n ]\n for parent in types:\n name = parent['name']\n cat_id = ItemService.create_cat(name)\n for i in parent['cats']:\n sub_cat_name = i['name']\n ItemService.create_sub_cat(cat_id, sub_cat_name, '', '水白白白亮')\n return types\n\n\[email protected]\ndef create_helps():\n ''' 帮助条目 '''\n from ops.data import DataService\n from setting.help import cats\n from setting.help import questions\n for cat in cats:\n DataService.create_help_cat(cat['id'], cat['name'])\n for question in questions:\n DataService.create_help_entry(question['cat_id'], question['title'], question['content'])\n\[email protected]\ndef create_period_choices():\n from ops.credit import CreditService\n choices = [\n {'id':1, 'period_count':3, 'period_fee': 0.01},\n {'id':2, 'period_count':6, 'period_fee': 0.03},\n {'id':3, 'period_count':12, 'period_fee': 0.05},\n {'id':4, 'period_count':18, 'period_fee': 0.07}, \n ]\n for i in choices:\n CreditService.create_period_choice(**i)\n\[email protected]\ndef create_admin(name=None, passwd=None):\n print name, passwd\n AdminService.create_admin(name, passwd)\n\n\ndef crawl_schools(page=1, head=True):\n ''' 抓取高校信息chrome xpath $x() '''\n from lxml import html\n import requests\n data = {\n 'ddlProvince' : 0,\n '__VIEWSTATE' : 
'/wEPDwUKMTE2NjY0ODc3Nw9kFgICAw9kFgQCAQ8QDxYGHg1EYXRhVGV4dEZpZWxkBQxQcm92aW5jZU5hbWUeDkRhdGFWYWx1ZUZpZWxkBQpQcm92aW5jZUlEHgtfIURhdGFCb3VuZGdkEBUjBuWFqOmDqAblm5vlt50G6YeN5bqGBuWMl+S6rAblpKnmtKUG5LiK5rW3Buays+WMlwblsbHopb8J5YaF6JKZ5Y+kBui+veWugQblkInmnpcJ6buR6b6Z5rGfBuaxn+iLjwbmtZnmsZ8G5a6J5b69Buemj+W7ugbmsZ/opb8G5bGx5LicBuays+WNlwbmuZbljJcG5rmW5Y2XBuW5v+S4nAblub/opb8G5rW35Y2XBui0teW3ngbkupHljZcG6KW/6JePBumZleilvwbnlJjogoMG6Z2S5rW3BuWugeWkjwbmlrDnloYG6aaZ5rivBua+s+mXqAblhbblroMVIwEwATEBMgEzATQBNQE2ATcBOAE5AjEwAjExAjEyAjEzAjE0AjE1AjE2AjE3AjE4AjE5AjIwAjIxAjIyAjIzAjI0AjI1AjI2AjI3AjI4AjI5AjMwAjMxAjMyAjMzAjM0FCsDI2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZGQCCQ8PFgIeC1JlY29yZGNvdW50AsMQZGQYAQUeX19Db250cm9sc1JlcXVpcmVQb3N0QmFja0tleV9fFgEFCWlidFNlYXJjaL9QO6naEBBAIEeLnXliOHBov+rx',\n '__VIEWSTATEGENERATOR' : 'C818AE9B',\n '__EVENTVALIDATION' : '/wEWKgLZ4pbwBgLGu9ryBwLZu9ryBwLYu9ryBwLbu9ryBwLau9ryBwLdu9ryBwLcu9ryBwLfu9ryBwLOu9ryBwLBu9ryBwLZu5rxBwLZu5bxBwLZu5LxBwLZu67xBwLZu6rxBwLZu6bxBwLZu6LxBwLZu77xBwLZu/ryBwLZu/byBwLYu5rxBwLYu5bxBwLYu5LxBwLYu67xBwLYu6rxBwLYu6bxBwLYu6LxBwLYu77xBwLYu/ryBwLYu/byBwLbu5rxBwLbu5bxBwLbu5LxBwLbu67xBwLbu6rxBwLa2vviBQLN0qSmAwLA15+mCwK1que1CgLYn9b4BgL6wv3hDD8SCUUQtAvvVBm+fe4GVrXDbLVb',\n '__EVENTTARGET' : 'collegeListPager$lbtnNext',\n 'collegeListPager$btGo' : 'GO',\n 'collegeListPager$tbCurrentPage': page,\n }\n response = requests.post('http://uhmajor.cdzk.net/college.aspx', data)\n tree = html.fromstring(response.text)\n province_select = tree.xpath('//*[@id=\"ddlProvince\"]') #省份选项\n for i in province_select[0].getchildren():\n if head: print i.text\n\n pager = tree.xpath('//*[@id=\"collegeListPager_lbTotalPage\"]') #省份选项\n page_count = int(pager[0].text)\n\n if head:\n print '-'*80\n print '共{}页'.format(page_count)\n print '-'*80\n for i in range(1, page_count+1):\n crawl_schools(i, False)\n\n if not head:\n rows = tree.xpath('//*[@id=\"right\"]/table')\n print response.text\n print page, rows, 'rows'\n for i in rows[0].getchildren()[1:]:\n 
childs = i.getchildren()\n name, location, link, remark = childs\n name = (name.text or '').decode('utf8').strip()\n location= (location.getchildren()[0].text or '').decode('utf8').strip()\n link = (link.getchildren()[0].text or '').decode('utf8').strip()\n remark = ''\n print name, location, link, remark\n DataService.create_school(name, link, location)\n return province_select\n\n\[email protected]\ndef mock_pay(order_id=None):\n ''' 直接命令行使支付成功 '''\n import os\n if os.environ.get('APP_DEV')=='production': return\n from ops.order import OrderService\n from user.views import pay_success_action\n order = OrderService.get_order_by_id(order_id)\n if not order:\n print '订单不存在'\n return\n need_pay = not order.price and order.credit_verified\n pay_success_action(order, need_pay=need_pay)\n\n\[email protected]\ndef md5_user_passwd():\n ''' md5保存用户名密码 '''\n from models import *\n from util.utils import md5_str\n users = User.query.all()\n for user in users:\n if len(user.passwd)!=32:\n passwd = md5_str(user.passwd)\n User.query.filter(User.id==user.id).update({'passwd':passwd})\n db.session.commit()\n\n\[email protected]\ndef create_fake_user(start=10000009000, limit=20):\n ''' 创建用户 '''\n from ops.user import UserService\n from ops.promote import PromoteService\n from util.utils import md5_str\n start = int(start)\n for i in range(int(limit)):\n phone = start + i\n print phone\n user_id = UserService.create_user(phone, phone, md5_str('meifenfen'))\n if user_id: PromoteService.add_fakeuser(user_id)\n\n\[email protected]\ndef add_imgs_width():\n ''' 设置图片宽高 '''\n from ops.item import ItemService\n from ops.data import DataService\n from util.utils import get_img_key\n has_more, infos = ItemService.get_paged_recommend_items()\n while infos:\n offset = infos[-1]['id']\n for info in infos:\n DataService.set_img_size(get_img_key(info['image']))\n has_more, infos = ItemService.get_paged_recommend_items(offset=offset)\n\n\[email protected]\ndef 
check_promoter_detail(promoter_id=None):\n ''' '''\n from sqlalchemy import func\n from sqlalchemy import and_\n from models import *\n sub_promoters = Promoter.query.filter(Promoter.create_by==promoter_id).all()\n total = 0\n for p in sub_promoters:\n qrcodes = Qrcode.query.filter(Qrcode.promoter_id==p.id).all()\n qrcode_ids = [q.id for q in qrcodes]\n query = and_(\n QrCodeUser.qrcode_id.in_(qrcode_ids),\n QrCodeUser.user_id>0\n )\n count = db.session.query(func.count(QrCodeUser.id)).filter(query).first()[0]\n print p.name, count\n total += count\n print 'total', total\n\[email protected]\ndef set_user_first_location():\n from models import db\n from models import QrCodeUser\n from ops.promote import PromoteService\n from models import WechatLocation\n rows=db.session.query(WechatLocation.open_id, WechatLocation.lng, WechatLocation.lat).group_by(WechatLocation.open_id).all()\n for row in rows:\n open_id = row[0]\n lng = row[1]\n lat = row[2]\n PromoteService.set_first_location(open_id, lng, lat)\n\n'''\n>>> '{0: <5}'.format('ss')\n'ss '\n>>> '{0: <5}'.format('sss')\n'sss '\n>>> '{0: <5}'.format('ssss')\n'ssss '\n>>> '{0: <5}'.format('sssss')\n'sssss'\n'''\[email protected]\ndef find_duplicate_reg():\n from models import *\n from sqlalchemy import func\n from collections import defaultdict\n rows = db.session.query(WechatReg.open_id, func.count(WechatReg.id)).group_by(WechatReg.open_id)\n duplicates = []\n for i in rows:\n if i[1]>1: duplicates.append(i)\n print '{0: <5} {1: <10} {2: <11} {3: <25} {4: <20} {5: <3}'.format('id', '用户名', '手机号', '创建时间', 'open_id', '注册数')\n total = 0\n data = defaultdict(lambda:0)\n for i in duplicates:\n reg_count = i[1]\n open_id = i[0]\n qrcodeuser = QrCodeUser.query.filter(QrCodeUser.open_id==open_id).first()\n qrcode_id = qrcodeuser.qrcode_id\n if qrcode_id:\n qrcode = Qrcode.query.filter(Qrcode.id==qrcode_id).first()\n promoter_id = qrcode.promoter_id\n print promoter_id, 'promoter_id', i[1]\n data[promoter_id]+=i[1]-1\n total += 
i[1]-1\n user_id = qrcodeuser.user_id\n user = User.query.filter(User.id==user_id).first()\n format_str = '{0: <5} {1: <10} {2: <11} {3: <25} {4: <20} {5: <3}'.format(\n user.id, user.name, user.phone, str(user.create_time)[:16], i[0], i[1])\n print format_str\n print 'total', total\n #return \n data_admin = defaultdict(lambda:0)\n for k, v in data.items():\n Promoter.query.filter(Promoter.id==k).update({'dup_count': v})\n db.session.commit()\n promoter = Promoter.query.filter(Promoter.id==k).first()\n parent_id = promoter.create_by\n if parent_id:\n data_admin[parent_id]+=v\n for k,v in data_admin.items():\n promoter = Promoter.query.filter(Promoter.id==k).first()\n print promoter.id, promoter.name, promoter.phone, v\n\ndef write_to_csv():\n import csv\n schools = DataService.get_schools()\n with open('schools.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for i in schools:\n writer.writerow([i.name, i.city_name, i.link])\n\n\n\[email protected]\ndef download_csv(promoter_id=None, output_file=''):\n from models import *\n from sqlalchemy import and_\n from ops.promote import PromoteService\n from ops.bulks import fetch_user_refs\n promoters = Promoter.query.filter(Promoter.create_by==promoter_id).all()\n promoter_ids= [i.id for i in promoters]\n qrcodes = Qrcode.query.filter(Qrcode.promoter_id.in_(promoter_ids)).all()\n qr_code_ids = [i.id for i in qrcodes]\n query = and_(\n QrCodeUser.user_id>0,\n QrCodeUser.qrcode_id.in_(qr_code_ids)\n )\n qrcode_user = QrCodeUser.query.filter(query).all()\n qrcode_user_dict_list = [i.as_dict() for i in qrcode_user]\n fetch_user_refs(qrcode_user_dict_list)\n print len(qrcode_user_dict_list)\n import csv\n with open('/tmp/meifenfen/export/promoter_users_{}.csv'.format(promoter_id), 'w') as csvfile:\n for i in qrcode_user_dict_list:\n user_name = i['user']['name']\n create_time = str(i['user']['create_time'])\n user_sex = i['sex'] or ''\n user_id = 
i['user']['id']\n user_phone = i['user']['phone']\n user_location = i['location'] or ''\n if user_sex==1:\n user_sex = '男'\n elif user_sex==2:\n user_sex = '女'\n print user_id, user_name, user_sex, user_phone, user_location, create_time\n promoter_map = PromoteService.get_qrcodeusers_by_open_ids(([i['open_id']]))\n promoter = promoter_map[i['open_id']]['promoter']\n parent = promoter_map[i['open_id']]['parent']\n print promoter, parent\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n promoter_name = promoter['name']\n promoter_admin_name = parent['name']\n writer.writerow([\n user_id, user_name, user_sex, user_phone,\n user_location, promoter_name, promoter_admin_name, create_time\n ])\n\n print len(qrcode_user_dict_list)\n\n\[email protected]\ndef recover_promoter_count():\n from models import *\n from sqlalchemy import func\n from sqlalchemy import and_\n Promoter.query.update({\"follow_count\":0,\"reg_count\":0, \"unfollow_count\": 0})\n db.session.commit()\n rows = db.session.query(QrCodeUser.qrcode_id, func.count(QrCodeUser.open_id)).group_by(QrCodeUser.qrcode_id)\n for i in rows:\n promoter = Qrcode.query.filter(Qrcode.id==i[0]).first()\n if promoter:\n print promoter.promoter_id\n Promoter.query.filter(Promoter.id==promoter.promoter_id).update({'follow_count':Promoter.follow_count+i[1]})\n db.session.commit()\n query = QrCodeUser.user_id>0\n rows = db.session.query(QrCodeUser.qrcode_id, func.count(QrCodeUser.open_id)).filter(query).group_by(QrCodeUser.qrcode_id)\n for i in rows:\n promoter = Qrcode.query.filter(Qrcode.id==i[0]).first()\n if promoter:\n print promoter.promoter_id\n Promoter.query.filter(Promoter.id==promoter.promoter_id).update({'reg_count':Promoter.reg_count+i[1]})\n db.session.commit()\n\n query = and_(\n QrCodeUser.user_id>0,\n QrCodeUser.status==0\n )\n rows = db.session.query(QrCodeUser.qrcode_id, func.count(QrCodeUser.open_id)).filter(query).group_by(QrCodeUser.qrcode_id)\n for i in rows:\n 
promoter = Qrcode.query.filter(Qrcode.id==i[0]).first()\n if promoter:\n print promoter.promoter_id\n Promoter.query.filter(Promoter.id==promoter.promoter_id).update({'unfollow_count':Promoter.unfollow_count+i[1]})\n db.session.commit()\n\n\n\[email protected]\ndef dump_edit_log():\n from models import EditNameLog\n import csv\n rows = EditNameLog.query.all()\n with open('/tmp/edit_name.csv', 'w') as csvfile:\n for i in rows:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow([\n i.id, i.user_id, i.create_time\n ])\n\[email protected]\ndef recover_edit_name():\n import csv\n from models import EditNameLog\n reader = csv.reader(file('edit_name.csv', 'rb'))\n for line in reader:\n print line\n log = EditNameLog(id=line[0], user_id=line[1], create_time=line[2])\n db.session.add(log)\n db.session.commit()\n\n\[email protected]\ndef clear_invalid_counter(phone=None):\n ''' 清空用户错误短信码计数 '''\n from ops.cache import get_today_timestamp\n from ops.cache import cache\n from ops.cache import InvalidUserPasswdCache\n print InvalidUserPasswdCache.clear_today_counter(phone)\n\n\[email protected]\ndef import_school_from_csv():\n import csv\n from ops.data import DataService\n reader = csv.reader(file('schools.csv', 'rb'))\n for line in reader:\n name, city_name, link = line[:3]\n DataService.create_school(name, link, city_name)\n\n\[email protected]\ndef reset_user_question_money():\n ''' '''\n from ops.redpack import RedpackService\n money_map = RedpackService.total_money_group_by_question([])\n from models import db\n from models import RedpackUserQuestion\n for user_question_id, money in money_map.items():\n RedpackUserQuestion.query.filter(RedpackUserQuestion.id==user_question_id).update({'money': money})\n db.session.commit()\n\n\[email protected]\ndef reset_room_vote():\n from ops.cache import RoomDesignVoteCounter\n from models import *\n from sqlalchemy import func\n RoomDesignVoteCounter.reset()\n rooms = 
RoomDesignDetail.query.order_by(RoomDesignDetail.id.desc()).all()\n max_no = db.session.query(func.max(RoomDesignDetail.apply_no)).scalar() or 1000000\n RoomDesignVoteCounter.incr_apply_no(int(max_no))\n for room in rooms:\n RoomDesignVoteCounter.init(room.id)\n RoomDesignVoteCounter.incr(room.id, room.vote_count)\n RoomDesignVoteCounter.add_score(room.vote_count)\n RoomDesignVoteCounter.add_score(0)\n\n\[email protected]\ndef add_priviledge():\n ''' 之前已经通过额度申请的投票权限 '''\n from models import CreditApply\n from constants import CREDIT_STATUS\n from ops.cache import RoomDesignVoteCounter\n from ops.room_design import RoomDesignService\n result = CreditApply.query.all()\n for apply in result:\n if apply.status!=3: continue\n RoomDesignService.add_user_vote_privilege(apply.user_id, 1)\n\[email protected]\ndef get_unfollowed():\n ''' 取消关注用户列表 '''\n from app import app\n from thirdparty.wechat import wechat\n from models import QrCodeUser\n from ops.promote import PromoteService\n users = db.session.query(QrCodeUser.id).order_by(QrCodeUser.id.desc()).all()\n for user_id in users:\n #if user_id[0]>3779: continue\n i = QrCodeUser.query.filter(QrCodeUser.id==user_id[0]).first()\n print i.id, '-'*80, i.open_id\n if len(i.open_id or '')<10: continue\n #info = wechat.get_user_info(i.open_id)\n PromoteService.set_user_sex(i.open_id)\n continue\n if not info['subscribe']:\n print i.open_id\n if i.nickname:PromoteService.set_wechat_status(i.open_id, 0)\n\[email protected]\ndef init_prize():\n ''' 初始化抽奖 '''\n from models import RdMoneyPrize\n from models import db\n prize_types = [\n {'id':1, 'amount':1, 'total':700},\n {'id':2, 'amount':2, 'total':200},\n {'id':3, 'amount':3, 'total':50},\n {'id':4, 'amount':5, 'total':30},\n {'id':5, 'amount':10, 'total':20},\n {'id':6, 'amount':30, 'total':0},\n {'id':7, 'amount':50, 'total':0},\n {'id':8, 'amount':100, 'total':0},\n ]\n for i in prize_types:\n prize = RdMoneyPrize(**i)\n db.session.add(prize)\n db.session.commit()\n\n\n\[email 
protected]\ndef export_room_sms():\n ''' 到处发短信用户 '''\n import csv\n from models import *\n from thirdparty.sms import send_room_one\n from thirdparty.sms import send_room_two\n no_vote_set = set()\n vote_set = set()\n old_user_set = set()\n rooms = RoomDesignDetail.query.all()\n for i in rooms:\n user = User.query.filter(User.id==i.user_id).first()\n if (user.phone or '').startswith('1000000'): continue\n if i.vote_count==0:\n no_vote_set.add(user.phone)\n else:\n vote_set.add(user.phone)\n users = User.query.all()\n for user in users:\n if (user.phone or '').startswith('1000000'): continue\n if user.phone in vote_set or user.phone in no_vote_set: continue\n old_user_set.add(user.phone)\n\n #return\n \n with open('vote_set.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for index, i in enumerate(sorted(vote_set)):\n print 'send vote set....', index, i ;\n #send_room_two(i)\n continue\n writer.writerow([i])\n with open('no_vote_set.csv', 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for index, i in enumerate(sorted(no_vote_set)):\n print 'send no vote set....', index, i ;\n #send_room_two(i)\n continue\n writer.writerow([i])\n with open('old_user_set.csv', 'w') as csvfile:\n import time\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for index, i in enumerate(sorted(old_user_set)):\n if i<='13817485053':\n print 'small continue'\n continue\n time.sleep(0.1)\n print 'send old user set....', index, i ;\n send_room_one(i)\n continue\n writer.writerow([i])\n\n print len(no_vote_set), len(old_user_set), len(vote_set)\n\n\n\nif __name__ == '__main__':\n manager.run()\n\n\n" }, { "alpha_fraction": 0.6724738478660583, "alphanum_fraction": 0.6742160320281982, "avg_line_length": 37.29999923706055, "blob_id": "f4cbe252a3ad9c27d84d9b8bab452824e9f5c9c3", "content_id": 
"71fbc6b0d0ea1778e5830749d76db8de708f638d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1148, "license_type": "no_license", "max_line_length": 105, "num_lines": 30, "path": "/promote/urls.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\n\nfrom promote.views import index\nfrom promote.views import login\nfrom promote.views import create_promoter\nfrom promote.views import get_promoter_list\nfrom promote.views import login_post\nfrom promote.views import logout\nfrom promote.views import del_promoter\n\n\npromote_api = Blueprint('promote_api', __name__,\n template_folder='templates')\n\n\npromote_api.add_url_rule('/', 'get_promoter_list', get_promoter_list)\n\npromote_api.add_url_rule('/login/', 'login', login)\npromote_api.add_url_rule('/logout/', 'logout', logout)\npromote_api.add_url_rule('/create_promoter/', 'create_promoter', create_promoter, methods=['POST','GET'])\n\npromote_api.add_url_rule('/get_promoter_list/', 'get_promoter_list', get_promoter_list)\n\npromote_api.add_url_rule('/login_post/', 'login_post', login_post, methods=['GET', 'POST'])\n\npromote_api.add_url_rule('/del_promoter/', 'del_promoter', del_promoter, methods=['GET', 'POST'])" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5727391839027405, "avg_line_length": 27.538461685180664, "blob_id": "cd863ae5acaab5558d285e36a66ebb9604f34c1c", "content_id": "53eb564bdc8b9f2034a26860b84864d0b61be80a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1538, "license_type": "no_license", "max_line_length": 87, "num_lines": 52, "path": "/ops/admin.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nfrom util.sqlerr import SQL_DUPLICATE_NAME\nfrom util.sqlerr import 
SQL_DUPLICATE_PHONE\n\nfrom models import db\nfrom ops.utils import get_items\nfrom models import AdminUser\nfrom models import UserAdvice\n\n\nclass AdminService(object):\n\n @staticmethod\n def check_admin(name, passwd):\n admin = AdminUser.query.filter(AdminUser.name==name).first()\n return admin and admin.passwd==passwd\n\n @staticmethod\n def create_admin(name, passwd, cat):\n try:\n admin = AdminUser(name=name, passwd=passwd, cat=cat)\n db.session.add(admin)\n db.session.commit()\n return admin.id\n except Exception as e:\n db.session.rollback()\n if SQL_DUPLICATE_NAME.search(str(e)):\n assert 0, '用户名已存在'\n else:\n import traceback\n traceback.print_exc()\n\n @staticmethod\n def get_admin(name):\n return AdminUser.query.filter(AdminUser.name==name).first()\n\n @staticmethod\n def update(where, **kw):\n ''' '''\n count = AdminUser.query.filter(where).update(kw, synchronize_session='fetch')\n db.session.commit()\n return count\n\n @staticmethod\n def remark_useradvice(advice_id, **kw):\n ''' '''\n where = UserAdvice.id==advice_id\n count = UserAdvice.query.filter(where).update(kw)\n db.session.commit()\n return count\n \n \n \n \n \n \n \n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6331126093864441, "alphanum_fraction": 0.6794701814651489, "avg_line_length": 25.964284896850586, "blob_id": "e46d861c8c28fb07df48287d4deb5a3875664892", "content_id": "f9ae8d68e5d61186af5ea6c84b978cff60d3dff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "no_license", "max_line_length": 91, "num_lines": 28, "path": "/migrations/versions/3f2769d4ed16_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3f2769d4ed16\nRevises: 48dc9132c95d\nCreate Date: 2016-01-07 14:45:40.525524\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3f2769d4ed16'\ndown_revision = '48dc9132c95d'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef 
upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('beauty_entry', sa.Column('items', sa.String(length=100), nullable=True))\n op.add_column('beauty_entry', sa.Column('photo', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('beauty_entry', 'photo')\n op.drop_column('beauty_entry', 'items')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6297297477722168, "alphanum_fraction": 0.6648648381233215, "avg_line_length": 25.428571701049805, "blob_id": "8c191fe338b7f159911ecb00c8a072db957a1da6", "content_id": "085f5c90c5e9bc8839e4a94ad87b884c0afb24b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 94, "num_lines": 28, "path": "/migrations/versions/17354ab277c6_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 17354ab277c6\nRevises: 1a31cb75f567\nCreate Date: 2015-12-18 22:43:45.517134\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '17354ab277c6'\ndown_revision = '1a31cb75f567'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('qr_code_user', sa.Column('lnglat', sa.String(length=100), nullable=True))\n op.add_column('qr_code_user', sa.Column('location', sa.String(length=100), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('qr_code_user', 'location')\n op.drop_column('qr_code_user', 'lnglat')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5766910314559937, "alphanum_fraction": 0.5871705412864685, "avg_line_length": 28.70754623413086, "blob_id": "a529e5aefa3cfa7793092b74e70bdb4a12183ffb", "content_id": "a81850a604de09601d5bd6cde197e53e98b6380b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3163, "license_type": "no_license", "max_line_length": 98, "num_lines": 106, "path": "/udp_server.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom errno import EWOULDBLOCK, EAGAIN\nimport logging\nimport os\nimport socket\n\nfrom tornado.ioloop import IOLoop\nfrom tornado.netutil import set_close_exec\n\n\nfrom logging.handlers import RotatingFileHandler\nfrom settings import LOG_FILE_NAME\nfrom settings import LOG_PORT\n\ndef create_client():\n ''' '''\n udp_sock =socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return udp_sock\n\nudp_sock = create_client()\ndef send_msg(msg):\n udp_sock.sendto(msg, ('localhost', LOG_PORT))\n\n\n#----------------------------------------------------------------------\ndef create_logger(path):\n \"\"\" 创建logger \"\"\"\n logger = logging.getLogger(\"api\")\n logger.setLevel(logging.INFO)\n logging.Formatter('%(message)s')\n handler = RotatingFileHandler(path, maxBytes=1024*1024*1024,\n backupCount=1000)\n logger.addHandler(handler)\n \n return logger\n\nlogger = create_logger(LOG_FILE_NAME)\nlogger.propagate=0 #不打印log出来\n\n\nclass UDPServer(object):\n def __init__(self, name, port, on_receive, address=None, family=socket.AF_INET, io_loop=None):\n self.io_loop = io_loop or IOLoop.instance()\n self._on_receive = on_receive\n self._sockets = []\n\n flags = socket.AI_PASSIVE\n\n if hasattr(socket, \"AI_ADDRCONFIG\"):\n flags |= socket.AI_ADDRCONFIG\n\n # find all addresses to bind, bind and register the \"READ\" callback\n for res 
in set(socket.getaddrinfo(address, port, family, socket.SOCK_DGRAM, 0, flags)):\n af, sock_type, proto, canon_name, sock_addr = res\n self._open_and_register(af, sock_type, proto, sock_addr)\n\n print('Started')\n\n def _open_and_register(self, af, sock_type, proto, sock_addr):\n sock = socket.socket(af, sock_type, proto)\n set_close_exec(sock.fileno())\n if os.name != 'nt':\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setblocking(0)\n\n print('Binding to %s...', repr(sock_addr))\n sock.bind(sock_addr)\n\n def read_handler(fd, events):\n while True:\n try:\n data, address = sock.recvfrom(65536)\n except socket.error as e:\n if e.args[0] in (EWOULDBLOCK, EAGAIN):\n return\n raise\n self._on_receive(data, address)\n\n self.io_loop.add_handler(sock.fileno(), read_handler, IOLoop.READ)\n self._sockets.append(sock)\n\n def stop(self):\n print('Closing %d socket(s)...', len(self._sockets))\n for sock in self._sockets:\n self.io_loop.remove_handler(sock.fileno())\n sock.close()\n\n\ndef custom_on_receive(data, address):\n logger.info(data)\n\n\ndef main():\n server = UDPServer('meifenfen_api_logger_on_8008', LOG_PORT, on_receive=custom_on_receive)\n\n# def done(*args):\n# print args\n# for stoppable in args:\n# stoppable.stop()\n# IOLoop.instance().call_later(10, done, server, IOLoop.instance())\n\n IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 14.833333015441895, "blob_id": "a08a79d903375420713982f9b0233d3cd1089cdf", "content_id": "ead6054db855ade14df9675a40a89f7a48525179", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 96, "license_type": "no_license", "max_line_length": 22, "num_lines": 6, "path": "/uwsgi_nginx.ini", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "[uwsgi]\nsocket=127.0.0.1:10001\nwsgi=app:app\nprocesses = 5\nlogdate = true\nlogto = 
/tmp/user.log\n\n" }, { "alpha_fraction": 0.6404833793640137, "alphanum_fraction": 0.6903323531150818, "avg_line_length": 23.518518447875977, "blob_id": "83c36aeaf9913c1756ea52edaefcb849c7e2a020", "content_id": "e3978f54a14c83b20c0117bd11ea64244538f9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "no_license", "max_line_length": 115, "num_lines": 27, "path": "/migrations/versions/4eefa5b6eb51_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4eefa5b6eb51\nRevises: 55f4c256c989\nCreate Date: 2015-11-28 10:09:38.336732\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4eefa5b6eb51'\ndown_revision = '55f4c256c989'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport models\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('pay_log_order_no', sa.Column('total', models.MoneyField(precision=10, scale=2), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('pay_log_order_no', 'total')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6446104645729065, "alphanum_fraction": 0.6456776857376099, "avg_line_length": 20.465116500854492, "blob_id": "f3d15ae9e7fffaade27ac65ba5eba92c62f61feb", "content_id": "13fd1272085ba3170be6c53e03ecc1aca8d3fc5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 937, "license_type": "no_license", "max_line_length": 58, "num_lines": 43, "path": "/demo.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_restful import Resource, Api\n\napp = Flask(__name__)\napi = Api(app)\n\nclass HelloWorld(Resource):\n def get(self):\n return {'hello': 'world'}\n\n\nfrom flask import Blueprint, render_template, abort\nfrom jinja2 import TemplateNotFound\nfrom thirdparty.wechat import wechat\n\nserver_verify = Blueprint('server_verify', __name__,\n template_folder='templates')\n\napp = Flask(__name__)\napi_bp = Blueprint('api', __name__)\napi = Api(api_bp)\n\nclass TodoItem(Resource):\n def get(self, id):\n return {'task': 'Say \"Hello, World!\"'}\n\napi.add_resource(TodoItem, '/todos/<int:id>')\napp.register_blueprint(api_bp)\n\napi.add_resource(HelloWorld, '/')\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\nfrom flask_inputs import Inputs\nfrom wtforms.validators import DataRequired\n\nclass CustomerInputs(Inputs):\n rule = {\n 'id': [DataRequired()]\n }\n \n \n " }, { "alpha_fraction": 0.6351039409637451, "alphanum_fraction": 0.6374133825302124, "avg_line_length": 43.410255432128906, "blob_id": "0ac22adc7e48bd03c3212f534bf7bd254fd651d4", "content_id": "74a3437ddf5cd18c634f62992a8cde1a4065eeb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1756, "license_type": "no_license", "max_line_length": 126, "num_lines": 39, "path": "/user/common.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": 
"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport json\nfrom itertools import chain\n\nfrom models import Order\nfrom models import UserCoupon\nfrom ops.order import OrderService\nfrom ops.coupon import CouponService\nfrom ops.credit import CreditService\n\nfrom constants import ORDER_STATUS\n\n\ndef cancel_order(order_id):\n ''' 取消已支付订单 '''\n order = OrderService.get_order_by_id(order_id)\n assert order, '订单不存在'\n count = 0\n if order.status in [ORDER_STATUS.NEW_ORDER, ORDER_STATUS.TO_PAY]:\n where = Order.status.in_([ORDER_STATUS.NEW_ORDER, ORDER_STATUS.TO_PAY])\n count = OrderService.update_order_status(order_id, ORDER_STATUS.CANCEL_BEFORE_PAY, order.user_id, where)\n if count:\n if order.credit_amount:\n CreditService.modify_credit(order.user_id, -(order.credit_amount))\n if order.coupon_id:\n CouponService.update_user_coupon_status(UserCoupon.id==order.coupon_id, 0)\n elif order.status==ORDER_STATUS.PAY_SUCCESS:\n where = Order.status==ORDER_STATUS.PAY_SUCCESS\n count = OrderService.update_order_status(order_id, ORDER_STATUS.CANCELED, order.user_id, where)\n if count:\n if order.credit_amount:\n repayment_amount = OrderService.order_repayment_logs_amount(order_id)\n remain_to_repayment = order.credit_amount - repayment_amount\n CreditService.modify_credit(order.user_id, -remain_to_repayment)\n CreditService.cancel_pay_logs(order_id)\n if order.coupon_id:\n CouponService.update_user_coupon_status(UserCoupon.id==order.coupon_id, 0)\n" }, { "alpha_fraction": 0.6242544651031494, "alphanum_fraction": 0.6590456962585449, "avg_line_length": 22.337209701538086, "blob_id": "07f2035ec9a4b31038cce141ec924a59b04c948b", "content_id": "0de4d2ae4d404c4d437de698a3536faefe8a7ec7", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 2118, "license_type": "no_license", "max_line_length": 62, "num_lines": 86, "path": "/thirdparty/sms.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 
-*-\nimport time\nimport hashlib\nfrom datetime import datetime\nimport random\nfrom functools import wraps\n\nfrom flask import request\nfrom flask import url_for\n\nfrom thirdparty.SendTemplateSMS import sendTemplateSMS\nimport settings\n\n\ndef today_remain_seconds():\n now = datetime.now()\n year, month, day = now.year, now.month, now.day\n now_second = time.mktime(now.timetuple())\n cut_now = datetime(year, month, day)\n cut_now_second = time.mktime(cut_now.timetuple())\n return 86400 - int(now_second-cut_now_second)\n\ndef gen_vcode():\n code = random.randrange(100000,999999)\n return str(code)\n\ndef gen_complex_vcode():\n code = random.randrange(10000000,99999999)\n return str(code)\n\n\ndef _send_sms(phone, data, tpl_id):\n try:\n #请求包格式无法解析错误 把unicode转为str\n phone = str(phone)\n for i in range(len(data)):\n data[i] = str(data[i])\n print \n print phone, data, tpl_id, '发送短信'\n result = sendTemplateSMS(phone, data, tpl_id)\n return result\n except:\n import traceback\n traceback.print_exc()\n\n\[email protected]\ndef send_sms(phone, vcode):\n print '发送注册短信', phone, vcode\n return _send_sms(phone, [vcode,5], 44515)\n\n\[email protected]\ndef send_sms_apply_success(phone, amount):\n print '发送审核通过短信'\n return _send_sms(phone, [amount], 44988)\n\n\[email protected]\ndef send_sms_apply_reject(phone):\n print '发送审核被拒短信'\n return _send_sms(phone, [], 44990)\n\n\[email protected]\ndef send_sms_new_order(phone, name, desc, service_code):\n print '下单短信'\n return _send_sms(phone, [name, desc, service_code], 44994)\n\n\[email protected]\ndef send_sms_refund(phone, name, price, period):\n print '退款短信'\n return _send_sms(phone, [name, price, period], 52093)\n\n\[email protected]\ndef send_room_one(phone):\n ''' 老用户 '''\n return _send_sms(phone, [], 71623)\n\n\[email protected]\ndef send_room_two(phone):\n ''' 拉票 '''\n return _send_sms(phone, [], 71638)\n\n\n\n\n\n" }, { "alpha_fraction": 0.62269127368927, "alphanum_fraction": 0.6701846718788147, "avg_line_length": 
23.45161247253418, "blob_id": "eeadc418430a5e2f63909fcb09cb7945b20ba351", "content_id": "deb2d715c0a71ce5760ec81accb53cb4f604f16f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "no_license", "max_line_length": 63, "num_lines": 31, "path": "/migrations/versions/2eb48ce629a0_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2eb48ce629a0\nRevises: d5045d5ecf4\nCreate Date: 2015-12-16 15:59:24.978212\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2eb48ce629a0'\ndown_revision = 'd5045d5ecf4'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('image_size',\n sa.Column('key', sa.String(length=32), nullable=False),\n sa.Column('width', sa.Integer(), nullable=True),\n sa.Column('height', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('key')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('image_size')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5062175393104553, "alphanum_fraction": 0.5181666016578674, "avg_line_length": 38.01438903808594, "blob_id": "9b8b775998c1ff7ee71b0d96b516a0ecbdd6ef81", "content_id": "92a78457cc5a0fc35cc94468071cf8d61e2a0c59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62381, "license_type": "no_license", "max_line_length": 110, "num_lines": 1529, "path": "/models.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n'''\n'''\nfrom flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.script import Manager\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\nfrom sqlalchemy import TypeDecorator\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy import PrimaryKeyConstraint\nfrom sqlalchemy.ext import mutable\nfrom sqlalchemy.sql.sqltypes import String\nfrom sqlalchemy.sql.sqltypes import Text\nfrom sqlalchemy.sql.sqltypes import Integer\nfrom sqlalchemy.sql.sqltypes import UnicodeText\nfrom sqlalchemy.sql.sqltypes import DateTime\nfrom sqlalchemy.sql.sqltypes import Float\nfrom sqlalchemy.sql.sqltypes import Boolean\nfrom sqlalchemy.dialects.mysql import TINYINT,DECIMAL,CHAR,INTEGER\nfrom sqlalchemy.sql.expression import cast\n\nfrom util.utils import prefix_http\nfrom util.utils import dt_obj\nfrom util.utils import format_price\nfrom util.utils import format_rate\nfrom util.utils import prefix_img_domain\nfrom util.utils import prefix_img_list\nfrom util.utils import prefix_img_list_thumb\nfrom util.utils import str_to_int_list\nfrom util.utils import comma_str_to_list\nfrom util.utils import imgs_to_list\nfrom settings import MAIN_MYSQL_URI\nfrom settings import DEFAULT_IMAGE\nfrom constants import CREDIT_STATUS\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = MAIN_MYSQL_URI\ndb = SQLAlchemy(app)\n\n\n\nColumn = db.Column\nTable = 
db.Table\nForeignKey = db.ForeignKey\n\n\nclass Model(db.Model):\n __abstract__ = True\n\n @staticmethod\n def show_status():\n return True\n\n\nclass MoneyField(TypeDecorator):\n impl = DECIMAL(10, 2)\n def column_expression(self, col):\n return cast(col, Float)\n def process_result_value(self, value, dialect):\n return float(value or 0)\n\n\nclass User(Model):\n ''' 用户 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(80), unique=True)\n avatar = db.Column(String(1000))\n phone = db.Column(String(80), unique=True)\n passwd = db.Column(String(80))\n city_id = Column(Integer, ForeignKey('city.id'))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n name = self.name,\n avatar = prefix_img_domain(self.avatar or DEFAULT_IMAGE),\n phone = self.phone,\n create_time = self.create_time\n )\n\n\nclass Wechat(Model):\n __tablename__ = 'wechat' \n __table_args__ = (\n PrimaryKeyConstraint('open_id'),\n )\n open_id = Column(String(32), autoincrement=False)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=True)\n create_time = Column(DateTime, default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0) #1已登录 0新注册未绑定user -1已退出\n\n\nclass Order(Model):\n '''\n 提交订单时, 优惠券和额度都锁定\n credit_choice_id 下单时 就存下来\n 直到真正付款成功了生成每一期的PeriodPayLog记录\n '''\n id = db.Column(Integer, primary_key=True)\n pay_method = Column(TINYINT(1), nullable=False, default=0)#0没用付钱(可能是全部使用优惠券或信用额度) 1微信号 2微信app 3支付宝\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n hospital_id = Column(Integer, ForeignKey('hospital.id'), nullable=False)\n item_id = Column(Integer, ForeignKey('item.id'), nullable=False)\n order_no = db.Column(String(30), unique=True)\n transaction_id = db.Column(String(100))\n credit_choice_id = Column(Integer, ForeignKey('period_pay_choice.id'), nullable=True)#只用做订单支付预览显示\n coupon_id = Column(Integer, ForeignKey('user_coupon.id'), nullable=True, unique=True)\n 
coupon_amount = Column(MoneyField, nullable=False, default=0)#优惠券面值用量\n credit_amount = Column(MoneyField, nullable=False, default=0)#信用额度使用量(分期总额+分期费用)\n total_fee = Column(MoneyField, nullable=False, default=0)#分期费用\n price = Column(MoneyField, nullable=False, default=0)#订单实际付款的钱 不包括信用额度\n total = Column(MoneyField, nullable=False, default=0)#订单总价 不使用优惠券时的价格\n create_time = Column(DateTime, default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0) #0待支付 (额度已外金额付款状态)\n refund = Column(TINYINT(1), nullable=False, default=0) #0为退款 1已退款\n credit_verified = Column(TINYINT(1), nullable=False, default=0) #额度是否通过审核 0待审核 1通过审核 2被拒绝重新申请\n user_finished = Column(Boolean, default=False) #用户已确认完成\n remark = db.Column(String(300))\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n item_id = self.item_id,\n order_no = self.order_no,\n transaction_id = self.transaction_id,\n hospital_id = self.hospital_id,\n coupon_id = self.coupon_id or 0,\n price = format_price(self.price or 0),\n total_fee = format_price(self.total_fee or 0),\n total = format_price(self.total or 0),\n credit_amount = format_price(self.credit_amount or 0),\n coupon_amount = format_price(self.coupon_amount or 0),\n create_time = str(self.create_time),\n status = self.status,\n credit_choice_id = self.credit_choice_id or 0,\n refund = self.refund,\n credit_verified = self.credit_verified,\n user_finished = self.user_finished,\n remark = self.remark or ''\n )\n\n\nclass Coupon(Model):\n '''优惠券'''\n id = db.Column(Integer, primary_key=True)\n item_id = Column(Integer, ForeignKey('item.id'), nullable=True)\n title = Column(String(300), default='')\n price = Column(MoneyField, nullable=False, default=0) #实付金额\n need = Column(MoneyField, nullable=False, default=0) #需要满多少才能使用\n coupon_cat = Column(TINYINT(1), nullable=False, default=0) #优惠券类型\n cat_id = Column(Integer, ForeignKey('item_cat.id'), nullable=True)#0分类\n sub_cat_id = Column(Integer, 
ForeignKey('item_sub_cat.id'), nullable=True)\n effective = Column(Integer,nullable=False,default=0)\n remark = Column(String(100), default='')\n is_trial = Column(Boolean, default=False) #是否是试用券\n\n def as_dict(self):\n need_cat = 1 if self.need else 2 #1满减券 2普通\n return dict(\n id = self.id,\n coupon_cat = self.coupon_cat,\n is_trial = 1 if self.is_trial else 0,\n item_id = self.item_id,\n need = format_price(self.need),\n title = self.title,\n price = format_price(self.price),\n cat_id = self.cat_id,\n need_cat = need_cat,\n sub_cat_id = self.sub_cat_id,\n effective = self.effective,\n effective_days = self.effective/86400,\n remark = self.remark,\n )\n\n\n\nclass UserCoupon(Model):\n ''' 用户优惠券 '''\n id = db.Column(Integer, primary_key=True)\n coupon_id = Column(Integer, ForeignKey('coupon.id'), autoincrement=False)\n title = Column(String(300), default='')\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n item_id = Column(Integer, ForeignKey('item.id'), nullable=True)\n need = Column(MoneyField, nullable=False, default=0) #需要满多少才能使用\n coupon_cat = Column(TINYINT(1), nullable=False, default=0) #优惠券类型 0全部 1cat分类 2子分类 3指定项目\n cat_id = Column(Integer, ForeignKey('item_cat.id'), nullable=True)#0分类\n sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), nullable=True)\n price = Column(MoneyField, nullable=False, default=0)#实付金额\n status = Column(TINYINT(1), nullable=False, default=0)#0未使用 1已使用\n end_time = Column(DateTime, nullable=False)\n create_time = Column(DateTime, nullable=False, default=dt_obj.now)\n remark = Column(String(100), default='')\n is_trial = Column(Boolean, default=False) #是否是试用券\n\n def as_dict(self):\n return dict(\n id = self.id,\n coupon_cat = self.coupon_cat,\n cat_id = self.cat_id,\n is_trial = 1 if self.is_trial else 0,\n title = self.title,\n sub_cat_id = self.sub_cat_id,\n user_id = self.user_id,\n need = format_price(self.need),\n item_id = self.item_id,\n price = format_price(self.price),\n status = 
self.status,\n end_time = str(self.end_time),\n create_time = str(self.create_time),\n coupon_id = self.coupon_id,\n remark = self.remark,\n )\n\n\nclass PeriodPayChoice(Model):\n ''' 分期费率表 '''\n id = db.Column(Integer, primary_key=True)\n period_count = Column(Integer, nullable=False, unique=True) #分期数\n period_fee = Column(Float, nullable=False) #分期税率\n\n def as_dict(self):\n return dict(\n id = self.id,\n period_count = self.period_count,\n period_fee = self.period_fee,\n )\n\n\nclass PeriodPayLog(Model):\n '''\n *滞纳金动态计算*\n 每期还款额列表建模\n '''\n id = db.Column(Integer, primary_key=True)\n amount = Column(MoneyField, nullable=False, default=0) #每期金额\n fee = Column(MoneyField, nullable=False, default=0) #每期手续费用\n punish = Column(MoneyField, nullable=False, default=0) #预期滞纳金\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n order_id = Column(Integer, ForeignKey('order.id'), nullable=True)\n period_pay_index = Column(Integer, nullable=True) #分期应该还的第几期\n period_count = Column(Integer, nullable=True) #分期总数\n create_time = Column(DateTime, default=dt_obj.now)\n deadline = Column(DateTime)#还款日\n repayment_time = Column(DateTime)#实际还款日\n status = Column(TINYINT(1), nullable=False, default=0)#0待还 1已还 2 已取消\n\n def as_dict(self):\n return dict(\n id = self.id,\n amount = format_price(self.amount or 0),\n punish = format_price(self.punish or 0),\n period_count= self.period_count,\n fee = float(self.fee or 0),\n user_id = self.user_id,\n order_id = self.order_id,\n period_pay_index = self.period_pay_index,\n deadline = str(self.deadline),\n repayment_time = str(self.repayment_time or ''),\n create_time = self.create_time,\n status = self.status,\n )\n\n\nclass PunishLog(Model):\n '''滞纳金产生 历史'''\n id = db.Column(Integer, primary_key=True)\n log_id = Column(Integer, ForeignKey('period_pay_log.id'), nullable=True)\n amount = Column(MoneyField, nullable=False, default=0)\n create_time = Column(DateTime, default=dt_obj.now)\n\n\nclass CreditUseLog(Model):\n ''' 
可用信用额度使用历史 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n amount = Column(MoneyField, nullable=False, default=0)\n order_id = Column(Integer, ForeignKey('order.id'), nullable=True)\n status = Column(TINYINT(1), nullable=False, default=0)#额度当期状态\n create_time = Column(DateTime, default=dt_obj.now)\n\n\nclass CreditChangeLog(Model):\n ''' 信用总额变更历史 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n amount = Column(MoneyField, nullable=False, default=0)\n create_time = Column(DateTime, default=dt_obj.now)\n\n\nclass UserCredit(Model):\n ''' 用户信用额度 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n total = Column(MoneyField, nullable=False, default=0)#总额度\n used = Column(MoneyField, nullable=False, default=0) #已使用额度\n status = Column(TINYINT(1), nullable=False, default=CREDIT_STATUS.DEFAULT)#0默认 1审核中 2审核通过 3被拒\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n total = format_price(self.total or 0),\n used = format_price(self.used or 0),\n status = self.status\n )\n\n\nclass Hospital(Model):\n ''' 医院 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100))\n city_id = Column(Integer, ForeignKey('city.id'), nullable=True)\n image = db.Column(String(100))\n phone = db.Column(String(100))\n desc = db.Column(String(10000))\n tags = db.Column(String(1000)) #逗号分割的医院标签\n addr = db.Column(String(300)) #地址\n working_time = db.Column(String(100)) #工作时间\n long_lat = db.Column(String(30)) #经纬度\n photos = db.Column(String(1000))\n rate = Column(Float, default=5) #评分\n sold_count = db.Column(Integer, default=0) #已售数量\n status = Column(TINYINT(1), nullable=False, default=0)#0下线 1上线\n\n def as_dict(self):\n return dict(\n id = self.id,\n status = self.status,\n city_id = self.city_id,\n sold_count = self.sold_count or 0,\n photo_list = 
prefix_img_list(self.photos),\n image = prefix_img_domain(self.image),\n photos = self.photos,\n name = self.name,\n rate = format_rate(self.rate or 5),\n phone = self.phone,\n desc = self.desc,\n working_time = self.working_time,\n tag_list = comma_str_to_list(self.tags),\n tags = self.tags,\n addr = self.addr,\n long_lat = self.long_lat,\n lng = self.long_lat.split(',')[0] if self.long_lat else '',\n lat = self.long_lat.split(',')[1] if self.long_lat else '',\n )\n\n\n\nclass ItemCat(Model):\n ''' 分类 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100), unique=True)\n sort_order = Column(Integer, default=0) #小的排在前面\n status = Column(TINYINT(1), nullable=False, default=0)#0未上线 1已上线\n\n def as_dict(self):\n return dict(\n id = self.id,\n name = self.name,\n status = self.status,\n sort_order = self.sort_order\n )\n\n\nclass ItemSubCat(Model):\n ''' 子分类 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100))\n desc = db.Column(String(1000))\n icon = db.Column(String(100))\n cat_id = Column(Integer, ForeignKey('item_cat.id'), nullable=False)#父分类id\n cat_ids = db.Column(String(500))\n status = Column(TINYINT(1), nullable=False, default=0)#0未上线 1已上线\n\n def as_dict(self):\n return dict(\n id = self.id,\n name = self.name,\n desc = self.desc,\n cat_ids = self.cat_ids,\n cat_id_list = str_to_int_list(self.cat_ids),\n icon = prefix_img_domain(self.icon),\n cat_id = self.cat_id,\n status = self.status\n )\n\n\nclass Item(Model):\n ''' 商品 '''\n id = db.Column(Integer, primary_key=True)\n orig_price = Column(MoneyField, nullable=False, default=0)\n price = Column(MoneyField, nullable=False, default=0)\n sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), nullable=False)#子分类id\n hospital_id = Column(Integer, ForeignKey('hospital.id'), nullable=False)\n sub_cat_ids = db.Column(String(100))\n image = db.Column(String(300))\n photos = db.Column(String(1000))\n title = db.Column(String(500))\n item_no = 
db.Column(String(100), index=True) #项目编号\n support_choices = db.Column(String(50)) #支持的分期数选项\n sold_count = db.Column(Integer, default=0) #已售数量\n has_fee = Column(Boolean, default=True) #是否免息\n direct_buy = Column(Boolean) #是否可以直接购买\n status = Column(TINYINT(1), nullable=False, default=0)#0未上线 1已上线 2医院被下线\n surgery_desc = Column(Text)\n doctor_desc = Column(Text)\n create_time = Column(DateTime, default=dt_obj.now)\n use_time = db.Column(String(300))\n note = db.Column(String(500)) #提示\n def as_dict(self):\n return dict(\n id = self.id,\n sub_cat_id = self.sub_cat_id,\n title = self.title,\n sub_cat_ids = self.sub_cat_ids,\n sub_cat_id_list = map(int, filter(bool, (self.sub_cat_ids or '').split(','))),\n direct_buy = bool(self.direct_buy),\n price = format_price(self.price or 0),\n orig_price = format_price(self.orig_price or 0),\n photos = self.photos,\n item_no = str(self.id),\n hospital_id = self.hospital_id,\n sold_count = self.sold_count or 0,\n image = prefix_img_domain(self.image),\n photo_list = prefix_img_list(self.photos) if self.photos else [],\n support_choices = self.support_choices,\n support_choice_list = str_to_int_list(self.support_choices),\n status = self.status,\n surgery_desc = self.surgery_desc,\n use_time = self.use_time,\n note = self.note,\n doctor_desc = self.doctor_desc,\n has_fee = bool(self.has_fee),\n create_time = self.create_time,\n )\n\n\nclass ItemComment(Model):\n ''' 商品评价 '''\n id = db.Column(Integer, primary_key=True)\n item_id = Column(Integer, ForeignKey('item.id'))\n user_id = Column(Integer, ForeignKey('user.id'))\n order_id = Column(Integer, ForeignKey('order.id'))\n photos = db.Column(String(1000))\n content = db.Column(String(10000))\n rate = Column(Float, default=0) #评分\n is_anonymous = Column(Boolean, default=False)\n is_re_comment = Column(Boolean, default=False)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n is_anonymous = self.is_anonymous,\n is_re_comment 
= bool(self.is_re_comment),\n item_id = self.item_id,\n order_id = self.order_id,\n user_id = self.user_id,\n rate = self.rate or 0,\n photos = self.photos,\n photo_list = prefix_img_list(self.photos) if self.photos else [],\n thumb_photo_list= prefix_img_list_thumb(self.photos) if self.photos else [],\n content = self.content,\n create_time = str(self.create_time)\n )\n\n\nclass ItemFav(Model):\n ''' 心愿单 '''\n __table_args__ = (\n UniqueConstraint('user_id', 'item_id'),\n )\n id = db.Column(Integer, primary_key=True)\n item_id = Column(Integer, ForeignKey('item.id'))\n user_id = Column(Integer, ForeignKey('user.id'))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n item_id = self.item_id,\n user_id = self.user_id,\n create_time = str(self.create_time)\n )\n\n\nclass UserAdvice(Model):\n ''' 用户反馈 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=True)\n content = db.Column(String(10000))\n contact = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now)\n remark = db.Column(String(300))\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n content = self.content,\n contact = self.contact,\n create_time = self.create_time,\n remark = self.remark\n )\n\n\nclass ServiceCode(Model):\n ''' 预约服务码 '''\n id = db.Column(Integer, primary_key=True)\n order_id = Column(Integer, ForeignKey('order.id'), unique=True) \n code = Column(String(100), index=True, unique=True) \n status = Column(TINYINT(1), nullable=False, default=0) #0未使用 1已预约 2已确认\n book_time = Column(DateTime) #预约时间\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n order_id = self.order_id,\n code = self.code,\n book_time = self.book_time,\n status = self.status\n )\n\n\nclass PayNotifyLog(Model):\n ''' 通知回调日志 '''\n id = db.Column(Integer, primary_key=True)\n pay_type = Column(TINYINT(1), 
nullable=False, default=0) #1微信公众号 2微信app 3支付宝\n content = db.Column(String(10000))\n create_time = Column(DateTime, default=dt_obj.now)\n\n\nclass OrderLog(Model):\n ''' 订单状态变更日志 '''\n id = db.Column(Integer, primary_key=True)\n order_id = Column(Integer, ForeignKey('order.id')) \n status = Column(TINYINT(1), nullable=False) #订单当前状态\n remark = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now)\n\n\nclass CreditApply(Model):\n ''' 额度申请 大学学生升到了研究生后,学历信息/毕业时间需要提醒她们更改 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), unique=True)\n name = db.Column(String(100)) #姓名\n id_no = db.Column(String(18)) #身份证号码\n school = db.Column(String(100)) #学校名字\n enrollment_time = Column(DateTime) #入学时间\n major = db.Column(String(100)) #专业\n stu_no = db.Column(String(20)) #学号\n stu_education = db.Column(String(20)) #学历\n stu_years = Column(Float, default=4) #学制\n addr = db.Column(String(100)) #地址\n parent_contact = db.Column(String(100)) #父母联系方式\n chsi_name = db.Column(String(100)) #学信网账号\n chsi_passwd = db.Column(String(100)) #学信网密码\n id_card_photo = db.Column(String(100)) #身份证照\n stu_card_photo = db.Column(String(100)) #学生证照\n body_choice_ids = db.Column(String(100)) #部位id\n body_choice_text = db.Column(String(100)) #其他内容\n create_time = Column(DateTime, default=dt_obj.now)\n update_time = Column(DateTime, default=dt_obj.now)\n graduate_time = Column(DateTime)\n has_supply = Column(Boolean, default=False) #资料已经从学信网补充\n reason = db.Column(String(500)) #被拒原因\n status = Column(TINYINT(1), nullable=False, default=1) #1第一步 2第二步 3通过 4被拒绝\n remark = db.Column(String(500)) #备注\n remark_img = db.Column(String(500)) #备注图片\n\n\n def as_dict(self):\n return dict(\n id = self.id,\n id_no = self.id_no or '',\n stu_education = self.stu_education,\n create_time = self.create_time,\n update_time = self.update_time,\n status = self.status,\n name = self.name or '',\n stu_no = self.stu_no,\n user_id = self.user_id,\n school = 
self.school,\n enrollment_time = self.enrollment_time,\n major = self.major,\n addr = self.addr,\n graduate_time = self.graduate_time,\n chsi_name = self.chsi_name or '',\n chsi_passwd = self.chsi_passwd or '',\n parent_contact = self.parent_contact or '',\n stu_years = self.stu_years,\n reason = self.reason or '',\n id_card_photo = prefix_img_domain(self.id_card_photo),\n stu_card_photo = prefix_img_domain(self.stu_card_photo),\n id_card_photo_key = self.id_card_photo,\n stu_card_photo_key= self.stu_card_photo,\n has_supply = self.has_supply,\n body_choice_ids = self.body_choice_ids,\n body_choice_text = self.body_choice_text,\n remark = self.remark,\n remark_img = prefix_img_domain(self.remark_img),\n )\n\n\nclass School(Model):\n ''' 学校 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100), unique=True) #学校名字\n city_name = db.Column(String(100)) #城市名字\n link = db.Column(String(100)) #链接\n pics_count = db.Column(Integer, default=0, index=True) #图片数量\n\n def as_dict(self):\n return dict(\n id = self.id,\n name = self.name,\n link = prefix_http(self.link),\n city_name = self.city_name,\n pics_count = self.pics_count or 0\n )\n\n\nclass AdminUser(Model):\n ''' 管理员 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100), unique=True)\n city_id = Column(Integer, ForeignKey('city.id'))\n passwd = db.Column(String(100))\n cat = Column(TINYINT(1), nullable=False, default=0)#0所有权限 1编辑 2推广\n create_time = Column(DateTime, default=dt_obj.now)\n\n\nclass City(Model):\n ''' 城市 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100), unique=True)\n city_code = db.Column(String(30), unique=True) #百度cityCode\n amap_code = db.Column(String(30), unique=True) #高德地图cityCode\n\n def as_dict(self):\n return dict(\n id = self.id,\n name = self.name,\n amap_code = self.amap_code,\n city_code = self.city_code\n )\n\n\nclass Repayment(Model):\n ''' 还款订单 '''\n id = db.Column(Integer, primary_key=True)\n user_id = 
Column(Integer, ForeignKey('user.id'))\n pay_method = Column(TINYINT(1), nullable=False, default=0)#0没用付钱(可能是全部使用优惠券或信用额度) 1微信 2支付宝\n coupon_id = Column(Integer, ForeignKey('user_coupon.id'), nullable=True, unique=True)\n price = Column(MoneyField, nullable=False, default=0) #每期手续费用\n data = db.Column(String(10000)) #还了哪些期 还款时的每期金额\n order_no = db.Column(String(30), unique=True)\n transaction_id = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now)\n update_time = Column(DateTime, default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0) #0待支付 1支付中 2支付成功\n\n def as_dict(self):\n ''' '''\n return dict(\n id = self.id,\n pay_method = self.pay_method,\n coupon_id = self.coupon_id,\n data = self.data,\n price = format_price(self.price),\n order_no = self.order_no,\n create_time = self.create_time,\n update_time = self.update_time,\n status = self.status,\n transaction_id = self.transaction_id\n )\n\n\nclass HospitalUser(Model):\n ''' 医院管理员 '''\n id = db.Column(Integer, primary_key=True)\n hospital_id = Column(Integer, ForeignKey('hospital.id'))\n name = db.Column(String(100), unique=True)\n passwd = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n hospital_id = self.hospital_id,\n name = self.name,\n create_time = self.create_time\n )\n\n\nclass HelpCat(Model):\n ''' 帮助分类 '''\n id = db.Column(Integer, primary_key=True)\n name = db.Column(String(100), unique=True)\n\n def as_dict(self):\n return dict(\n id = self.id,\n name = self.name,\n )\n\n\nclass HelpEntry(Model):\n ''' 帮助条目 '''\n id = db.Column(Integer, primary_key=True)\n title = db.Column(String(100))\n cat_id = Column(Integer, ForeignKey('help_cat.id'))\n content = db.Column(String(10000))\n\n def as_dict(self):\n return dict(\n id = self.id,\n title = self.title,\n cat_id = self.cat_id,\n content = self.content\n )\n\n\nclass Activity(Model):\n ''' 活动 '''\n id = db.Column(Integer, 
primary_key=True)\n title = db.Column(String(300))\n city_id = Column(Integer, ForeignKey('city.id'))\n desc = db.Column(String(1000))\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n title = self.title,\n city_id = self.city_id,\n desc = self.desc,\n start_time = self.start_time,\n end_time = self.end_time,\n create_time = self.create_time\n )\n\n\nclass ActivityItem(Model):\n ''' 活动商品 '''\n id = db.Column(Integer, primary_key=True)\n activity_id = Column(Integer, ForeignKey('activity.id'))\n item_id = Column(Integer, ForeignKey('item.id'))\n sort_order = Column(Integer, default=0) #小的排在前面\n price = Column(MoneyField, nullable=False, default=0) #活动价格\n image = db.Column(String(300))\n\n def as_dict(self):\n return dict(\n id = self.id,\n image = prefix_img_domain(self.image),\n activity_id = self.activity_id,\n item_id = self.item_id,\n price = format_price(self.price),\n sort_order = self.sort_order\n )\n\n\nclass RecommendItem(Model):\n ''' 推荐商品 '''\n id = db.Column(Integer, primary_key=True)\n item_id = Column(Integer, ForeignKey('item.id'), unique=True)\n sort_order = Column(Integer, default=0) #小的排在前面\n image = db.Column(String(300))\n desc = db.Column(String(500))\n\n def as_dict(self):\n return dict(\n id = self.id,\n sort_order = self.sort_order,\n item_id = self.item_id,\n image = prefix_img_domain(self.image),\n desc = self.desc\n )\n\n\n\nclass RecommendSubcat(Model):\n ''' 推荐商品子分类 '''\n id = db.Column(Integer, primary_key=True)\n sub_cat_id = Column(Integer, ForeignKey('item_sub_cat.id'), unique=True)\n sort_order = Column(Integer, default=0) #小的排在前面\n icon = db.Column(String(300))\n\n def as_dict(self):\n return dict(\n id = self.id,\n sort_order = self.sort_order,\n sub_cat_id = self.sub_cat_id,\n icon = prefix_img_domain(self.icon)\n )\n\n\nclass EditNameLog(Model):\n ''' 名字修改记录 '''\n id = db.Column(Integer, primary_key=True)\n 
user_id = Column(Integer, ForeignKey('user.id'))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n create_time = self.create_time\n )\n\n\nclass PayLogOrderNo(Model):\n ''' 还款期记录对应 订单号\n 还款后取消订单的操作是: 退换已还的款项, 将未还的log至为status 2\n '''\n id = db.Column(Integer, primary_key=True)\n order_no = db.Column(String(30), index=True)\n period_pay_log_id = Column(Integer, ForeignKey('period_pay_log.id'), unique=True)\n price = Column(MoneyField, nullable=False, default=0) #还款金额\n total = Column(MoneyField, nullable=False, default=0) #总还款金额\n create_time = Column(DateTime, default=dt_obj.now) \n\n\n\nclass QrCodeUser(Model):\n ''' 扫描二维码关注用户\n '''\n id = db.Column(Integer, primary_key=True)\n open_id = db.Column(String(50), unique=True) #唯一索引\n qrcode_id = Column(Integer, ForeignKey('qrcode.id'), nullable=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=True)\n sex = Column(Integer, default=0)\n city = db.Column(String(100))\n headimgurl = db.Column(String(300))\n nickname = db.Column(String(100))\n location = db.Column(String(100))\n lnglat = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now) \n status = Column(TINYINT(1), default=1, index=True) #0取消关注 1已关注 -1未曾关注\n\n def as_dict(self):\n return dict(\n id = self.id,\n open_id = self.open_id,\n qrcode_id = self.qrcode_id,\n user_id = self.user_id,\n sex = self.sex,\n headimgurl = self.headimgurl or DEFAULT_IMAGE,\n city = self.city,\n nickname = self.nickname,\n location = self.location,\n lnglat = self.lnglat,\n create_time = self.create_time,\n status = self.status,\n )\n\n\nclass Promoter(Model):\n ''' 推广员 '''\n id = db.Column(Integer, primary_key=True)\n phone = db.Column(String(20), unique=True)\n name = db.Column(String(50))\n passwd = db.Column(String(50))\n follow_count = Column(Integer, default=0, index=True) #关注数\n reg_count = Column(Integer, default=0, index=True) #注册数\n dup_count = 
Column(Integer, default=0, index=True) #重复注册数\n unfollow_count = Column(Integer, default=0, index=True) #取消关注数\n create_time = Column(DateTime, default=dt_obj.now)\n create_by = Column(Integer, ForeignKey('promoter.id'), nullable=True)\n status = Column(TINYINT(1), nullable=False, default=1) #0已下线 1可创建二维码 2不可创建二维码\n\n def as_dict(self):\n return dict(\n id = self.id,\n dup_count = self.dup_count,\n phone = self.phone,\n name = self.name,\n passwd = self.passwd,\n create_by = self.create_by,\n follow_count= self.follow_count,\n reg_count = self.reg_count,\n unfollow_count= self.unfollow_count,\n status = self.status\n )\n\n\nclass Qrcode(Model):\n ''' 二维码id '''\n id = db.Column(Integer, primary_key=True)\n ticket = db.Column(String(100))\n image = db.Column(String(300))\n act_type = db.Column(Integer, default=0) #推广活动类型 9现金活动\n promoter_id = Column(Integer, ForeignKey('promoter.id'), nullable=False)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n ticket = self.ticket,\n image = prefix_img_domain(self.image),\n promoter_id = self.promoter_id,\n create_time = self.create_time,\n act_type = self.act_type\n )\n\n\nclass WechatLocation(Model):\n ''' 微信定位 '''\n id = db.Column(Integer, primary_key=True)\n open_id = db.Column(String(50), index=True) #用户open_id\n lng = db.Column(String(50))\n lat = db.Column(String(50))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n open_id = self.open_id,\n lng = self.lng,\n lat = self.lat,\n create_time = self.create_time\n )\n\n\n\nclass FakeUser(Model):\n ''' 假用户 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n\n\nclass Trial(Model):\n ''' 试用 '''\n id = db.Column(Integer, primary_key=True)\n title = db.Column(String(300))\n image = db.Column(String(300)) #图片\n cat = Column(Integer, default=0) #试用类型 0免费礼品 1特定项目代金券\n coupon_id = Column(Integer, 
ForeignKey('coupon.id'), nullable=True)\n total = Column(Integer, default=0) #申请数\n sent = Column(Integer, default=0) #已发放数\n sort_order = Column(Integer, default=0) #试用排序\n apply_count = Column(Integer, default=0) #人气\n rules = db.Column(Text) #试用规则\n process = db.Column(Text) #流程\n create_time = Column(DateTime, default=dt_obj.now)\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n\n def as_dict(self):\n return dict(\n id = self.id,\n title = self.title,\n image = prefix_img_domain(self.image),\n cat = self.cat,\n cat_str = '免费, 包邮' if self.cat==0 else '免费',\n total = self.total,\n coupon_id = self.coupon_id,\n sent = self.sent,\n sort_order = self.sort_order,\n apply_count = self.apply_count,\n rules = self.rules,\n process = self.process,\n create_time = self.create_time,\n end_time = self.end_time,\n start_time = self.start_time,\n )\n\n\n\nclass TrialApply(Model):\n ''' 试用申请 '''\n __table_args__ = (\n UniqueConstraint('user_id', 'trial_id'),\n )\n\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n trial_id = Column(Integer, ForeignKey('trial.id'), nullable=False)\n cat = Column(Integer, default=0) #试用类型 0免费礼品 1特定项目代金券\n coupon_id = Column(Integer, ForeignKey('user_coupon.id'), nullable=True)\n name = db.Column(String(100))\n phone = db.Column(String(30))\n school = db.Column(String(100))\n sex = Column(TINYINT(1), nullable=False, default=0) #0保密 1男 2女\n addr = db.Column(String(100))\n content = db.Column(String(1000))\n create_time = Column(DateTime, default=dt_obj.now) #创建时间\n status = Column(TINYINT(1), nullable=False, default=0) #0等待审核 1获得资格\n\n def as_dict(self):\n return dict(\n id = self.id,\n sex = self.sex,\n cat = self.cat,\n coupon_id = self.coupon_id,\n user_id = self.user_id,\n trial_id = self.trial_id,\n name = self.name,\n phone = self.phone,\n school = self.school,\n addr = self.addr,\n content = self.content,\n create_time = self.create_time,\n status = self.status\n 
)\n\n\nclass TrialComment(Model):\n ''' 体会评价 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n trial_id = Column(Integer, ForeignKey('trial.id'), nullable=False)\n photos = db.Column(String(1000))\n content = db.Column(String(10000))\n create_time = Column(DateTime, default=dt_obj.now) #创建时间\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n trial_id = self.trial_id,\n photos = self.photos,\n content = self.content,\n create_time = self.create_time,\n photo_list = prefix_img_list(self.photos)\n )\n\n\nclass ImageSize(Model):\n __tablename__ = 'image_size' \n __table_args__ = (\n PrimaryKeyConstraint('key'),\n )\n key = Column(String(32))\n width = Column(Integer, default=0)\n height = Column(Integer, default=0)\n\n def as_dict(self):\n return dict(\n key = self.key,\n width = self.width,\n height = self.height\n )\n\n\nclass WechatReg(Model):\n ''' 体会评价 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n open_id = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now) #创建时间\n\n\nclass RecommendBeautyItem(Model):\n ''' 美攻略推荐项目 '''\n id = db.Column(Integer, primary_key=True)\n item_id = Column(Integer, ForeignKey('item.id'), nullable=False)\n create_time = Column(DateTime, default=dt_obj.now) #创建时间\n\n def as_dict(self):\n return dict(\n id = self.id,\n item_id = self.item_id,\n create_time = self.create_time\n )\n\n\nclass BeautyEntry(Model):\n ''' 美攻略 '''\n id = db.Column(Integer, primary_key=True)\n title = db.Column(String(100))\n icon = db.Column(String(100)) #列表图\n image = db.Column(String(100)) #首页图\n photo = db.Column(String(100)) #详情页大图\n items = db.Column(String(100))\n view_count = Column(Integer, default=0)\n create_time = Column(DateTime, default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0)#0未上线 1上线\n\n def as_dict(self):\n return dict(\n id = 
self.id,\n icon = prefix_img_domain(self.icon),\n view_count = self.view_count,\n title = self.title,\n image = prefix_img_domain(self.image),\n photo = prefix_img_domain(self.photo),\n items = self.items,\n item_id_list= map(int, filter(bool, (self.items or '').split(','))),\n status = self.status,\n create_time = self.create_time\n )\n\n\nclass DailyCoupon(Model):\n ''' 每日优惠券 '''\n id = db.Column(Integer, primary_key=True)\n coupon_id = Column(Integer, ForeignKey('coupon.id'), nullable=False)\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n total = Column(Integer, default=0)\n sent = Column(Integer, default=0)\n title = db.Column(String(100))\n use_condition = db.Column(String(100))\n use_time = db.Column(String(100))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n title = self.title,\n coupon_id = self.coupon_id,\n start_time = self.start_time,\n use_time = self.use_time or '',\n use_condition = self.use_condition or '',\n end_time = self.end_time,\n sent = self.sent or 0,\n total = self.total or 0,\n remain = self.total-self.sent,\n create_time = self.create_time\n )\n\n\nclass DailyUser(Model):\n ''' 用户每日优惠券 '''\n __table_args__ = (\n UniqueConstraint('daily_id', 'user_id'),\n )\n\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n daily_id = Column(Integer, ForeignKey('daily_coupon.id'), nullable=False)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n daily_id= self.daily_id,\n create_time=self.create_time\n )\n\n\nclass AlipayOrderUser(Model):\n ''' 支付宝支付订单对应用户支付宝账号 '''\n id = db.Column(Integer, primary_key=True)\n order_no = db.Column(String(100), unique=True)\n buyer_email = db.Column(String(100), index=True)\n create_time = Column(DateTime, default=dt_obj.now)\n\n\n\nclass RecommendHospital(Model):\n ''' 推荐医院 '''\n id = 
db.Column(Integer, primary_key=True)\n hospital_id = Column(Integer, ForeignKey('hospital.id'), unique=True)\n sort_order = Column(Integer, default=0) #小的排在前面\n tag = db.Column(String(50))\n color = db.Column(String(50))\n\n def as_dict(self):\n return dict(\n id = self.id,\n hospital_id = self.hospital_id,\n sort_order = self.sort_order,\n tag = self.tag,\n color = self.color\n )\n\n\nclass Article(Model):\n ''' 通知文章 '''\n id = db.Column(Integer, primary_key=True)\n title = db.Column(String(300))\n desc = db.Column(String(1000))\n image = db.Column(String(300))\n link = db.Column(String(300))\n create_time = Column(DateTime, default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0) #0未上线 1上线\n\n def as_dict(self):\n return dict(\n id = self.id,\n title = self.title,\n desc = self.desc,\n image = self.image,\n link = self.link,\n create_time = self.create_time,\n status = self.status\n )\n \n\nclass Notification(Model):\n ''' 消息通知 '''\n id = db.Column(Integer, primary_key=True)\n article_id = Column(Integer, ForeignKey('article.id'))\n user_id = Column(Integer, ForeignKey('user.id'))\n create_time = Column(DateTime, default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0) #0未读 1已读\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n article_id = self.article_id,\n create_time = self.create_time,\n status = self.status\n )\n\n\n\nclass RoomDesignDetail(Model):\n ''' 寝室设计详情 '''\n id = Column(Integer, primary_key=True)\n room_name = Column(String(30), unique=True)\n applyer_name = Column(String(30))\n addr = Column(String(30))\n phone = Column(String(30), unique=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n school_id = Column(Integer, ForeignKey('school.id'))\n apply_no = Column(String(30), unique=True) #编号\n pics = Column(String(500))\n vote_count = db.Column(Integer, default=0) #投票数量数量\n pics_count = db.Column(Integer, default=0, index=True) #图片数量\n create_time = Column(DateTime, 
default=dt_obj.now)\n\n def as_dict(self):\n result = dict(\n id = self.id,\n room_name = self.room_name,\n apply_no = self.apply_no,\n addr = self.addr,\n phone = self.phone,\n applyer_name= self.applyer_name,\n school_id = self.school_id,\n vote_count = self.vote_count,\n pics_count = self.pics_count,\n user_id = self.user_id,\n pics = self.pics,\n orig_pics = imgs_to_list(self.pics),\n create_time = self.create_time,\n pic_list = prefix_img_list_thumb(self.pics, width=720),\n thumb_pic_list = prefix_img_list_thumb(self.pics),\n )\n if len(result['pic_list'])<4:\n for i in range(4-len(result['pic_list'])):\n result['pic_list'].append('')\n if len(result['thumb_pic_list'])<4:\n for i in range(4-len(result['thumb_pic_list'])):\n result['thumb_pic_list'].append('')\n return result\n\n\nclass RoomDesignVotePrivilege(Model):\n ''' 投票权限 '''\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n status = Column(TINYINT(1), nullable=False, default=0) #0未使用 1已使用\n source = Column(TINYINT(1), nullable=False, default=0) #1完成申请额度(20票) 2完成一单(200票)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n status = self.status,\n source = self.source,\n create_time = self.create_time\n )\n\n\nclass RoomDesignVoteLog(Model):\n ''' 投票记录log '''\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n room_id = Column(Integer, ForeignKey('room_design_detail.id'))\n source = Column(TINYINT(1), nullable=False, default=0) #1完成申请额度(20票) 2完成一单(200票)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n room_id = self.room_id,\n source = self.source,\n create_time= self.create_time\n )\n\n\n\nclass RedpackQuestion(Model):\n ''' 红包推荐问题 '''\n id = Column(Integer, primary_key=True)\n content = Column(String(1000))\n create_time = Column(DateTime, 
default=dt_obj.now)\n status = Column(TINYINT(1), nullable=False, default=0) #0下线 1上线\n\n def as_dict(self):\n return dict(\n id = self.id,\n content = self.content,\n create_time = self.create_time,\n status = self.status\n )\n\n\nclass RedpackUserQuestion(Model):\n ''' 红包用户问答 '''\n id = Column(Integer, primary_key=True)\n qr_user_id = Column(Integer, ForeignKey('qr_code_user.id'))\n question_id = Column(Integer, ForeignKey('redpack_question.id'))\n price = Column(MoneyField) #需支付价格\n question = Column(String(1000))\n answer = Column(String(1000))\n is_custom = Column(TINYINT(1), nullable=False, default=0) #0美分分提供问题 1自定义问题\n is_random = Column(TINYINT(1), nullable=False, default=0) #0不随机 1随机\n price = Column(MoneyField) #需支付价格\n money = Column(MoneyField, default=0) #总收到金额\n status = Column(TINYINT(1), nullable=False, default=0) #0新下单 1支付中 2支付成功\n view_count = db.Column(Integer, default=0) #查看数量\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n view_count = self.view_count or 0,\n qr_user_id = self.qr_user_id,\n question_id = self.question_id,\n is_custom = self.is_custom,\n is_random = self.is_random,\n question = self.question,\n answer = self.answer,\n price = format_price(self.price),\n money = format_price(self.money),\n status = self.status,\n create_time = self.create_time\n )\n\n\nclass RedpackPay(Model):\n ''' 红包支付纪录 '''\n id = Column(Integer, primary_key=True)\n qr_user_id = Column(Integer, ForeignKey('qr_code_user.id'))\n user_question_id = Column(Integer, ForeignKey('redpack_user_question.id'))\n order_no = db.Column(String(30), unique=True)\n transaction_id = db.Column(String(100))\n price = Column(MoneyField) #需支付价格\n status = Column(TINYINT(1), nullable=False, default=0) #0新下单 1支付中 2支付成功\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n order_no = self.order_no,\n qr_user_id = self.qr_user_id,\n transaction_id = 
self.transaction_id,\n user_question_id = self.user_question_id,\n price = format_price(self.price),\n status = self.status,\n create_time = self.create_time\n )\n\n\nclass RedpackPayUser(Model):\n ''' 问题查看用户'''\n id = Column(Integer, primary_key=True)\n qr_user_id = Column(Integer, ForeignKey('qr_code_user.id'))\n price = Column(MoneyField) #需支付价格\n user_question_id = Column(Integer, ForeignKey('redpack_user_question.id'))\n pay_id = Column(Integer, ForeignKey('redpack_pay.id'))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n qr_user_id = self.qr_user_id,\n price = format_price(self.price),\n pay_id = self.pay_id,\n user_question_id = self.user_question_id,\n create_time = self.create_time,\n )\n\n\nclass UserDevice(Model):\n ''' 用户设备 '''\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=True)\n device_id = db.Column(String(50), unique=True)\n push_token = db.Column(String(50))\n os_version = db.Column(String(10))\n app_version = db.Column(String(10))\n device_name = db.Column(String(100))\n cat = Column(TINYINT(1), nullable=False, default=0) #1ios 2android\n create_time = Column(DateTime, default=dt_obj.now)\n update_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n ''' '''\n return dict(\n id = self.id,\n user_id = self.user_id,\n device_id = self.device_id,\n push_token = self.push_token,\n os_version = self.os_version,\n app_version = self.app_version,\n device_name = self.device_name,\n cat = self.cat,\n create_time = self.create_time,\n update_time = self.update_time\n )\n\n\nclass UserDeviceLog(Model):\n ''' 用户历史设备表 '''\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n device_id = db.Column(String(50), index=True)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n ''' '''\n return dict(\n id = self.id,\n user_id = self.user_id,\n device_id = 
self.device_id,\n create_time = self.create_time\n )\n\n\nclass RdUserQrcode(Model):\n ''' 现金用户分享二维码 '''\n __table_args__ = (\n UniqueConstraint('user_id', 'qrcode_id'),\n )\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n qrcode_id = Column(Integer, ForeignKey('qrcode.id'))\n follow_count = Column(Integer, default=0)\n reg_count = Column(Integer, default=0)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n qrcode_id = self.qrcode_id,\n user_id = self.user_id,\n follow_count= self.follow_count,\n reg_count = self.reg_count,\n create_time = str(self.create_time)\n )\n\n\nclass RdQrcodeUser(Model):\n ''' 二维码注册用户 '''\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n qrcode_id = Column(Integer, ForeignKey('qrcode.id'))\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n qrcode_id = self.qrcode_id,\n user_id = self.user_id,\n create_time = str(self.create_time)\n )\n\n\nclass RdMoneyPrize(Model):\n ''' 现金奖励金额 '''\n\n id = db.Column(Integer, primary_key=True)\n amount = Column(Integer, default=0)\n sent = Column(Integer, default=0)\n total = Column(Integer, default=0)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n amount = self.amount,\n sent = self.sent,\n total = self.total\n )\n\n\n\nclass RdDrawCounter(Model):\n ''' 现金奖励抽奖计数 '''\n\n id = db.Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.id'))\n used = Column(Integer, default=0)\n total = Column(Integer, default=0)\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n used = self.used,\n total = self.total\n )\n\n\nclass RdDrawCounterLog(Model):\n ''' 现金奖励抽奖机会变更历史 '''\n\n id = db.Column(Integer, primary_key=True)\n user_id = 
Column(Integer, ForeignKey('user.id'))\n count = Column(Integer, default=0)\n source = Column(TINYINT(1), nullable=False, default=1) #1额度申请 2邀请 3完成订单\n create_time = Column(DateTime, default=dt_obj.now)\n\n def as_dict(self):\n return dict(\n id = self.id,\n user_id = self.user_id,\n count = self.count,\n source = self.source\n )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6411543488502502, "alphanum_fraction": 0.6900878548622131, "avg_line_length": 27.464284896850586, "blob_id": "119fd1f0e386a8cd23281ba5abc314a3734a7595", "content_id": "e22ee8dcec78ed99cb6aad79f08f92b4338031a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "no_license", "max_line_length": 101, "num_lines": 28, "path": "/migrations/versions/5adc2c5e2c4f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 5adc2c5e2c4f\nRevises: 498586bf16c2\nCreate Date: 2016-03-03 13:59:11.264954\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5adc2c5e2c4f'\ndown_revision = '498586bf16c2'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('promoter', sa.Column('unfollow_count', sa.Integer(), nullable=True))\n op.create_index(op.f('ix_promoter_unfollow_count'), 'promoter', ['unfollow_count'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_promoter_unfollow_count'), table_name='promoter')\n op.drop_column('promoter', 'unfollow_count')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6757000684738159, "alphanum_fraction": 0.6766034364700317, "avg_line_length": 33.625, "blob_id": "79b9fbefb9532e51aeb8708ada2e9c800be0ba61", "content_id": "3badf32869fa45819e92966e37766f80499822e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 108, "num_lines": 32, "path": "/util/sqlerr.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport re\n\n\nSQL_DUPLICATE = re.compile(r\"Duplicate entry .*? for key\")\n_DUPLICATE_PRIMARY = re.compile(r\"Duplicate entry '.*?' for key 'PRIMARY'\")\nclass RegDup(object):\n @staticmethod\n def search(string):\n return bool(SQL_DUPLICATE.search(string)) and not(bool(_DUPLICATE_PRIMARY.search(string)))\n\nSQL_REF_NOT_EXIST_ERR = re.compile(\"a foreign key constraint fails\")\n\nSQL_DUPLICATE_ENTRY = RegDup\n\nSQL_MONEY_NOT_ENOUGH = re.compile('BIGINT UNSIGNED value is out of range in')\n\nSQL_DUPLICATE_NAME = re.compile(r\"Duplicate entry '.*?' for key 'name'\")\n\nSQL_DUPLICATE_PHONE = re.compile(r\"Duplicate entry '.*?' for key 'phone'\")\n\nSQL_DUPLICATE_WECHAT = re.compile(r\"Duplicate entry '.*?' for key 'wx_id'\")\n\nSQL_DUPLICATE_BIND_WECHAT = re.compile(r\"with identity key\")\n\n\nSQL_DUPLICATE_ORDER_NO = re.compile(r\"Duplicate entry '.*?' for key 'order_no'\")\n\nSQL_DUPLICATE_COUPON = re.compile(r\"Duplicate entry '.*?' for key 'coupon_id'\")\n\n\nSQL_REF_COUPON_NOT_EXIST = re.compile(\"a foreign key constraint fails .*? 
FOREIGN KEY \\(\\`coupon_id\")" }, { "alpha_fraction": 0.6128048896789551, "alphanum_fraction": 0.7042682766914368, "avg_line_length": 24.230770111083984, "blob_id": "c79a5ac9f7cd1c82ef4266b49e607eaf8810b25c", "content_id": "9df8d6106b7af00ee1789d48ac0d27d46c00e401", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/migrations/versions/75f96105f81_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 75f96105f81\nRevises: 41e40e694b32\nCreate Date: 2015-11-27 15:04:57.429923\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '75f96105f81'\ndown_revision = '41e40e694b32'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('repayment', 'data')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('repayment', sa.Column('data', mysql.VARCHAR(length=1000), nullable=True))\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5640944242477417, "alphanum_fraction": 0.5656043887138367, "avg_line_length": 35.26224899291992, "blob_id": "fb362206a1ee955b61cfdbbe36a24a7e77ea7a33", "content_id": "cbb483d0b5ab6ca20b660039c6b37cc9766d05f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12875, "license_type": "no_license", "max_line_length": 117, "num_lines": 347, "path": "/ops/order.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport json\nfrom collections import defaultdict\nfrom sqlalchemy import and_\n\nfrom models import db\nfrom models import Order\nfrom models import UserCoupon\nfrom models import UserCredit\nfrom models import ServiceCode\nfrom models import OrderLog\nfrom models import Repayment\nfrom models import Repayment\nfrom models import PayLogOrderNo\nfrom models import PeriodPayLog\nfrom util.utils import random_str\nfrom util.utils import random_no\nfrom util.utils import get_time_str_from_dt\nfrom util.utils import dt_obj\nfrom util.utils import format_price\nfrom util.sqlerr import SQL_DUPLICATE\nfrom util.sqlerr import SQL_DUPLICATE_ORDER_NO\nfrom util.sqlerr import SQL_DUPLICATE_COUPON\nfrom util.sqlerr import SQL_REF_COUPON_NOT_EXIST\nfrom ops.utils import get_page\nfrom ops.utils import get_items\nfrom ops.utils import count_items\nfrom constants import ORDER_STATUS\nfrom constants import SERVICE_STATUS\n\n\nclass OrderService(object):\n ''' '''\n @staticmethod\n def add_order(user_id, item_id, hospital_id, \\\n price, credit_amount, total_fee, coupon_amount, total, \\\n credit_choice_id, user_coupon_id, order_no, credit_verified, \\\n status=ORDER_STATUS.NEW_ORDER):\n try:\n coupon_id = user_coupon_id or None #外键约束 不能为0\n credit_choice_id = credit_choice_id or None\n order = Order(\n total_fee = total_fee,\n user_id 
= user_id,\n item_id = item_id,\n hospital_id = hospital_id,\n total = total,\n credit_choice_id = credit_choice_id,\n coupon_id = coupon_id,\n order_no = order_no,\n credit_amount = credit_amount,\n price = price,\n status = status,\n coupon_amount = coupon_amount,\n credit_verified = credit_verified\n )\n db.session.add(order)\n db.session.commit()\n return order.id\n except Exception as e:\n db.session.rollback()\n import traceback\n traceback.print_exc()\n if SQL_DUPLICATE_ORDER_NO.search(str(e)):\n print 'duplicate order no'\n assert 0, '服务器忙'\n\n @staticmethod\n def update_order(where, commit=True, **kw):\n count = Order.query.filter(where).update(kw, synchronize_session=False)\n db.session.commit()\n return count\n\n @staticmethod\n def get_user_order(order_id, user_id):\n query = and_(\n Order.id==order_id,\n Order.user_id==user_id\n )\n return Order.query.filter(query).first()\n\n @staticmethod\n def create_servicecode(order_id):\n random_code = random_str()\n service_code = ServiceCode.query.filter(ServiceCode.code==random_code).first()\n while service_code:\n random_code = random_str()\n service_code = ServiceCode.query.filter(ServiceCode.code==random_code).first()\n try:\n service = ServiceCode(order_id=order_id, code=random_code)\n db.session.add(service)\n db.session.commit()\n return random_code\n except Exception as e:\n db.session.rollback()\n\n @staticmethod\n def get_servicecode(order_id):\n return ServiceCode.query.filter(ServiceCode.order_id==order_id).first()\n\n @staticmethod\n def get_paged_orders(**kw):\n return get_page(Order, {}, **kw)\n\n @staticmethod\n def get_orders(where):\n ''' 订单列表 '''\n return Order.query.filter(where).all()\n @staticmethod\n def create_no():\n ''' 随机生成订单号 第12位插入一个'''\n now = dt_obj.now()\n timestr = get_time_str_from_dt(now, format='%Y%m%d%H%M%S%f')\n random_number = random_no(4)\n print now, timestr, random_number\n return timestr[:12] + random_number + timestr[12:]\n\n @staticmethod\n def 
get_order_by_orderno(order_no):\n ''' '''\n return Order.query.filter(Order.order_no==order_no).first()\n\n @staticmethod\n def update_order_status(order_id, status, user_id=None, where=None):\n query = and_()\n query.append(Order.id==order_id)\n if user_id: query.append(Order.user_id==user_id)\n if where is not None: query.append(where)\n count = Order.query.filter(query).update({'status':status},synchronize_session=False)\n if count:\n log = OrderLog(order_id=order_id, status=status)\n db.session.add(log)\n db.session.commit()\n return count\n\n @staticmethod\n def repayment(user_id, pay_method, coupon_id, price, data, order_no):\n try:\n repayment = Repayment(\n pay_method=pay_method, coupon_id=coupon_id,\n user_id=user_id, price=price, order_no=order_no,\n data=data)\n db.session.add(repayment)\n db.session.commit()\n return repayment.id\n except Exception as e:\n print 'except'\n print str(e)\n db.session.rollback()\n if SQL_REF_COUPON_NOT_EXIST.search(str(e)):\n print '优惠券不存在'\n elif SQL_DUPLICATE_ORDER_NO.search(str(e)):\n print '订单号已存在'\n elif SQL_DUPLICATE_COUPON.search(str(e)):\n print '优惠券已被使用'\n\n @staticmethod\n def update_repayment(where, **kw):\n ''' 更新还款单状态 '''\n count = Repayment.query.filter(where).update(kw, synchronize_session=False)\n db.session.commit()\n return count\n\n @staticmethod\n def book_surgery(order_id, book_time):\n ''' 预约时间手术 '''\n query = and_(\n ServiceCode.order_id==order_id,\n ServiceCode.status==SERVICE_STATUS.STANDBY\n )\n data = {\n 'status' : SERVICE_STATUS.BOOKED,\n 'book_time' : book_time\n }\n count = ServiceCode.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def cancel_book(order_id):\n ''' 取消预约 '''\n query = and_(\n ServiceCode.order_id==order_id,\n ServiceCode.status==SERVICE_STATUS.BOOKED\n )\n data = {\n 'status' : SERVICE_STATUS.STANDBY,\n }\n count = ServiceCode.query.filter(query).update(data)\n db.session.commit()\n return count\n\n @staticmethod\n def 
verify_servicecode(order_id, service_code):\n ''' 验证服务码 确认手术 '''\n query = and_(\n ServiceCode.order_id==order_id,\n ServiceCode.code==service_code,\n ServiceCode.status==SERVICE_STATUS.BOOKED\n )\n count = ServiceCode.query.filter(query).update({'status':SERVICE_STATUS.VERIFYED})\n db.session.commit()\n if count:\n print '确认手术'\n else:\n print '服务码找不到'\n return count\n\n @staticmethod\n def cancel_surgery(order_id):\n ''' 取消手术 '''\n query = and_(\n ServiceCode.order_id==order_id,\n ServiceCode.status==SERVICE_STATUS.VERIFYED\n )\n count = ServiceCode.query.filter(query).update({'status':SERVICE_STATUS.BOOKED})\n db.session.commit()\n if count:\n print '已取消手术'\n else:\n print '服务码找不到'\n return count\n\n @staticmethod\n def get_user_repayment(repayment_id, user_id):\n query = and_(\n Repayment.id==repayment_id,\n Repayment.user_id==user_id\n )\n repayment = Repayment.query.filter(query).first()\n return repayment\n\n @staticmethod\n def get_repayment_by_orderno(order_no):\n query = and_(\n Repayment.order_no==order_no\n )\n repayment = Repayment.query.filter(query).first()\n return repayment\n\n @staticmethod\n def count_order(where=None):\n return count_items(Order, where=where)\n\n @staticmethod\n def get_order_by_id(order_id):\n order = Order.query.filter(Order.id==order_id).first()\n return order\n\n @staticmethod\n def get_service_codes_by_order_ids(order_ids):\n ''' '''\n rows = ServiceCode.query.filter(ServiceCode.order_id.in_(order_ids)).all()\n return {i.order_id:i.status for i in rows}\n\n @staticmethod\n def get_servicecodes_by_order_ids(order_ids, **kw):\n rows = ServiceCode.query.filter(ServiceCode.order_id.in_(order_ids)).all()\n return [i.as_dict() for i in rows]\n\n @staticmethod\n def get_orders_by_ids(order_ids):\n ''' 返回 '''\n return get_items(Order, order_ids)\n\n @staticmethod\n def add_repayment_log(period_pay_log_id, price, total, order_no):\n try:\n log = PayLogOrderNo(period_pay_log_id=period_pay_log_id, price=price, total=total, 
order_no=order_no)\n db.session.add(log)\n db.session.commit()\n return log.id\n except Exception as e:\n import traceback\n traceback.print_exc()\n db.session.rollback()\n if SQL_DUPLICATE.search(str(e)):\n assert 0, '分期{}已还{}'.format(period_pay_log_id, price)\n\n @staticmethod\n def gen_repayment_log(repayment):\n ''' 还款ID '''\n log_list = json.loads(repayment.data)\n print log_list, 'log_list'\n for data in log_list:\n print data,'...'\n period_pay_log_id = data['id']\n amount = data['amount']\n fee = data['fee']\n punish = data['punish']\n #total = format_price(float(amount)+float(fee or 0)+float(punish or 0))\n price = format_price(float(amount)+float(fee or 0))\n OrderService.add_repayment_log(period_pay_log_id, price, repayment.price, repayment.order_no)\n\n @staticmethod\n def order_repayment_logs_amount(order_id):\n ''' 已还的总额 '''\n subquery = db.session.query(PeriodPayLog.id).filter(PeriodPayLog.order_id==order_id).subquery()\n logs = PayLogOrderNo.query.filter(PayLogOrderNo.period_pay_log_id.in_(subquery)).all()\n return sum(log.price for log in logs)\n\n @staticmethod\n def get_order_repayment_logs_amount(order_id):\n ''' 所有已还的总额按订单划分 '''\n subquery = db.session.query(PeriodPayLog.id).filter(PeriodPayLog.order_id==order_id).subquery()\n logs = PayLogOrderNo.query.filter(PayLogOrderNo.period_pay_log_id.in_(subquery)).all()\n\n order_no_map = defaultdict(lambda:0)\n order_no_total_map = {}\n for log in logs:\n order_no_total_map[log.order_no] = format_price(log.total)\n order_no_map[log.order_no] += format_price(log.price)\n\n data = {}\n for order_no, price in order_no_map.items():\n repayment = Repayment.query.filter(Repayment.order_no==order_no).first()\n assert repayment, '还款不存在'\n data[order_no] = {\n 'price': format_price(price),\n 'pay_method': repayment.pay_method, \n 'total': order_no_total_map[order_no],\n 'transaction_id': repayment.transaction_id\n }\n return data\n\n @staticmethod\n def get_order_by_coupon_id(coupon_id):\n ''' '''\n return 
Order.query.filter(Order.coupon_id==coupon_id).first()\n\ndef set_order_status(order, comment=None, servicecode=None):\n ''' 根据服务码状态 是否已评论重新订单状态 '''\n if order['user_finished']:\n order['status'] = ORDER_STATUS.FINISH\n elif order['status']==ORDER_STATUS.FINISH:\n order['status'] = ORDER_STATUS.PAY_SUCCESS\n if order['credit_verified']==0 and order['status'] in [ORDER_STATUS.PAY_SUCCESS]:\n order['status'] = ORDER_STATUS.VERIFYING\n elif order['credit_verified']==2:\n order['status'] = ORDER_STATUS.REJECTED\n elif order['status']==ORDER_STATUS.PAY_SUCCESS:\n if servicecode['status'] == 1:\n order['status'] = ORDER_STATUS.BOOKED\n elif servicecode['status'] == 2:\n order['status'] = ORDER_STATUS.CONFIRMED\n elif order['status'] == ORDER_STATUS.FINISH and not comment:\n order['status'] = ORDER_STATUS.TO_COMMENT\n" }, { "alpha_fraction": 0.6523722410202026, "alphanum_fraction": 0.6770073175430298, "avg_line_length": 29.44444465637207, "blob_id": "924fa3e5cb0c02a53004669c855900f78e586800", "content_id": "c562b496bb2ef6a68de3ef75fece2321bc9a414b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 72, "num_lines": 36, "path": "/migrations/versions/2b3331ab4b9d_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2b3331ab4b9d\nRevises: f1412ee78a9\nCreate Date: 2015-12-05 10:28:51.755265\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2b3331ab4b9d'\ndown_revision = 'f1412ee78a9'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('promoter',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('phone', sa.String(length=20), nullable=True),\n sa.Column('passwd', sa.String(length=50), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('create_by', sa.Integer(), nullable=False),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.ForeignKeyConstraint(['create_by'], ['promoter.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('phone')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('promoter')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6383190155029297, "alphanum_fraction": 0.6611515283584595, "avg_line_length": 39.83783721923828, "blob_id": "aa554ae232a809fb2ce5249ecace591f6045ace6", "content_id": "cd99ff36eda99829051642e755a34c7e1bdc853b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3022, "license_type": "no_license", "max_line_length": 72, "num_lines": 74, "path": "/migrations/versions/3d0882a6044_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3d0882a6044\nRevises: 31291b2ba259\nCreate Date: 2016-01-26 14:39:20.133527\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3d0882a6044'\ndown_revision = '31291b2ba259'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('room_design_apply',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('school_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('room_name', sa.String(length=30), nullable=True),\n sa.Column('applyer_name', sa.String(length=30), nullable=True),\n sa.Column('phone', sa.String(length=30), nullable=True),\n sa.Column('addr', sa.String(length=30), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['school_id'], ['school.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('room_name')\n )\n op.create_table('room_design_vote_privilege',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('status', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('source', mysql.TINYINT(display_width=1), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('room_design_detail',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('school_id', sa.Integer(), nullable=True),\n sa.Column('room_id', sa.Integer(), nullable=True),\n sa.Column('pics', sa.String(length=500), nullable=True),\n sa.Column('vote_count', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['room_id'], ['room_design_apply.id'], ),\n sa.ForeignKeyConstraint(['school_id'], ['school.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('room_design_vote_log',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('room_id', sa.Integer(), nullable=True),\n sa.Column('vote_count', 
sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['room_id'], ['room_design_apply.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('room_design_vote_log')\n op.drop_table('room_design_detail')\n op.drop_table('room_design_vote_privilege')\n op.drop_table('room_design_apply')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6254237294197083, "alphanum_fraction": 0.6644067764282227, "avg_line_length": 21.69230842590332, "blob_id": "47e444c3ac27dc329bc7be4461f983227010bc41", "content_id": "37bd849b4b588273665e5ac73899760ce16a9616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 83, "num_lines": 26, "path": "/migrations/versions/a123ae998bf_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: a123ae998bf\nRevises: 36d5b6be1479\nCreate Date: 2015-11-11 17:01:19.461450\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'a123ae998bf'\ndown_revision = '36d5b6be1479'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('image', sa.String(length=300), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('item', 'image')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6653845906257629, "avg_line_length": 31.5, "blob_id": "6430d612d6842358f6daa593db7edaedba98a515", "content_id": "903782f38b3462ac2f381b57f143177f2af55c1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 86, "num_lines": 32, "path": "/migrations/versions/569e3d7f70ab_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 569e3d7f70ab\nRevises: 5784ac6510c3\nCreate Date: 2015-12-10 10:39:57.648906\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '569e3d7f70ab'\ndown_revision = '5784ac6510c3'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('coupon', sa.Column('sub_cat_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'coupon', 'item_sub_cat', ['sub_cat_id'], ['id'])\n op.add_column('user_coupon', sa.Column('sub_cat_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'user_coupon', 'item_sub_cat', ['sub_cat_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'user_coupon', type_='foreignkey')\n op.drop_column('user_coupon', 'sub_cat_id')\n op.drop_constraint(None, 'coupon', type_='foreignkey')\n op.drop_column('coupon', 'sub_cat_id')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6337115168571472, "alphanum_fraction": 0.67423015832901, "avg_line_length": 22.730770111083984, "blob_id": "64b927cf026893debab7220607d43c9f30d61790", "content_id": "1342dffcdbff33fcfc986cd45ab4a4bea5f223aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 617, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/migrations/versions/4a4cc4517bb_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4a4cc4517bb\nRevises: 4cf4f86adc0c\nCreate Date: 2015-11-11 14:04:48.035474\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4a4cc4517bb'\ndown_revision = '4cf4f86adc0c'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('activity_item', sa.Column('sort_order', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('activity_item', 'sort_order')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6240409016609192, "alphanum_fraction": 0.6930946111679077, "avg_line_length": 26.928571701049805, "blob_id": "beb5cff21631e815956b8ab849bb3e8668f3eecd", "content_id": "4cff4de0b4c93966831419f29903baa3779886ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/migrations/versions/29347d4f2522_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 29347d4f2522\nRevises: 2ab4005efb6c\nCreate Date: 2016-01-27 17:27:42.642697\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '29347d4f2522'\ndown_revision = '2ab4005efb6c'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('room_design_detail', sa.Column('apply_no', sa.String(length=30), nullable=True))\n op.create_unique_constraint(None, 'room_design_detail', ['apply_no'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'room_design_detail', type_='unique')\n op.drop_column('room_design_detail', 'apply_no')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5406004190444946, "alphanum_fraction": 0.5467519760131836, "avg_line_length": 27.822694778442383, "blob_id": "57a090b393cbce2d1838287ef83466e32b612ca9", "content_id": "aa69201f7dd5ecf92511fd23bc698d3feec89e5b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4210, "license_type": "permissive", "max_line_length": 100, "num_lines": 141, "path": "/static/user/js/my-not-reg.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "$(document).ready(function (){\n\tif(getCookie('sign_user')){\n\t\t$('.Log').hide();\n\t\t$('#my').show();\n\t\t$('.notDone').hide()\n\t\t$('.done').show();\n\t\t$('#HK').attr('href','/static/user/repay.html');\n\t\t$('#WD').attr('href','/static/user/order-list.html');\n\t\t$('#DJQ').attr('href','/static/user/coupon.html');\n\t\t$('#XYD').attr('href','/static/user/wishOrder.html');\n\t\t$('#login').attr('href','/static/user/login.html')\n\t\t$('#reg').attr('href','/user/signup/')\n\t\t//\n\t\t$.ajax({\n\t\t\txhrFields: {withCredentials: true},\n\t\t\ttype: \"post\",\n\t\t\turl: \"http://\"+getHostName()+\"/user/home/?\",\n\t\t\tdataType: 'json',\n\t\t\tdata: {\n\t\t\t\t\n\t\t\t},\n\t\t\tsuccess: function(data) {\n\t\t\t\t$('.ky').html(data.remain);\n\t\t\t\t$('.zje').html(data.total)\n\t\t\t\tif(data.code==1) {\n\t\t\t\t\t$('.my-text-cont').show();\n\t\t\t\t\t$('#reimbursement').html('');\n\t\t\t\t\t$('.Log').show();\n\t\t\t\t\t$('#my').hide();\n\t\t\t\t\t$('.notDone').show()\n\t\t\t\t\t$('.done').hide()\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tvar user = data.user;\n\t\t\t\t$('#my').find('img').attr('src', user.avatar);\n\t\t\t\t$('#my').find('span').html(user.name);\n\t\t\t\t$('#my').find('p').html('手机号:' + user.phone);\n\t\t\t\t//#0默认 1审核中 2审核通过 
3被拒\n\t\t\t\tif(data.apply_status==0){\n\t\t\t\t\t$('.status').html('预计申请额度¥10000');\n\t\t\t\t\t$('.circle-btn').attr('href','/static/user/applyer-infor.html');\n\t\t\t\t}else if(data.apply_status==1){\n\t\t\t\t\t$('.status').html('审核中');\n\t\t\t\t\t$('.circle-btn').attr('href','http://www.meifenfen.com/user/menu_credit_apply/')\n\t\t\t\t\t$('.circle-btn').html('查看申请')\n\t\t\t\t}else if(data.apply_status==2){\n\t\t\t\t\t$('.status').html('¥'+data.remain);\n\t\t\t\t\t$('.circle-btn').hide();\n\t\t\t\t\t$('.my-text-cont').hide();\n\t\t\t\t\t$('#amount_parent').show();\n\t\t\t\t}else if(data.apply_status==3){\n\t\t\t\t\t$('.status').html('审核没通过,请重新申请');\n\t\t\t\t\t$('.circle-btn').attr('href','http://www.meifenfen.com/user/menu_credit_apply/')\n\t\t\t\t\t$('.circle-btn').html('查看申请')\n\t\t\t\t}\n\t\t\t\t$('#lines').html(data.total);\n\t\t\t\t$('#vouchers').html(data.coupon_count + '张');\n\t\t\t\tif (data.period_to_pay==0) {\n\t\t\t\t\t$('#reimbursement').html('');\n\t\t\t\t}else{\n\t\t\t\t if(data.has_delayed) {\n $('#reimbursement').html('本期应还余额:¥' + data.period_to_pay + '(包含已逾期部分)');\n\t\t\t\t } else {\n\t\t\t\t\t $('#reimbursement').html('本期应还余额:¥' + data.period_to_pay + '(剩余' + data.remain_days + '天)');\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif (data.can_edit_name) {\n\t\t\t\t\t$('#editor').show()\n\t\t\t\t} else {\n\t\t\t\t\t$('#editor').hide()\n\t\t\t\t}\n\t\t\t},\n\t\t\terror: function() {\n\t\t\t\t\n\t\t\t}\t\t\t\t\n\t\t})\n}else{\n$('.Log').show();\n$('#my').hide();\n$('.notDone').show();\n$('.done').hide();\n$('#HK').attr('href','/static/user/login.html?next='+location.href);\n$('#WD').attr('href','/static/user/login.html?next='+location.href);\n$('#DJQ').attr('href','/static/user/login.html?next='+location.href);\n$('#XYD').attr('href','/static/user/login.html?next='+location.href);\n\nif(Common.UrlGet()['next']) {\n $('#login').attr('href','/static/user/login.html?next='+Common.UrlGet()['next'])\n} else {\n 
$('#login').attr('href','/static/user/login.html?next='+location.href)\n\n}\n$('#reg').attr('href','/user/signup/?next='+location.href)\n\n};\n//上传图片\nvar upload = function(_file,btn){\n\n\tif(_file.files.length === 0){\n\t\treturn;\n\t} \t\n\tvar data = new FormData();\n\tdata.append('file', _file.files[0]);\n\tdata.append('image_cat', 'avatar')\n\tvar request = new XMLHttpRequest();\n\trequest.onreadystatechange = function(){\n\t\tif(request.readyState == 4){\n\t\t\ttry {window.r = request.response;\n\t\t\t\tvar resp = JSON.parse(request.response);\n\t\t\t\t$('.mask').hide();\n\t\t\t\tbtn.attr('src', resp.fullpath)\n\t\t\t\t$(_file).attr('path',resp.image)\n\t\t\t} catch (e){\n\t\t\t\tvar resp = {\n\t\t\t\t\tstatus: 'error',\n\t\t\t\t\tdata: 'Unknown error occurred: [' + request.responseText + ']'\n\t\t\t\t};\n\t\t\t}\n\t\t\tconsole.log(resp.status + ': ' + resp.data);\n\t\t}\n\t};\n\n\trequest.upload.addEventListener('progress', function(e){\n\t\tconsole.log(parseInt(100*e.loaded/e.total));\n//\t\t$('.mask').find('span').html(parseInt(100*e.loaded/e.total)+'%')\n\t}, false);\n\n\trequest.open('POST', 'http://'+getHostName()+'/user/upload_image/?'+token);\n\trequest.send(data);\n}\n$(\"#ding\").change(function() {\n\tupload($(this)[0], $('.photo'));\n\t$('.mask').show()\n\n});\n\n\n\n\n})\n" }, { "alpha_fraction": 0.6466346383094788, "alphanum_fraction": 0.682692289352417, "avg_line_length": 28.714284896850586, "blob_id": "ecfaec9893c5c23b9e6fce1d3ea4336422ade4ef", "content_id": "cf7c6f84ad7d8009495b013dafcc03922161f0e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 101, "num_lines": 28, "path": "/migrations/versions/498586bf16c2_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 498586bf16c2\nRevises: 3d1f1303d3e0\nCreate Date: 2016-03-03 10:54:43.656812\n\n\"\"\"\n\n# revision identifiers, used by 
Alembic.\nrevision = '498586bf16c2'\ndown_revision = '3d1f1303d3e0'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('qr_code_user', sa.Column('status', mysql.TINYINT(display_width=1), nullable=True))\n op.create_index(op.f('ix_qr_code_user_status'), 'qr_code_user', ['status'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_qr_code_user_status'), table_name='qr_code_user')\n op.drop_column('qr_code_user', 'status')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6276422739028931, "alphanum_fraction": 0.6731707453727722, "avg_line_length": 22.653846740722656, "blob_id": "11e9e2ed43dfeb309f3f87a1b0f6dc259523d885", "content_id": "03a51da33c7f514de6b3532f55459d786f519fa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/migrations/versions/3d20dc8132b4_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 3d20dc8132b4\nRevises: 4e224649d340\nCreate Date: 2015-12-09 16:02:14.572280\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3d20dc8132b4'\ndown_revision = '4e224649d340'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(None, 'trial_apply', ['user_id', 'trial_id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'trial_apply', type_='unique')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6512500047683716, "alphanum_fraction": 0.6912500262260437, "avg_line_length": 27.571428298950195, "blob_id": "961b48c856681d20fe5c26b31fa4040df80dafb3", "content_id": "4064394abfd4d67f9ac5bee5bead588c72496569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 113, "num_lines": 28, "path": "/migrations/versions/480dd7e7caac_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 480dd7e7caac\nRevises: 59a610b5633d\nCreate Date: 2015-12-09 17:20:34.481073\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '480dd7e7caac'\ndown_revision = '59a610b5633d'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('trial', 'sex')\n op.add_column('trial_apply', sa.Column('sex', mysql.TINYINT(display_width=1), nullable=False))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('trial_apply', 'sex')\n op.add_column('trial', sa.Column('sex', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6326836347579956, "alphanum_fraction": 0.6335403919219971, "avg_line_length": 34.85384750366211, "blob_id": "fc96c0304264afc95e8f6100e2e36305a099540d", "content_id": "8bfaa7e0b8d14233fa4d447bd61339a10c643b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4741, "license_type": "no_license", "max_line_length": 109, "num_lines": 130, "path": "/ops/bulks.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport time\nimport urllib\nfrom collections import defaultdict\n\nfrom models import User\nfrom ops.user import UserService\nfrom ops.item import ItemService\nfrom ops.activity import ActivityService\nfrom ops.credit import CreditService\nfrom ops.order import OrderService\nfrom ops.promote import PromoteService\nfrom ops.trial import TrialService\nfrom ops.coupon import CouponService\nfrom ops.notification import NotificationService\nfrom ops.redpack import RedpackService\nfrom ops.data import DataService\nfrom settings import ANONY_IMAGE\n\n\nnow = lambda :int(time.time())\n\n\n\ndef fetch_refs(items, id_, func=None, keep_id=False, **kw):\n refs = defaultdict(dict)\n dest_key = kw.pop('dest_key', None) or id_.replace('_id', '')\n ref_key = kw.pop('ref_key', None) or 'id'\n for item in items:\n ref_id = item.get(id_)\n item[dest_key] = refs[ref_id]\n\n ref_list = func(refs.keys(), **kw)\n for item in ref_list:\n refs[item[ref_key]].update(item)\n if not keep_id:\n #重复的关联怎么优化处理 只保留一个引用\n for item in items:\n item.pop(id_, None)\n print items\n\n\nANONYMOUS_USER = {\n 'name': '匿名用户',\n 'id': 0,\n 'avatar': ANONY_IMAGE\n }\ndef fetch_user_refs(items, func=UserService.get_users_by_ids, **kw):\n id_ = 'user_id'\n fetch_refs(items, id_, func, **kw)\n for item in 
items:\n if item.get('is_anonymous'):\n item['user'] = ANONYMOUS_USER\n\n\ndef fetch_item_refs(items, id_='item_id', func=ItemService.get_items_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_item_cat_refs(items, id_='cat_id', func=ItemService.get_cats_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_item_subcat_refs(items, id_='sub_cat_id', func=ItemService.get_subcats_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_credit_refs(items, id_='user_id', func=UserService.get_credit_applies_by_ids, **kw):\n fetch_refs(items, id_, func, ref_key='user_id', **kw)\n\ndef fetch_activity_refs(items, id_='activity_id', func=ActivityService.get_activitys_by_ids, **kw): \n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_hospital_refs(items, id_='hospital_id', func=ItemService.get_hospitals_by_ids, **kw): \n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_servicecode_refrence(items, id_='order_id', func=OrderService.get_servicecodes_by_order_ids, **kw):\n fetch_refs(items, id_, func, ref_key='order_id', **kw)\n\n\ndef fetch_order_refs(items, id_='order_id', func=OrderService.get_orders_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_wechatinfo_refs(items, id_='user_id', func=PromoteService.get_user_qrcodes_by_user_ids, **kw):\n fetch_refs(items, id_, func, ref_key='user_id', **kw)\n\n\ndef fetch_apply_refs(items, id_='user_id', func=TrialService.get_trial_apply_by_user_ids, **kw):\n fetch_refs(items, id_, func, ref_key='user_id', **kw)\n\n\ndef fetch_coupon_refs(items, id_='coupon_id', func=CouponService.get_coupon_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_article_refs(items, id_='article_id', func=NotificationService.get_articles_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\ndef fetch_question_refs(items, id_='question_id', func=RedpackService.get_questions_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\ndef fetch_qrcodeuser_refs(items, id_='qr_user_id', 
func=RedpackService.get_qr_user_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\ndef fetch_school_refs(items, id_='school_id', func=DataService.get_schools_dict_by_ids, **kw):\n fetch_refs(items, id_, func, **kw)\n\n\ndef fetch_min_period_info(items):\n ''' 商品列表 获取最低分期价格期数 '''\n _, period_pay_choices = CreditService.get_paged_period_choices()\n choice_map = {i['id']:i for i in period_pay_choices}\n period_id_count_map = {i['id']:i['period_count'] for i in period_pay_choices}\n min_choice_id_func = lambda choices: max(choices, key=lambda i:period_id_count_map[i])\n for item in items:\n choices = item.pop('support_choice_list')\n min_choice = choice_map[min_choice_id_func(choices)] if choices else None\n if min_choice:\n period_count = min_choice['period_count']\n period_fee = min_choice['period_fee']\n price = item['price']\n period_amount = price/period_count\n item['period_count']= period_count\n item['period_money']= int(period_amount*(1+period_fee))\n else:\n item['period_count']= 1\n item['period_money']= item['price']\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6358768343925476, "alphanum_fraction": 0.6760374903678894, "avg_line_length": 25.678571701049805, "blob_id": "78744ef1dbb77da96863c839524e83bdc0eaa588", "content_id": "1cb9cf45e7ff9221766012bf55f73e4f48fe18b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 83, "num_lines": 28, "path": "/migrations/versions/57366d94ca9a_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 57366d94ca9a\nRevises: 2d7888ae13f9\nCreate Date: 2015-12-30 15:30:29.964102\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '57366d94ca9a'\ndown_revision = '2d7888ae13f9'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('hospital', sa.Column('rate', sa.Float(), nullable=True))\n op.add_column('hospital', sa.Column('sold_count', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('hospital', 'sold_count')\n op.drop_column('hospital', 'rate')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.4714680314064026, "alphanum_fraction": 0.47860100865364075, "avg_line_length": 41.20388412475586, "blob_id": "f89e7a0b0329184684432579caca3faab45220ef", "content_id": "5c2a4ee107ae160bca3d8cc9b8989fb2b191406f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4484, "license_type": "permissive", "max_line_length": 126, "num_lines": 103, "path": "/static/user/js/main.js", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "$(function(){\n $.ajax({\n url: \"http://\"+getHostName()+\"/user\",\n type: 'post',\n dataType: 'json',\n success:function(data){\n\n var banner=data.banner;\t\t\t\n var activity=data.activity;\n var activity_items=data.activity_items;\n var recommend_items=data.recommend_items;\n\t\t\t// alert(recommend_items[0].item.has_fee)\n\t\t\t$('.img').remove();//先删除原先有的banner\n $('.mui-indicator').remove();\n for(var i=0;i<banner.length;i++){\n \n var item=$('<div class=\"mui-slider-item img\"><a href=\"'+banner[i].link+'\"><img src=\"'+banner[i].image+'\"></a></div>');\n var itemList=$('<div class=\"mui-indicator\"></div>');\n\n \t$('#after').after(item);//动态加载banner节点元素\n \t$('#append').append(itemList);\n \tif (i==0) {\n \t\titemList.addClass('mui-active');\n \t};\n \t$('.mui-slider-item-duplicate img').attr('src',banner[0].image);//第一个和最后一个都用第一张图片\n };\n for(var j=0;j<recommend_items.length;j++){ \n \t$('.title').eq(j).html(recommend_items[j].item.title);\n \t$('h5').eq(j).html(recommend_items[j].desc)\n 
\t$('.orig').eq(j).html('¥'+recommend_items[j].item.period_money);\n \t$('.price').eq(j).html('x'+recommend_items[j].item.period_count);\n \t$('.big-img').eq(j).attr('src',recommend_items[j].image);\n \tvar item_id = recommend_items[j].item.id;\n \tvar click_event = function (item_id) {\n var func = function () {\n window.location = '/static/user/detail.html?item_id=' + item_id;\n }\n return func\n }(item_id)\n $('.big-img').eq(j).click(click_event)\n };\n for(var k=0;k<activity_items.length;k++){\n $('.list').find('h4').eq(k).html(activity_items[k].item.title);\n $('.list').find('.line-throu').eq(k).html(activity_items[k].item.orig_price);\n $('.list').find('.color-red').eq(k).html(activity_items[k].price);\n var item_id = activity_items[k].item.id;\n var click_event = function (item_id) {\n var func = function () {\n window.location = '/static/user/detail.html?item_id=' + item_id;\n }\n return func\n }(item_id)\n $('.list').eq(k).click(click_event)\n if(activity_items[k].item.has_fee==false){\n $('.list').find('img').eq(k).css('display','none');\n }\n console.log($('.period').length);\n if (k>0) {\n $('.period').eq(k).html('月供:'+activity_items[k].item.period_money+'x'+activity_items[k].item.period_count)\n }else if(k==0){\n $('#yuegong').find('b').html('¥'+activity_items[k].item.period_money);\n $('#yuegong').find('span').html('x'+activity_items[k].item.period_count);\n }\n };\n //倒计时;\n console.log(activity.end_time);\n var timeList=activity.end_time.split(' ');\n var riqi=timeList[0].split('-');\n console.log(riqi[0])\n var time=riqi[1]+'/'+riqi[2]+'/'+riqi[0]+' '+timeList[1];\n console.log(time);\n //倒计时插件代码\n var end = new Date(time);\n var _second = 1000;\n var _minute = _second * 60;\n var _hour = _minute * 60;\n var _day = _hour * 24;\n var timer;\n function showRemaining() {\n var now = new Date();\n var distance = end - now;\n if (distance < 0) {\n clearInterval(timer);\n // document.getElementById('countdown').innerHTML = 'EXPIRED!';\n 
$('#at').html('活动已经结束');\n return;\n }\n var days = Math.floor(distance / _day);\n var hours = Math.floor((distance % _day) / _hour);\n var minutes = Math.floor((distance % _hour) / _minute);\n var seconds = Math.floor((distance % _minute) / _second);\n $('#at').html('仅剩'+days + '天'+hours+'时'+minutes+'分'+seconds+'秒')\n console.log() ;\n }\n timer = setInterval(showRemaining, 1000);\n\n },\n error:function(){\n// alert('网络出现小差,请稍后再试');\n }\n\n })\n});" }, { "alpha_fraction": 0.4959193468093872, "alphanum_fraction": 0.5088814496994019, "avg_line_length": 24.048192977905273, "blob_id": "ea0e823355a3672ae7cc54810efde3c2f4e225a6", "content_id": "14b239782e3a3f9f3be154c34916dae5ea90a954", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2157, "license_type": "no_license", "max_line_length": 120, "num_lines": 83, "path": "/static/admin/tpl/chsi_captcha.html", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\n\n\n\n<div class=\"dialog-contents\" draggable=\"true\" id=\"drag_me\" draggable>\n <form class='modal-form' ng-submit=\"Ok()\" >\n <div class='custom-modal-header'>\n <h4>输入验证码</h4>\n </div>\n\n <div class=\"modal-body\" ng-show='showCaptcha'>\n <img ng-src='{{captcha_img}}'></img><i class=\"captch-refresh-btn fa fa-refresh\" ng-click='refreshCaptcha()'></i>\n <br>\n <p>\n <input class=\"form-control\" placeholder=\"请输入验证码\" ng-model='captcha' required/>\n </p>\n </div>\n \n <div class=\"modal-body\" ng-show='chsi_info'>\n <p>\n <label>姓名</label> {{chsi_info.name}}\n <br />\n <label>学制</label> {{chsi_info.years}}\n <br />\n <label>学历</label> {{chsi_info.grade}}\n <br />\n <label>专业</label> {{chsi_info.major}}\n <br />\n <label>学校</label> {{chsi_info.school}}\n <br />\n <label>身份证号</label> {{chsi_info.id_no}}\n <br />\n <label>入学时间</label> {{chsi_info.enroll_time}}\n <br />\n <label>毕业时间</label> {{chsi_info.graduate_time}}\n </p>\n </div>\n <div class='modal-footer'>\n <button type=\"button\" 
ng-click=\"closeThisDialog()\" class=\"btn btn-default\" data-dismiss=\"modal\">关闭</button>\n <button type=\"submit\" ng-show='showCaptcha' class=\"btn btn-danger\">确认</button>\n </div>\n </form>\n</div>\n\n<style>\n\n.captch-refresh-btn {\n font-size: 30px;\n}\n\n.custom-modal-header {\n padding: 5px;\n border-bottom: 1px solid #e5e5e5;\n}\n\n.modal-body label {\n width: 150px;\n}\n\n.modal-form {\n padding: 0px;\n margin: 0px;\n}\n\n.ngdialog-content {\n overflow: hidden;\n width: 500px;\n height: auto;\n margin: auto;\n top: 0;\n bottom: 0;\n left: 0;\n right: 0;\n border-radius: 5px;\n position: absolute;\n display: table;\n}\n\n.ngdialog-close {\n position: absolute;\n top: 0px;\n color: gray;\n right: 10px;\n font-size: 30px;\n}\n\n</style>\n" }, { "alpha_fraction": 0.6448863744735718, "alphanum_fraction": 0.7017045617103577, "avg_line_length": 26.076923370361328, "blob_id": "b4787f8f28371c306fedecdd7b120fc1cfb108d5", "content_id": "fffdac32cb67ab22dfb4e95ce7f940def6a593e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 135, "num_lines": 26, "path": "/migrations/versions/32ca0414826f_.py", "repo_name": "qsq-dm/mff", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 32ca0414826f\nRevises: 29bbb2cfc971\nCreate Date: 2016-01-28 11:49:27.884628\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '32ca0414826f'\ndown_revision = '29bbb2cfc971'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('room_design_vote_log', 'vote_count')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('room_design_vote_log', sa.Column('vote_count', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n ### end Alembic commands ###\n" } ]
191
drkwdck/ls
https://github.com/drkwdck/ls
e1a9ac966f4b88da8b70c2bd9bd250e5f8d01867
f2c03e6f5a7af6d6239dfa05007d46a4666c2e56
5ad0041b2c21104c5053a39b94be5ce30513c851
refs/heads/master
2022-12-15T11:23:25.134839
2020-09-09T18:16:14
2020-09-09T18:16:14
262,844,133
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7112069129943848, "alphanum_fraction": 0.7155172228813171, "avg_line_length": 32.14285659790039, "blob_id": "c73fc23635ec2155aa76d9abfb7b0650159e2f8f", "content_id": "0a6c0da825042a6d8dd7c4e347b8b060cdc4ed96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 232, "license_type": "no_license", "max_line_length": 80, "num_lines": 7, "path": "/README.md", "repo_name": "drkwdck/ls", "src_encoding": "UTF-8", "text": "# ls.py\nThe ls.py will show directory-listing. It is cross-platform command line tool. \n# Example\n![alt sreenshot](/public/1.png)\n# Usage\nTo get more information about a certain command, you can use \n<code>$ ./ls.py --help</code>\n" }, { "alpha_fraction": 0.5394449234008789, "alphanum_fraction": 0.5476871132850647, "avg_line_length": 30.62234115600586, "blob_id": "dd4c0f1d2cacae6eb416e16e46407a0af1eafbed", "content_id": "08e880692397990dea9796c12a864c96fed16e69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5945, "license_type": "no_license", "max_line_length": 111, "num_lines": 188, "path": "/ls.py", "repo_name": "drkwdck/ls", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom optparse import OptionParser\nimport os\nimport locale\nimport datetime\n\n\nlocale. 
setlocale(locale.LC_ALL, '')\n\n\ndef args_parse(args):\n parsed_args =[]\n if not args:\n parsed_args = \".\"\n elif args == \"..\":\n parsed_args.append(args)\n else:\n for arg in args:\n if arg.endswith(\"/\") or arg.endswith(\"\\\\\"):\n arg = arg[:-1]\n parsed_args.append(arg)\n return parsed_args\n\n\ndef file_list_create(options):\n if options.recursive:\n def get_file_list(args):\n if 'win' in os.uname().sysname.lower():\n slash = '\\\\'\n else:\n slash = '/'\n folder = []\n file_list = []\n for arg in args:\n for i in os.walk(arg):\n folder.append(i)\n for address, sub_dirs, files in folder:\n for file in files:\n file_list.append(address + slash + file)\n return file_list\n else:\n def get_file_list(args):\n if 'win' in os.uname().sysname.lower():\n slash = '\\\\'\n else:\n slash = '/'\n file_list = []\n for arg in args:\n folders = []\n for i in os.walk(arg):\n folders.append(i)\n folder = folders[0]\n adress, dirs, files = folder\n for dir in dirs:\n file_list.append(adress + slash + dir + slash)\n for file in files:\n file_list.append(adress + slash + file)\n return file_list\n return get_file_list\n\n\ndef file_hidden(files, options):\n if not options.hidden:\n for file_path in files[::]:\n file = file_path.split(\"/\")[-1]\n #if ls is working in windows\n file = file.split(\"\\\\\")[-1]\n if file.startswith(\".\"):\n files.remove(file_path)\n return files\n\n\ndef modified_option(files, options):\n files_data = {}\n if options.modified:\n for path_to_file in files:\n if 'win' in os.uname().sysname.lower():\n files_data[path_to_file] = os.path.getctime(path_to_file)\n else:\n mtime = os.path.getmtime(path_to_file)\n files_data[path_to_file] = datetime.datetime.fromtimestamp(mtime).strftime(\"%d %b %Y %H:%M:%S\")\n else:\n for path_to_file in files:\n files_data[path_to_file] = ''\n return files_data\n\n\ndef get_size(files_data):\n for file_path in files_data:\n if file_path.endswith(\"\\\\\") or file_path.endswith(\"/\"):\n continue\n # size in b\n 
size = os.path.getsize(file_path)\n formatted_size = locale.format(\"%d\", size)\n if size > 9999999999:\n formatted_size = formatted_size[:-2] + \"...\"\n files_data[file_path] = \"{0} {1:>10}\".format(files_data[file_path], formatted_size)\n\n\ndef programs_out(files_data, options):\n directory_counter = 0\n files_counter = 0\n keys = get_sorted_dict_keys(files_data, options)\n for key in keys:\n if options.size and options.modified:\n print(\"{0:32} {1:20}\".format(files_data[key], key))\n elif options.modified:\n print(\"{0:21} {1:20}\".format(files_data[key], key))\n elif options.size:\n print(\"{0:10} {1:20}\".format(files_data[key], key))\n else:\n print(key)\n if key.endswith(\"\\\\\") or key.endswith(\"/\"):\n directory_counter += 1\n else:\n files_counter += 1\n if files_counter == 1:\n file_word = \"file\"\n else:\n file_word = \"files\"\n if directory_counter == 1:\n directory_word = \"directory\"\n else:\n directory_word = \"directories\"\n print(\"{} {}, {} {}\".format(files_counter, file_word, directory_counter, directory_word))\n\n\ndef get_size_to_path(path):\n if 'win' in os.uname().sysname.lower():\n slash = '\\\\'\n else:\n slash = '/'\n if path.endswith(slash) or path.endswith(\".\"):\n folder = []\n file_list = []\n for i in os.walk(path):\n folder.append(i)\n for address, sub_dirs, files in folder:\n for file in files:\n file_list.append(address + slash + file)\n size = 0\n for file in file_list:\n size += os.path.getsize(file)\n return size\n return os.path.getsize(path)\n\n\n\ndef get_file_dir_name(file_path):\n if 'win' in os.uname().sysname.lower():\n slash = '\\\\'\n else:\n slash = '/'\n if file_path.endswith(slash):\n file_path = file_path[:-1]\n return file_path.split(slash)[-1]\n\ndef get_sorted_dict_keys(files_dict, options):\n if options.order == 'name' or options.order == 'n':\n sorted_keys = sorted(files_dict, key=get_file_dir_name)\n return sorted_keys\n elif options.order == 'size' or options.order == 's':\n sorted_keys = 
sorted(files_dict, key=get_size_to_path)\n return sorted_keys\n\nusage = '''\n%prog [options] [path1 [path2 [... pathN]]]\n\nThe paths are optional; if not given . is used.\n\n'''\nparser = OptionParser(usage=usage)\nparser.add_option(\"-H\", \"--hidden\", action='store_true', help='show hidden files default: off')\nparser.add_option(\"-m\", \"--modified\", action='store_true', help='show last modified date/time [default: off]')\nparser.add_option(\"-o\", \"--order\", default='name')\nparser.add_option(\"-r\", \"--recursive\", action='store_true', help='recurse into subdirectories [default: off]')\nparser.add_option(\"-s\", \"--size\", action='store_true', help='show sizes [default: off]')\noptions, args = parser.parse_args()\n\nargs = args_parse(args)\nget_file_list = file_list_create(options)\nfiles = get_file_list(args)\nfiles = file_hidden(files, options)\nfiles_dict = modified_option(files, options)\nif options.size:\n get_size(files_dict)\n\nprograms_out(files_dict, options)\n" } ]
2
dohelavip/spydork
https://github.com/dohelavip/spydork
6ba03d93ed86006c6e3a9c3207e39235543b528d
039d724ddefb0b9c5935dd1fa8e8d79b1a438618
aa6b2d5fa8ad58ff373e682215b909591dac79d8
refs/heads/master
2021-02-17T13:30:34.408208
2020-03-05T13:27:09
2020-03-05T13:27:09
245,100,448
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 16, "blob_id": "8781b3ed3c1bdb76844f34305bfafac6213f0b00", "content_id": "2be937585b38e7efda32db618fb115e505d2321d", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "permissive", "max_line_length": 23, "num_lines": 2, "path": "/README.md", "repo_name": "dohelavip/spydork", "src_encoding": "UTF-8", "text": "# spydork\nSPYHACKERZ DORK SCANNER\n" }, { "alpha_fraction": 0.4557467997074127, "alphanum_fraction": 0.4782898426055908, "avg_line_length": 38.022220611572266, "blob_id": "e248fdcb7c3171ebbb22624c5bdcc20baa425e9d", "content_id": "91582d64e47e2b49c23158bd12e0b76ed0095ae3", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9005, "license_type": "permissive", "max_line_length": 249, "num_lines": 225, "path": "/spydork.py", "repo_name": "dohelavip/spydork", "src_encoding": "UTF-8", "text": "import urllib2 , cookielib , random , re , sys , socket , time , httplib , ssl\r\n\r\n #########################################\r\n # Author : Dohela #\r\n # Team : Spyhackerz.org #\r\n # Date : 5 - 3 - 2020 #\r\n # Github : https://github.com/dohelavip #\r\n #########################################\r\n\r\nif sys.platform == \"linux2\" or sys.platform == \"linux\":\r\n\tR = (\"\\033[31m\")\r\n\tW = (\"\\033[0;1m\")\r\n\tB = (\"\\033[35m\")\r\n\tG = (\"\\033[32m\")\r\n\tglp = (\"\\033[2m\")\r\n\tY = (\"\\033[33;1m\")\r\nelse:\r\n\tR = \"\"\r\n\tW = \"\"\r\n\tY = \"\"\r\n\tB = \"\"\r\n\tG = \"\"\r\n\tglp = \"\"\r\n\r\nfilename = (\"vuln.txt\")\r\nvuln = open(filename,\"a\")\r\nfinallist = []\r\n\r\n# Thanks Google for User-Agent , sites and list sqlerrors --\r\n\r\nheader = ['Mozilla/4.0 (compatible; MSIE 5.0; SunOS 5.10 sun4u; X11)',\r\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.2pre) Gecko/20100207 Ubuntu/9.04 
(jaunty) Namoroka/3.6.2pre',\r\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser;',\r\n\t 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)',\r\n\t 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)',\r\n\t 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)',\r\n\t 'Microsoft Internet Explorer/4.0b1 (Windows 95)',\r\n\t 'Opera/8.00 (Windows NT 5.1; U; en)',\r\n\t 'amaya/9.51 libwww/5.4.0',\r\n\t 'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)',\r\n\t 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',\r\n\t 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',\r\n\t 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; ZoomSpider.net bot; .NET CLR 1.1.4322)',\r\n\t 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; QihooBot 1.0 [email protected])',\r\n\t 'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 5.11 [en]']\r\n\r\nerrors = {'MySQL': 'error in your SQL syntax',\r\n 'MiscError': 'mysql_fetch',\r\n 'MiscError2': 'num_rows',\r\n 'Oracle': 'ORA-01756',\r\n 'JDBC_CFM': 'Error Executing Database Query',\r\n 'JDBC_CFM2': 'SQLServer JDBC Driver',\r\n 'MSSQL_OLEdb': 'Microsoft OLE DB Provider for SQL Server',\r\n 'MSSQL_Uqm': 'Unclosed quotation mark',\r\n 'MS-Access_ODBC': 'ODBC Microsoft Access Driver',\r\n 'MS-Access_JETdb': 'Microsoft JET Database',\r\n 'Error Occurred While Processing Request' : 'Error Occurred While Processing Request',\r\n 'Server Error' : 'Server Error',\r\n 'Microsoft OLE DB Provider for ODBC Drivers error' : 'Microsoft OLE DB Provider for ODBC Drivers error',\r\n 'Invalid Querystring' : 'Invalid Querystring',\r\n 'OLE DB Provider for ODBC' : 'OLE DB Provider for ODBC',\r\n 'VBScript Runtime' : 'VBScript Runtime',\r\n 'ADODB.Field' : 'ADODB.Field',\r\n 'BOF or EOF' : 'BOF or EOF',\r\n 'ADODB.Command' : 'ADODB.Command',\r\n 'JET Database' : 'JET Database',\r\n 'mysql_fetch_array()' : 'mysql_fetch_array()',\r\n 'Syntax error' : 'Syntax error',\r\n 
'mysql_numrows()' : 'mysql_numrows()',\r\n 'GetArray()' : 'GetArray()',\r\n 'FetchRow()' : 'FetchRow()',\r\n 'Input string was not in a correct format' : 'Input string was not in a correct format',\r\n 'Not found' : 'Not found'}\r\n\r\nsites = ['ac', 'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao',\r\n 'aq', 'ar', 'as', 'at', 'au', 'aw', 'ax', 'az', 'ba', 'bb',\r\n 'bd', 'be', 'bf', 'bg', 'bh', 'bi', 'bj', 'bm', 'bn', 'bo',\r\n 'br', 'bs', 'bt', 'bv', 'bw', 'by', 'bz', 'ca', 'cc', 'cd',\r\n 'cf', 'cg', 'ch', 'ci', 'ck', 'cl', 'cm', 'cn', 'co', 'cr',\r\n 'cu', 'cv', 'cx', 'cy', 'cz', 'de', 'dj', 'dk', 'dm', 'do',\r\n 'dz', 'ec', 'ee', 'eg', 'eh', 'er', 'es', 'et', 'eu', 'fi',\r\n 'fj', 'fk', 'fm', 'fo', 'fr', 'ga', 'gb', 'gd', 'ge', 'gf',\r\n 'gg', 'gh', 'gi', 'gl', 'gm', 'gn', 'gp', 'gq', 'gr', 'gs',\r\n 'gt', 'gu', 'gw', 'gy', 'hk', 'hm', 'hn', 'hr', 'ht', 'hu',\r\n 'id', 'ie', 'il', 'im', 'in', 'io', 'iq', 'ir', 'is', 'it',\r\n 'je', 'jm', 'jo', 'jp', 'ke', 'kg', 'kh', 'ki', 'km', 'kn',\r\n 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb', 'lc', 'li', 'lk',\r\n 'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc', 'md', 'me',\r\n 'mg', 'mh', 'mk', 'ml', 'mm', 'mn', 'mo', 'mp', 'mq', 'mr',\r\n 'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'nc',\r\n 'ne', 'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz',\r\n 'om', 'pa', 'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn',\r\n 'pr', 'ps', 'pt', 'pw', 'py', 'qa', 're', 'ro', 'rs', 'ru',\r\n 'rw', 'sa', 'sb', 'sc', 'sd', 'se', 'sg', 'sh', 'si', 'sj',\r\n 'sk', 'sl', 'sm', 'sn', 'so', 'sr', 'st', 'su', 'sv', 'sy',\r\n 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk', 'tl', 'tm',\r\n 'tn', 'to', 'tp', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug',\r\n 'uk', 'um', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi',\r\n 'vn', 'vu', 'wf', 'ws', 'ye', 'yt', 'za', 'zm', 'zw', 'com',\r\n 'net', 'org','biz', 'gov', 'mil', 'edu', 'info', 'int', 'tel',\r\n 'name', 'aero', 'asia', 'cat', 'coop', 'jobs', 'mobi', 'museum',\r\n 'pro', 
'travel']\r\n\r\n# Checking Vuln Web --\r\ndef cek():\r\n\tprint (W+43*\"-\")\r\n\thasil = []\r\n\tfor url in finallist:\r\n\t\tprint (R+\"[!] \"+W+\"Web Vuln Taraniyor..\\r\"),;sys.stdout.flush()\r\n\t\tEXT = \"'\"\r\n\t\thost = url+EXT\r\n\t\ttry:\r\n\t\t\tsource = urllib2.urlopen(host).read()\r\n\t\t\tfor type,eMSG in errors.items():\r\n\t\t\t\tif re.search(eMSG, source):\r\n\t\t\t\t\tprint (B+\"\\r[+]\"+G+\" Vuln \"+W+\": \"+host.replace(\"'\",\"\"))\r\n\t\t\t\t\tprint (B+\"[*]\"+R+\" Error \"+W+\": \"+glp+type+W)\r\n\t\t\t\t\thasil.append(host.replace(\"'\",\"\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tpass\r\n\t\texcept:\r\n\t\t\tpass\r\n\r\n\r\n\tif len(hasil) == 0:\r\n\t\tpass\r\n\telse:\r\n\t\tprint (W+43*\"-\")\r\n\t\tprint (R+\"[!] \"+W+\"Vuln web kaydediliyor..\"),;sys.stdout.flush()\r\n\t\tfor x in hasil:\r\n\t\t\tvuln.write(x+\"\\n\")\r\n\t\tvuln.close()\r\n\r\n\t\tprint (B+\"\\r[+] \"+G+\"basariyla kaydedildi\"+W+filename)\r\n\r\n print (B+\"\\r[*] \"+G+\"Toplam web vuln \"+W+\": %s \"%(len(hasil)))\r\n\tprint (W+43*'-'+'\\n')\r\n\r\n\r\n# Searching web --\r\ndef cari(inurl , site , maxc):\r\n\r\n print (R+\"[!] \"+W+\"Lutfen Bekleyin.. 
spyhackerz.org\"+glp),;sys.stdout.flush()\r\n\r\n urls = []\r\n page = 0\r\n\r\n try:\r\n while page < int(maxc):\r\n\tjar = cookielib.FileCookieJar(\"cookies\")\r\n\tquery = inurl+\"+site:\"+site\r\n\tresults_web = 'http://www.search-results.com/web?q='+query+'&hl=en&page='+repr(page)+'&src=hmp'\r\n\trequest_web =urllib2.Request(results_web)\r\n\tagent = random.choice(header)\r\n\trequest_web.add_header('User-Agent', agent)\r\n\topener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))\r\n\ttext = opener_web.open(request_web).read()\r\n\tstringreg = re.compile('(?<=href=\")(.*?)(?=\")')\r\n names = stringreg.findall(text)\r\n page += 1\r\n\r\n for name in names:\r\n\t if name not in urls:\r\n\t if re.search(r'\\(',name) or re.search(\"<\", name) or re.search(\"\\A/\", name) or re.search(\"\\A(http://)\\d\", name):\r\n\t pass\r\n\t elif re.search(\"google\",name) or re.search(\"youtube\", name) or re.search(\"phpbuddy\", name) or re.search(\"iranhack\",name) or re.search(\"phpbuilder\",name) or re.search(\"codingforums\", name) or re.search(\"phpfreaks\", name) or re.search(\"%\", name):\r\n\t pass\r\n\t else:\r\n\t urls.append(name)\r\n\r\n\tpercent = int((1.0*page/int(maxc))*100)\r\n\turls_len = len(urls)\r\n\tsys.stdout.write(\"\\r[*] Urls: %s | Yuzde: %s | Sayfa: %s [*]\" % (repr(urls_len),repr(percent)+\"%\",repr(page))),\r\n\tsys.stdout.flush()\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n except urllib2.URLError as e:\r\n\tprint (R+\"\\r-- \"+W+\"Hata \"+R+\"-- \"+W+\": %s\"%e)\r\n\tprint (W+43*\"-\")\r\n\tsys.exit()\r\n except socket.error as s:\r\n\tprint (R+\"\\r-- \"+W+\"Hata \"+R+\"-- \"+W+\": %s\"%s)\r\n\tprint (W+43*\"-\")\r\n\tsys.exit()\r\n except httplib.IncompleteRead as h:\r\n\tprint (R+\"\\r-- \"+W+\"Hata \"+R+\"-- \"+W+\": %s\"%h)\r\n\tprint (W+43*\"-\")\r\n sys.exit()\r\n except ValueError as V:\r\n\tprint (R+\"\\r-- \"+W+\"Hata \"+R+\"-- \"+W+\": %s\"%V)\r\n\tprint (W+43*\"-\")\r\n sys.exit()\r\n\r\n tmplist = []\r\n for url in 
urls:\r\n try:\r\n host = url.split(\"/\",3)\r\n domain = host[2]\r\n if domain not in tmplist and \"=\" in url:\r\n\t finallist.append(url)\r\n\t tmplist.append(domain)\r\n except:\r\n pass\r\n\r\n print (\"\\n\"+W+43*\"-\")\r\n print (B+\"[+] \"+G+\"Urls (sorted) \"+W+\": %s Url\" % (len(finallist)))\r\n if site == '':\r\n\t print (B+\"[+] \"+G+\"Site \"+W+\": random\")\r\n else:\r\n\t print (B+\"[+] \"+G+\"Site \"+W+\": %s\"%(site))\r\n return finallist\r\n\r\nif __name__ == \"__main__\":\r\n\tprint (\"SPYSQLi DorkScanner\"+Y+\" Spyhackerz.org\")\r\n\t\r\n\tprint (W+43*\"-\")\r\n\r\n\tinurl = raw_input(B+\"[?]\"+G+\" Dork girin \"+W+\": \")\r\n\tsite = raw_input(B+\"[?]\"+G+\" Site kodu girin \"+W+\": \")\r\n\tmaxc = raw_input(B+\"[?]\"+G+\" Kac Sayfa aratilacak \"+W+\": \")\r\n\r\n\tprint (43*\"-\")\r\n\tcari(inurl , site , maxc)\r\n\tcek()\r\n" } ]
2
ericzhai918/pythonScriptRep
https://github.com/ericzhai918/pythonScriptRep
81a572a8127b09ab3bf41bdccd3f61485e38a823
ac0a319b8e362ba6c54c2a44263cdd6c3def8bf7
266d6fc582061eebc1447253e6ca6ca68c8abf74
refs/heads/master
2021-05-05T00:24:22.475063
2018-04-11T07:33:02
2018-04-11T07:33:02
119,492,366
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.357051283121109, "alphanum_fraction": 0.3673076927661896, "avg_line_length": 27.88888931274414, "blob_id": "a0f8f1149b06f18d71bcabcf3f550d7f64fa99ab", "content_id": "1696a5530db6df608f3a616a21b9f1760707c300", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1568, "license_type": "no_license", "max_line_length": 55, "num_lines": 54, "path": "/addHostByTem/template.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "#\n#coding:utf8\ntem=\"\"\"\n <host>\n <host>{ip}</host>\n <name>{name}</name>\n <description/>\n <proxy>\n <name>{proxy}</name>\n </proxy>\n <status>0</status>\n <ipmi_authtype>-1</ipmi_authtype>\n <ipmi_privilege>2</ipmi_privilege>\n <ipmi_username/>\n <ipmi_password/>\n <tls_connect>1</tls_connect>\n <tls_accept>1</tls_accept>\n <tls_issuer/>\n <tls_subject/>\n <tls_psk_identity/>\n <tls_psk/>\n <templates>\n <template>\n <name>Onebank Linux</name>\n </template>\n <template>\n <name>Template Discover JCJG</name>\n </template>\n </templates>\n <groups>\n <group>\n <name>Onebank 基础架构 WGQ</name>\n </group>\n </groups>\n <interfaces>\n <interface>\n <default>1</default>\n <type>1</type>\n <useip>1</useip>\n <ip>{ip}</ip>\n <dns/>\n <port>10050</port>\n <bulk>1</bulk>\n <interface_ref>if1</interface_ref>\n </interface>\n </interfaces>\n <applications/>\n <items/>\n <discovery_rules/>\n <httptests/>\n <macros/>\n <inventory/>\n </host>\n \"\"\"\n" }, { "alpha_fraction": 0.4934426248073578, "alphanum_fraction": 0.5295081734657288, "avg_line_length": 20.64285659790039, "blob_id": "ced922e34542f83f31911c55b1a2d23d67bc9010", "content_id": "a8fa03f85717e5630637934e88f0ac6bc99a8fcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 50, "num_lines": 28, "path": "/addHostByTem/changeTemp.py", "repo_name": "ericzhai918/pythonScriptRep", 
"src_encoding": "UTF-8", "text": "from template import tem\n\nwith open('nameList') as name:\n names=[]\n for line in name:\n line=line.strip()\n if not line:\n continue\n names.append(line)\n\nwith open('ipList') as ip:\n ips=[]\n for line in ip:\n line=line.strip()\n if not line:\n continue\n ips.append(line) \n\nfor ip in ips:\n if ip.startswith(\"10.2\"):\n proxy=\"10.20.28.10\"\n if ip.startswith(\"10.1\"):\n proxy=\"10.10.32.18\"\n name=\"\"\n for _name in names:\n if ip in _name:\n name=_name\n print(tem.format(proxy=proxy,ip=ip,name=name))\n \n" }, { "alpha_fraction": 0.5916666388511658, "alphanum_fraction": 0.6357142925262451, "avg_line_length": 31.230770111083984, "blob_id": "e59a17288276701f01066d556b0da8b8cef4c2a5", "content_id": "e18e7773cc3403b0546b508dc8dea3580b358989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 220, "num_lines": 26, "path": "/downloadNPics.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "\nimport urllib\nimport urllib.request\nimport re\n\ndef download_page(url):\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n data = response.read()\n return data\n\ndef get_image(html):\n regx = r'http://[\\S]*\\.jpg'\n pattern = re.compile(regx)\n get_img = re.findall(pattern,repr(html))\n num = 1\n for img in get_img:\n image = download_page(img)\n with open('/data/app/deploy_prod/pic/%s.jpg'%num,'wb') as fp:\n fp.write(image)\n num += 1\n print('正在下载第%s张图片'%(num-1))\n return\n\nurl = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1517467478448_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=frog'\nhtml = download_page(url)\nget_image(html)\n\n" }, { "alpha_fraction": 0.5248227119445801, "alphanum_fraction": 0.5401269197463989, "avg_line_length": 
24.514286041259766, "blob_id": "40773c0768dde12e338e67acf7294244f2ae5465", "content_id": "560a81b297e36d3cd820d06824b2dd33788a2832", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2695, "license_type": "no_license", "max_line_length": 224, "num_lines": 105, "path": "/getZabbixAuth.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "import json\nfrom urllib import request,parse\n\nZABBIX_URL=\"http://10.11.12.192/zabbix\"\nZABBIX_USER=\"Admin\"\nZABBIX_PASS=\"zabbix\"\n\nurl=\"{}/api_jsonrpc.php\".format(ZABBIX_URL)\nheader={\"Content-Type\": \"application/json\"}\n\ndef userLogin(): \n data={\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n \"user\": ZABBIX_USER,\n \"password\": ZABBIX_PASS\n },\n \"id\": 0,\n }\n\n value=json.dumps(data).encode('utf-8')\n\n req = request.Request(url, headers=header, data=value)\n\n try:\n result = request.urlopen(req)\n except Exception as e:\n print(\"Auth Failed, Please Check Your Name And Password:\", e)\n else:\n response = json.loads(result.read().decode('utf-8'))\n result.close()\n authID=response['result']\n return authID\n\ndef getHosts(authID):\n data={\n \"jsonrpc\": \"2.0\",\n \"method\": \"host.get\",\n \"params\": {\n \"output\": [\n \"hostid\",\n \"host\"\n ],\n \"selectInterfaces\": [\n \"interfaceid\",\n \"ip\"\n ]\n },\n\n \"id\": 2,\n \"auth\": authID\n } \n value=json.dumps(data).encode('utf-8')\n req = request.Request(url, headers=header, data=value)\n try:\n result = request.urlopen(req)\n except Exception as e:\n print(\"Auth Failed, Please Check Your Name And Password:\", e)\n else:\n response = json.loads(result.read().decode('utf-8'))\n result.close()\n host=response['result']\n return host\n'''\ndef getProc(data):\n dict = {}\n list = data\n for i in list:\n host = i['host']\n inter = i['interfaces']\n for j in inter:\n ip = j['ip']\n dict[host] = ip\n return dict\n\n#排序ip列表\ndef 
getData(dict):\n data = dict\n ip_list = [ ]\n for key in data.keys():\n \n ip = data[key]\n ip_list.append(ip)\n ip_list = list(set(ip_list))\n ip_list.sort()\n return ip_list \n#整理输出ip\n\ndef getGroup(ip_list):\n ip_group = { }\n ips = ip_list\n for i in ips:\n print(i)\n'''\n\nhosts=getHosts(userLogin())\n\nprint(\"hostid host\")\nfor i in hosts:\n print(i['hostid'],'|',i['host'])\n\n#curl -i -X POST -H 'Content-Type:application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"user.login\",\"params\":{\"user\":\"Admin\",\"password\":\"zabbix\"},\"auth\":null,\"id\":0}' http://10.10.10.10/zabbix/api_jsonrpc.php\n\n#wget --header='Content-Type:application/json' --post-data='{\"jsonrpc\":\"2.0\",\"method\":\"user.login\",\"params\":{\"user\":\"Admin\",\"password\":\"zabbix\"},\"auth\":null,\"id\":0}' http://10.10.10.10/zabbix/api_jsonrpc.php -qO- console\n" }, { "alpha_fraction": 0.4558139443397522, "alphanum_fraction": 0.5209302306175232, "avg_line_length": 18.454545974731445, "blob_id": "332d8e65c054d2c812fe77c0b202ebbbc8ebd2b0", "content_id": "352b7dfdb94a8dce9a154b1243cb3f01ed473129", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/selectionSort.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "\na=[12,3,13,2,5,7,6,22,21]\nlength=len(a)\nfor i in range(length):\n index=i\n for j in range(i+1,length):\n if(a[j]<a[index]):\n index=j\n temp=a[i]\n a[i]=a[index]\n a[index]=temp\nprint(a)\n" }, { "alpha_fraction": 0.502173900604248, "alphanum_fraction": 0.539130449295044, "avg_line_length": 22, "blob_id": "8f07fc1ce588847cb8914c8694514004d9d40fa7", "content_id": "d3ffba2ecb16e131f440254a837175ed89027a19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 43, "num_lines": 20, "path": 
"/quickSort.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "def quickSort(a,left,right):\n if left>=right:\n return a\n key=a[left]\n low=left\n high=right\n while left < right:\n while left<right and a[right]>=key:\n right -= 1\n a[left]=a[right]\n while left<right and a[left]<=key:\n left += 1\n a[right]=a[left]\n a[right]=key\n quickSort(a,low,left-1)\n quickSort(a,left+1,high)\n return a\n\na=[6,1,2,7,9,3,4,5,10,8]\nprint(quickSort(a,0,len(a)-1))\n" }, { "alpha_fraction": 0.42774567008018494, "alphanum_fraction": 0.5375722646713257, "avg_line_length": 17.66666603088379, "blob_id": "5c10a6afd89da75b3cb28910b73d009cfac82471", "content_id": "83855d8b315d8776af7e7142d803202d9f86e9aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 28, "num_lines": 9, "path": "/bubbleSort.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "\na=[20,12,18,3,8,6,4,19,14]\nlength=len(a)\nfor i in range(length-1):\n\tfor j in range(length-1-i):\n\t\tif a[j]>a[j+1]:\n\t\t\ttemp = a[j]\n\t\t\ta[j]=a[j+1]\n\t\t\ta[j+1]=temp\nprint(a)\n\t\t\t\n" }, { "alpha_fraction": 0.29223743081092834, "alphanum_fraction": 0.388127863407135, "avg_line_length": 18.81818199157715, "blob_id": "bc59d3d4cc335f209f1bd494264befa42922fac1", "content_id": "69fd7c82adc3fd6fa2e139da7efe41a713a8c93b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/insertSort.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "a=[22,44,1,3,4,77,32,45,18,2]\nlength = len(a)\nfor i in range(1, length):\n key = a[i]\n j = i - 1\n while j >= 0:\n if a[j] > key:\n a[j + 1] = a[j]\n a[j] = key\n j -= 1\nprint(a)\n\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 
0.7229437232017517, "avg_line_length": 20, "blob_id": "3a810197fbaee4186277c4287ca8b62849964998", "content_id": "b6f4d73b4f45f6c535d7012a55e2e05f6311193c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/timePlus.py", "repo_name": "ericzhai918/pythonScriptRep", "src_encoding": "UTF-8", "text": "from dateutil.parser import parse\n\na1=input(\"请输入开始日期:\")\na2=input(\"请输入结束日期:\")\n#a1='201801041800'\n#a2='201801052200'\nstart=parse(a1)\nend=parse(a2)\nmyTime = end - start\noneDay=8*60*60\nprint(\"加班天数为:\",myTime.total_seconds()/oneDay,\"天\")\n" } ]
9
tmitovski/random_forest
https://github.com/tmitovski/random_forest
eb8d1e4ab00c8d910a8f87fe5e5342638ad878be
bafefcd77ebd71da1f4eabe5aeddb74fb1523e28
9638d5d47c9a974365ef11c91488d78385304d26
refs/heads/master
2020-03-27T11:20:49.971793
2018-12-13T22:12:04
2018-12-13T22:12:04
146,480,675
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41299790143966675, "alphanum_fraction": 0.4559049606323242, "avg_line_length": 36.85714340209961, "blob_id": "1ee48faf9e0d2010d08665088cea73792f45ba86", "content_id": "b89a34cba120d594521dded427db0e72ddb2182b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7159, "license_type": "no_license", "max_line_length": 115, "num_lines": 189, "path": "/model_test.py", "repo_name": "tmitovski/random_forest", "src_encoding": "UTF-8", "text": "# =================================\n# =================================\nimport pandas as pd\nimport numpy as np\nfrom tech_indicator import *\nfrom sklearn.metrics import *\nimport os\n\nfrom requests.exceptions import ReadTimeout \nfrom requests.exceptions import Timeout # this handles ReadTimeout or ConnectTimeout\nfrom binance.client import Client\nfrom binance.exceptions import BinanceAPIException\nimport time\nimport random\n# =================================\n# =================================\n# Get online data\napi_key=\"\"\napi_secret=\"\"\n# import data from binance\n\ncount_buy = 0\ncount_sell = 0\n\npairs=[12]\n\nfor trade in range(0,10000):\n time.sleep(random.uniform(55,65))\n print('Loop:', trade)\n client = Client(api_key, api_secret, requests_params={'timeout': 10000})\n try:\n status = client.get_system_status()\n status0 = status.get('status')\n if (status0 != 1):\n # 0: normal,1:system maintenance\n info = client.get_account(recvWindow=600000)\n balance_ETH = client.get_asset_balance(asset='ETH', recvWindow=600000)\n balance_USDT= client.get_asset_balance(asset='USDT',recvWindow=600000)\n numb_ETH = float(balance_ETH.get('free'))\n numb_USDT= float(balance_USDT.get('free'))\n prices = client.get_all_tickers()\n pric0 = prices[pairs[0]].get('price')\n pric0 = float(pric0) \n name0 = prices[pairs[0]].get('symbol')\n if (trade == 0):\n start_balance = (numb_ETH) + (numb_USDT)/pric0\n # Adjust prices to current\n save_sell1 = pric0\n 
save_buy1 = pric0\n print('Price:', pric0) \n print('ETH amount:', numb_ETH, '; USDT amount:', numb_USDT) \n print('Income:', ((((numb_ETH) + (numb_USDT)/pric0)/(start_balance))*100.)-100., '%')\n\n # 1=buy ETH , 2=sell ETH\n if ((numb_ETH) > (numb_USDT)/pric0):\n trigger = 2\n \n if ((numb_ETH) < (numb_USDT)/pric0):\n trigger = 1\n print('Trigger:', trigger) \n TIME_IN_FORCE_GTC = 'GTC'\n\n\n klines2 = client.get_historical_klines(name0, Client.KLINE_INTERVAL_5MINUTE, \"3 hours ago UTC\")\n save_klines2=np.array(klines2).astype(np.float)\n go_on = 0\n\n for loop_1 in range(0,10):\n if (len(save_klines2[:,3]) == 36) and (go_on == 0): \n go_on=1\n if (len(save_klines2[:,3]) < 36) and (go_on == 0):\n print('ntim2 < 36', loop_1) \n time.sleep(random.uniform(8,12))\n klines2 = client.get_historical_klines(name0, Client.KLINE_INTERVAL_5MINUTE, \"3 hours ago UTC\")\n save_klines2=np.array(klines2).astype(np.float)\n ntim2 = 35 # exclude last data point. This is not 5 minute tick, but current price\n data_i2= np.empty((ntim2,5))\n\n data_i2[:,0] = save_klines2[0:ntim2,1]\n data_i2[:,1] = save_klines2[0:ntim2,2]\n data_i2[:,2] = save_klines2[0:ntim2,3]\n data_i2[:,3] = save_klines2[0:ntim2,4]\n data_i2[:,4] = save_klines2[0:ntim2,5]\n\n df = pd.DataFrame(data_i2[:,:], columns=list(['Open', 'High', 'Low', 'Close', 'Volume']))\n end0=ntim2\n\n# =================================\n# =================================\n\n# constants\n\n n1=6\n n2=27\n# =================================\n# =================================\n# compute technical indicators\n\n MA1 = moving_average(df,n1) \n MA2 = moving_average(df,n2)\n\n MVA1 = MA1.iloc[:end0,5] \n MVA2 = MA2.iloc[:end0,5] \n\n# =================================\n# =================================\n# BUY ETH\n\n if (MVA1[34] > MVA2[34]) and (trigger == 1) and ((numb_USDT)>20.):\n offer = 0. \n if (data_i2[-1,3] <= pric0):\n offer = data_i2[-1,3]\n if (data_i2[-1,3] > pric0):\n offer = pric0\n if (200. < offer < 5000.) 
and (save_sell1 < 5000.): \n amount = offer*1.0002\n precision1 = 2\n precision2 = 3\n buying = numb_USDT/pric0*0.9995\n \n amt_str = \"{:0.0{}f}\".format(amount, precision1) # price\n numb_str = \"{:0.0{}f}\".format(buying, precision2) # quantity\n \n save_buy1 = amount\n count_buy = count_buy + 1\n print('buy 1:', save_buy1)\n print('amount:', amt_str)\n print('number:', numb_str)\n\n #place a market buy order\n order = client.create_order(\n symbol='ETHUSDT',\n side=Client.SIDE_BUY,\n type=Client.ORDER_TYPE_LIMIT,\n timeInForce=TIME_IN_FORCE_GTC,\n quantity=numb_str,\n price=amt_str, recvWindow=600000)\n time.sleep(random.uniform(20,25))\n trigger = 2\n os.system('say \"Toni you bought Etherium\"') \n\n\n#=================================================\n# sell ETH\n \n if (MVA1[34]<MVA2[34]) and (trigger == 2) and ((numb_ETH)>0.05):\n offer = 10000. \n if (data_i2[-1,3] >= pric0):\n offer = data_i2[-1,3]\n if (data_i2[-1,3] < pric0):\n offer = pric0\n if (200. < offer < 5000.) and (save_buy1 < 5000.):\n amount = offer*0.9998\n precision1 = 2\n precision2 = 3 \n selling = numb_ETH*0.9995\n \n amt_str = \"{:0.0{}f}\".format(amount, precision1) # price\n numb_sell = \"{:0.0{}f}\".format(selling, precision2) # quantity\n\n save_sell1 = amount\n count_sell = count_sell + 1\n print('sell 1:', save_sell1)\n print('amount:', amt_str)\n print('number:', numb_sell)\n\n order = client.create_order(\n symbol='ETHUSDT',\n side=Client.SIDE_SELL,\n type=Client.ORDER_TYPE_LIMIT, \n timeInForce=TIME_IN_FORCE_GTC,\n quantity=numb_sell,\n price=amt_str, recvWindow=600000)\n time.sleep(random.uniform(20,25)) \n trigger = 1\n os.system('say \"Toni you sold Etherium\"') \n\n if (status0 == 1):\n print('status=1') #system maintanance\n \n except ReadTimeout:\n print(\"ReadTimeOut happened. Will try again....\")\n pass\n except Timeout:\n print(\"TimeOut happened. Will try again....\")\n pass\n except BinanceAPIException:\n print(\"TimeOut happened. 
Will try again....\")\n pass\n" }, { "alpha_fraction": 0.6019376516342163, "alphanum_fraction": 0.6371103525161743, "avg_line_length": 33.13669204711914, "blob_id": "0935ada84607a2c95772d1458196127c208f5fa1", "content_id": "f19772dd28a14d53ad0c317fd8e7f31e439b0030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4748, "license_type": "no_license", "max_line_length": 129, "num_lines": 139, "path": "/keras_LSTM_ETHBTC_data.py", "repo_name": "tmitovski/random_forest", "src_encoding": "UTF-8", "text": "# LSTM for international airline passengers problem with regression framing\nimport numpy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nimport sys\nfrom netCDF4 import Dataset\n\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)\n\n\n# fix random seed for reproducibility\nnumpy.random.seed(7)\n\n# load the dataset\ndata1 = Dataset('ETHBTC_1hour_Sep1_2017_Sep1_2018.nc')\ndata2 = data1.variables['klines']\n# sorted in the order: columns=list(['Open', 'High', 'Low', 'Close', 'Volume']))\nnlen = len(data2[:,3])\ndataset = np.empty((nlen,1))\ndataset[:,0] = data2[:,3]*1000.\ndataset = dataset.astype('float32')\n\n# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)\n\n# split into train and test sets\ntrain_size = int(len(dataset) * 0.80)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\n\n\n# 
reshape into X=t and Y=t+1\nlook_back = 12\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)\n\n# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\ntestX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n\n# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(4, input_shape=(1, look_back)))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2)\n\n# make predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)\n\n# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])\n\n# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict\n\n# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict\n\n# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()\n\nnlen0 = len(testPredict[:,0])\n# Another test to estimate earnings\nstart_am = 0\nn_buy = 0\nn_sell = 0\ntrigger = 1\nsave_buy = 0.\nsave_sell = 0.\nbuy=0.\nsell=0.\n\nfor count in range(1,nlen0-1):\n if (testPredict[count,0] > 1.001*testY[0,count-1]) and 
(trigger == 1):\n \n if (start_am == 1):# and (data1orig[tr0+count+start]*1.01 < save_sell):\n buy = (sell/testY[0,count-1])*0.99925\n save_buy = testY[0,count-1]\n n_buy = n_buy + 1\n trigger = 2\n sell = 0.\n if (start_am == 0):\n buy = (1./testY[0,count-1])*0.99925\n save_buy = testY[0,count-1]\n buy_start = buy\n start_am = 1\n trigger = 2\n sell=0.\n\n if (testPredict[count,0] < 0.999*testY[0,count-1]) and (trigger == 2):# and (data1orig[tr0+count]*0.99 > save_buy):\n sell = buy*testY[0,count-1]*0.99925\n save_sell = testY[0,count-1]\n trigger = 1\n n_sell = n_sell + 1\n buy = 0.\n\ngain1 = 0.\ngain1 = (((buy*testY[0,count-1]) + sell)/(testY[0,count-1]/testY[0,1]))\nprint('Relative to no trade (need >1):', gain1)\nprint('Earnings (%) :', 100.*(((buy*testY[0,count-1]) + sell)-1.))\nprint('number of trades:', n_sell+n_buy)\nprint('===========================')\ngain1 = 0.\ngain1 = (buy + (sell/testY[0,count-1]))/(buy_start)\nprint('Relative to no trade (need >1):', gain1)\nprint('===========================') \n\n\n" }, { "alpha_fraction": 0.5445852279663086, "alphanum_fraction": 0.6098493933677673, "avg_line_length": 32.725807189941406, "blob_id": "dd5682a91b24f620c575f11c529633f93ab46e0f", "content_id": "00965204d0fa36e180a0f9f1ed1126489deae50f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4187, "license_type": "no_license", "max_line_length": 111, "num_lines": 124, "path": "/pull_save_binance_data_1hour_1pair.py", "repo_name": "tmitovski/random_forest", "src_encoding": "UTF-8", "text": "#saves 1-hour klines for a pair\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 30 06:50:04 2018\n\n@author: mitovski\n\"\"\"\n#import datetime\n\nfrom requests.exceptions import ReadTimeout \nfrom requests.exceptions import Timeout # this handles ReadTimeout or ConnectTimeout\nfrom binance.client import Client\nfrom binance.exceptions import BinanceAPIException\nimport time\nimport 
random\nfrom requests.exceptions import ConnectionError\n\nimport numpy as np\nimport numpy\nimport sys\n#import pyopencl\n#from pyopencl.tools import get_test_platforms_and_devices\n#print(get_test_platforms_and_devices())\n#===========\npairs = [0] #specify 1 pair, 0 = ETHBTC\n#===========\n\n#toni_mito (generated Sept 22, 2018)\napi_key=\"aDvDaxmLjqJZ9ryNehFWAEO3iLnGjfV4rM9bf5pKQQ0rIiEUvgt5xV4ii1Dn19Mx\"\napi_secret=\"BDJCtrHI0RHQeKRANHdSPX8fAE5ZmqU6IiCONfGLsnGDVFpTTccAOCeG2RILJogD\"\n# import data from binance\n\n\nclient = Client(api_key, api_secret, requests_params={'timeout': 1000})\n \n #client = Client(api_key, api_secret, {\"verify\": False, \"timeout\": 20})\n #print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\ntry:\n \n status = client.get_system_status()\n status0 = status.get('status')\n if (status0 != 1):\n # 0: normal,1:system maintenance\n #=================================================\n info = client.get_account(recvWindow=600000)\n #info = client.get_symbol_info('XLMETH')\n \n # prices show PAIR exchange (NOTE: it is last (ask) price)\n prices = client.get_all_tickers()\n name0 = prices[pairs[0]].get('symbol')\n print('Name:', name0)\n \n #klines1 = client.get_historical_klines(name0, Client.KLINE_INTERVAL_5MINUTE, \"10 MONTHS ago UTC\")\n #klines1 = client.get_historical_klines(name0, Client.KLINE_INTERVAL_1HOUR, \"1 Sep, 2017\", \"1 Jun, 2018\")\n klines1 = client.get_historical_klines(name0, Client.KLINE_INTERVAL_1HOUR, \"1 Sep, 2017\", \"1 Sep, 2018\")\n save_klines0=np.array(klines1).astype(np.float)\n #ntim = (np.shape(save_klines0[:,0]))\n ntim = len(save_klines0[:,0])\n plot_klines= np.empty(((ntim,5)))\n print(np.shape(plot_klines))\n \n plot_klines[:,0] = save_klines0[:,1]\n plot_klines[:,1] = save_klines0[:,2]\n plot_klines[:,2] = save_klines0[:,3]\n plot_klines[:,3] = save_klines0[:,4]\n plot_klines[:,4] = save_klines0[:,5]\n #print(plot_klines[loop,:,3])\n #print('loop:', loop)\n # klines are:\n #\"1499040000000\", 
// Open time\n #\"0.01634790\", // Open\n #\"0.80000000\", // High\n #\"0.01575800\", // Low\n #\"0.01577100\", // Close\n #\"148976.11427815\", // Volume\n #1499644799999, // Close time\n #\"2434.19055334\", // Quote asset volume\n #308, // Number of trades\n #\"1756.87402397\", // Taker buy base asset volume\n #\"28.46694368\", // Taker buy quote asset volume\n #\"17928899.62484339\" // Ignore\n\n from netCDF4 import Dataset\n rootgrp = Dataset('ETHBTC_1hour_Sep1_2017_Sep1_2018.nc', 'w', format='NETCDF4')\n print(rootgrp.file_format)\n rootgrp.close()\n rootgrp = Dataset('ETHBTC_1hour_Sep1_2017_Sep1_2018.nc', 'a')\n fcstgrp = rootgrp.createGroup('forecasts')\n #analgrp = rootgrp.createGroup('analyses')\n\n #pair = rootgrp.createDimension('pair', len(pairs))\n time = rootgrp.createDimension('time', ntim)\n data = rootgrp.createDimension('data', 5)\n\n klines = rootgrp.createVariable('klines','f8',('time','data',))\n klines[:,:] = plot_klines[:,:]\n\n rootgrp.close()\n #================================================= \n if (status0 == 1):\n print('status=1') #system maintanance\n \nexcept ReadTimeout:\n print(\"ReadTimeOut happened. Will try again....1\")\n time.sleep(60)\n pass\nexcept Timeout:\n print(\"TimeOut happened. Will try again....2\")\n time.sleep(60)\n pass\nexcept BinanceAPIException:\n print(\"TimeOut happened. Will try again....3\")\n time.sleep(60)\n pass\nexcept ConnectionError:\n print(\"Connection Error happened. Will try again in 3 minutes\")\n time.sleep(180)\n pass\nexcept MaxRetryError:\n print(\"Max Retry Error happened. 
Will try again in 4 minutes\")\n time.sleep(240)\n pass\n\n" }, { "alpha_fraction": 0.6112464666366577, "alphanum_fraction": 0.6443361639976501, "avg_line_length": 33.676055908203125, "blob_id": "22311bfe90bf4650550a2ca406ff9f01a8158e5e", "content_id": "f316db8d4ebbd589c0104f5ae42a38b852189fda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4926, "license_type": "no_license", "max_line_length": 128, "num_lines": 142, "path": "/RF_ETHBTC_data.py", "repo_name": "tmitovski/random_forest", "src_encoding": "UTF-8", "text": "# Random Forest for international airline passengers problem\nimport numpy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\nimport math\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nimport sys\nfrom netCDF4 import Dataset\n\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)\n\n\n# fix random seed for reproducibility\nnumpy.random.seed(7)\n\n# load the dataset\ndata1 = Dataset('ETHBTC_1hour_Sep1_2017_Sep1_2018.nc')\ndata2 = data1.variables['klines']\n# sorted in the order: columns=list(['Open', 'High', 'Low', 'Close', 'Volume']))\nnlen = len(data2[:,3])\ndataset = np.empty((nlen,1))\ndataset[:,0] = data2[:,3]*1000.\ndataset = dataset.astype('float32')\n\n# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)\n\n# split into train and test sets\ntrain_size = int(len(dataset) * 0.8)\ntest_size = len(dataset) - 
train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\n\n# reshape into X=t and Y=t+1\nlook_back = 1\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)\nprint(np.shape(trainX))\n\n# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1]))\ntestX = numpy.reshape(testX, (testX.shape[0], testX.shape[1]))\nprint(np.shape(trainX))\n# create and fit the RF model\n# n_estimators (The number of trees in the forest)\nreg = RandomForestRegressor(n_estimators=10, bootstrap=False, oob_score=False, random_state=0) \nreg.fit(trainX, trainY)\n\n# make predictions\n\ntrainPredict = numpy.empty_like(trainX)\ntestPredict = numpy.empty_like(testX)\n\ntrainPredict[:,0] = reg.predict(trainX.reshape(-1,1))\ntestPredict[:,0] = reg.predict(testX.reshape(-1,1))\n\n# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])\n\n# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict\n\n# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict\n\n# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()\n\nnlen0 = len(testPredict[:,0])\n# Another test to estimate 
earnings\nstart_am = 0\nn_buy = 0\nn_sell = 0\ntrigger = 1\nsave_buy = 0.\nsave_sell = 0.\nbuy=0.\nsell=0.\n\nfor count in range(1,nlen0-1):\n if (testPredict[count,0] > 1.02*testY[0,count-1]) and (trigger == 1):\n \n if (start_am == 1):# and (data1orig[tr0+count+start]*1.01 < save_sell):\n buy = (sell/testY[0,count-1])*0.99925\n save_buy = testY[0,count-1]\n n_buy = n_buy + 1\n trigger = 2\n sell = 0.\n if (start_am == 0):\n buy = (1./testY[0,count-1])*0.99925\n save_buy = testY[0,count-1]\n buy_start = buy\n start_am = 1\n trigger = 2\n sell=0.\n\n if (testPredict[count,0] < 0.98*testY[0,count-1]) and (trigger == 2):# and (data1orig[tr0+count]*0.99 > save_buy):\n sell = buy*testY[0,count-1]*0.99925\n save_sell = testY[0,count-1]\n trigger = 1\n n_sell = n_sell + 1\n buy = 0.\n\ngain1 = 0.\ngain1 = (((buy*testY[0,count-1]) + sell)/(testY[0,count-1]/testY[0,1]))\nprint('Relative to no trade (need >1):', gain1)\nprint('Earnings (%) :', 100.*(((buy*testY[0,count-1]) + sell)-1.))\nprint('number of trades:', n_sell+n_buy)\nprint('===========================')\ngain1 = 0.\ngain1 = (buy + (sell/testY[0,count-1]))/(buy_start)\nprint('Relative to no trade (need >1):', gain1)\nprint('===========================') \n\n" } ]
4
joehewitt/up
https://github.com/joehewitt/up
17df2b48e4a76fbd9d71988f79f2d647be8ba2e4
17d853214ae87c9881057fa51ef7a09b53bff6ce
c0019cda6a81dae403827eff64638470bece351f
refs/heads/master
2020-04-17T08:13:05.701002
2015-10-09T23:47:01
2015-10-09T23:47:01
5,301,551
27
0
null
null
null
null
null
[ { "alpha_fraction": 0.697204053401947, "alphanum_fraction": 0.6977989077568054, "avg_line_length": 39.02381134033203, "blob_id": "8c1ea70f2c7c1101b4d242c7b107722cd2cb8376", "content_id": "cc3040334c7496ffd4158d9d27c99c6c0d5b6715", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1681, "license_type": "permissive", "max_line_length": 108, "num_lines": 42, "path": "/src/vm/include/UpArena.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPARENA_H\n#define UP_UPARENA_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpArena {\n};\n\nUpArena* UpArenaCreate();\nvoid UpArenaFree(UpArena* self);\n\nvoid* UpArenaAllocate(UpArena* self, size_t size, const char* typeName);\nvoid* UpArenaAllocateCopy(UpArena* self, size_t size, void* source, size_t sourceSize);\nvoid* UpArenaReallocate(UpArena* self, size_t size, void* source, size_t sourceSize);\nvoid* UpArenaAllocateBuffer(UpArena* self, size_t size);\n\nchar* UpArenaCopyString(UpArena* self, const char* s);\nchar* UpArenaCopyStringN(UpArena* self, const char* s, size_t length);\nchar* UpArenaFormatString(UpArena* self, const char* str, ...);\n\n#define UpArenaAlloc(_ARENA, _SIZE) \\\n (UpArenaAllocateBuffer(_ARENA, _SIZE))\n\n#define UpArenaNew(_ARENA, _TYPE) \\\n ((_TYPE*)UpArenaAllocate(_ARENA, sizeof(_TYPE), #_TYPE))\n\n#define UpArenaNewArray(_ARENA, _TYPE, _COUNT) \\\n ((_TYPE**)UpArenaAllocate(_ARENA, sizeof(_TYPE*) * (_COUNT), \"Array_\" #_TYPE))\n#define UpArenaCopyArray(_ARENA, _SOURCE, _OLDCOUNT, _TYPE, _COUNT) \\\n ((_TYPE**)UpArenaAllocateCopy(_ARENA, sizeof(_TYPE*) * (_COUNT), _SOURCE, sizeof(_TYPE*) * (_OLDCOUNT)))\n#define UpArenaResizeArray(_ARENA, _SOURCE, _OLDCOUNT, _TYPE, _COUNT) \\\n ((_TYPE**)UpArenaReallocate(_ARENA, sizeof(_TYPE*) * (_COUNT), _SOURCE, sizeof(_TYPE*) * (_OLDCOUNT)))\n\n#define UpArenaNewBuffer(_ARENA, _TYPE, _SIZE) \\\n ((_TYPE)UpArenaAllocateBuffer(_ARENA, _SIZE))\n#define 
UpArenaNewArrayBuffer(_ARENA, _TYPE, _COUNT) \\\n ((_TYPE*)UpArenaAllocateBuffer(_ARENA, sizeof(_TYPE) * (_COUNT)))\n\n#define UpArenaNewString(_ARENA, _LENGTH) \\\n ((char*)UpArenaAllocateBuffer(_ARENA, sizeof(char) * (_LENGTH+1)))\n\n#endif // UP_UPARENA_H\n" }, { "alpha_fraction": 0.5691644549369812, "alphanum_fraction": 0.5944528579711914, "avg_line_length": 29.101755142211914, "blob_id": "92869c99b9c34a5751bbb4d3ab9ebcf34f4fc972", "content_id": "bd9606e6dd29fe0cdc44e861bdd4e8086750ba21", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8581, "license_type": "permissive", "max_line_length": 63, "num_lines": 285, "path": "/src/vm/UpProbes.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n// Do not edit this file. It is generated!\n\n#ifdef UP_ENABLE_PROBES\n\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpProbes.h\"\n#include \"Up/UpContext.h\"\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t name;\n uint32_t format;\n uint8_t isData;\n} UpDeclareProbe;\n\n\nbool UpProbeCompilerNodeEnabled = false;\nbool UpProbeOp1Enabled = false;\nbool UpProbeOp2Enabled = false;\nbool UpProbeFloatEnabled = false;\nbool UpProbeParseEnabled = false;\nbool UpProbeSetEnabled = false;\nbool UpProbeOp1fEnabled = false;\nbool UpProbeLogEnabled = false;\nbool UpProbeLongEnabled = false;\nbool UpProbeCallEnabled = false;\nbool UpProbeCompilerFrameEnabled = false;\nbool UpProbeRunEnabled = false;\nbool UpProbeStringEnabled = false;\nbool UpProbeGetEnabled = false;\nbool UpProbeCompilerClassEnabled = false;\nbool UpProbeIntegerEnabled = false;\nbool UpProbeOp1vEnabled = false;\nbool UpProbeExcEnabled = false;\nbool UpProbeInstructionEnabled = false;\nbool UpProbeCompileEnabled = false;\nbool UpProbeOp1llEnabled = false;\nbool UpProbeOpEnabled = false;\nbool UpProbeDeleteEnabled = false;\n\nbool* UpGetProbeFlag(const char* name) {\n if 
(!strcmp(name, \"compilerNode\")) {\n return &UpProbeCompilerNodeEnabled;\n } else if (!strcmp(name, \"op1\")) {\n return &UpProbeOp1Enabled;\n } else if (!strcmp(name, \"op2\")) {\n return &UpProbeOp2Enabled;\n } else if (!strcmp(name, \"float\")) {\n return &UpProbeFloatEnabled;\n } else if (!strcmp(name, \"parse\")) {\n return &UpProbeParseEnabled;\n } else if (!strcmp(name, \"set\")) {\n return &UpProbeSetEnabled;\n } else if (!strcmp(name, \"op1f\")) {\n return &UpProbeOp1fEnabled;\n } else if (!strcmp(name, \"log\")) {\n return &UpProbeLogEnabled;\n } else if (!strcmp(name, \"long\")) {\n return &UpProbeLongEnabled;\n } else if (!strcmp(name, \"call\")) {\n return &UpProbeCallEnabled;\n } else if (!strcmp(name, \"compilerFrame\")) {\n return &UpProbeCompilerFrameEnabled;\n } else if (!strcmp(name, \"run\")) {\n return &UpProbeRunEnabled;\n } else if (!strcmp(name, \"string\")) {\n return &UpProbeStringEnabled;\n } else if (!strcmp(name, \"get\")) {\n return &UpProbeGetEnabled;\n } else if (!strcmp(name, \"compilerClass\")) {\n return &UpProbeCompilerClassEnabled;\n } else if (!strcmp(name, \"integer\")) {\n return &UpProbeIntegerEnabled;\n } else if (!strcmp(name, \"op1v\")) {\n return &UpProbeOp1vEnabled;\n } else if (!strcmp(name, \"exc\")) {\n return &UpProbeExcEnabled;\n } else if (!strcmp(name, \"instruction\")) {\n return &UpProbeInstructionEnabled;\n } else if (!strcmp(name, \"compile\")) {\n return &UpProbeCompileEnabled;\n } else if (!strcmp(name, \"op1ll\")) {\n return &UpProbeOp1llEnabled;\n } else if (!strcmp(name, \"op\")) {\n return &UpProbeOpEnabled;\n } else if (!strcmp(name, \"delete\")) {\n return &UpProbeDeleteEnabled;\n }\n return NULL;\n}\n\nvoid UpInitProbes() {\n {\n char* name = \"compilerNode\";\n char* format = \"%(1)s\";\n UpDeclareProbe probe = {2, UpProbeCompilerNode, 12, 5, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 12);\n UpProbe(format, sizeof(char) * 5);\n }\n {\n char* name = \"op1\";\n char* format 
= \"%d %(2)s %d\";\n UpDeclareProbe probe = {2, UpProbeOp1, 3, 11, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 11);\n }\n {\n char* name = \"op2\";\n char* format = \"%d %(2)s %d %d\";\n UpDeclareProbe probe = {2, UpProbeOp2, 3, 14, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 14);\n }\n {\n char* name = \"float\";\n char* format = \"%f\";\n UpDeclareProbe probe = {2, UpProbeFloat, 5, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 5);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"parse\";\n char* format = \"%s\";\n UpDeclareProbe probe = {2, UpProbeParse, 5, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 5);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"set\";\n char* format = \"%(3)s %(3)s\";\n UpDeclareProbe probe = {2, UpProbeSet, 3, 11, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 11);\n }\n {\n char* name = \"op1f\";\n char* format = \"%d %(2)s %f\";\n UpDeclareProbe probe = {2, UpProbeOp1f, 4, 11, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 4);\n UpProbe(format, sizeof(char) * 11);\n }\n {\n char* name = \"log\";\n char* format = \"%s\";\n UpDeclareProbe probe = {2, UpProbeLog, 3, 2, 1};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"long\";\n char* format = \"%lld\";\n UpDeclareProbe probe = {2, UpProbeLong, 4, 4, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 4);\n UpProbe(format, sizeof(char) * 4);\n }\n {\n char* name = \"call\";\n char* format = \"%(3)s\";\n UpDeclareProbe probe = {2, UpProbeCall, 4, 5, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 4);\n UpProbe(format, sizeof(char) * 5);\n }\n {\n char* name = \"compilerFrame\";\n char* 
format = \"%(3)s\";\n UpDeclareProbe probe = {2, UpProbeCompilerFrame, 13, 5, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 13);\n UpProbe(format, sizeof(char) * 5);\n }\n {\n char* name = \"run\";\n char* format = \"%s\";\n UpDeclareProbe probe = {2, UpProbeRun, 3, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"string\";\n char* format = \"%s\";\n UpDeclareProbe probe = {2, UpProbeString, 6, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 6);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"get\";\n char* format = \"%(3)s %(3)s\";\n UpDeclareProbe probe = {2, UpProbeGet, 3, 11, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 11);\n }\n {\n char* name = \"compilerClass\";\n char* format = \"%(3)s\";\n UpDeclareProbe probe = {2, UpProbeCompilerClass, 13, 5, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 13);\n UpProbe(format, sizeof(char) * 5);\n }\n {\n char* name = \"integer\";\n char* format = \"%d\";\n UpDeclareProbe probe = {2, UpProbeInteger, 7, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 7);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"op1v\";\n char* format = \"%d %(2)s %llx\";\n UpDeclareProbe probe = {2, UpProbeOp1v, 4, 13, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 4);\n UpProbe(format, sizeof(char) * 13);\n }\n {\n char* name = \"exc\";\n char* format = \"%s %s %d %d\";\n UpDeclareProbe probe = {2, UpProbeExc, 3, 11, 1};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 3);\n UpProbe(format, sizeof(char) * 11);\n }\n {\n char* name = \"instruction\";\n char* format = \"%d\";\n UpDeclareProbe probe = {2, UpProbeInstruction, 11, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 11);\n UpProbe(format, sizeof(char) * 2);\n 
}\n {\n char* name = \"compile\";\n char* format = \"%s\";\n UpDeclareProbe probe = {2, UpProbeCompile, 7, 2, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 7);\n UpProbe(format, sizeof(char) * 2);\n }\n {\n char* name = \"op1ll\";\n char* format = \"%d %(2)s %lld\";\n UpDeclareProbe probe = {2, UpProbeOp1ll, 5, 13, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 5);\n UpProbe(format, sizeof(char) * 13);\n }\n {\n char* name = \"op\";\n char* format = \"%d %(2)s\";\n UpDeclareProbe probe = {2, UpProbeOp, 2, 8, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 2);\n UpProbe(format, sizeof(char) * 8);\n }\n {\n char* name = \"delete\";\n char* format = \"%(3)s %(3)s\";\n UpDeclareProbe probe = {2, UpProbeDelete, 6, 11, 0};\n UpProbe(&probe, sizeof(probe));\n UpProbe(name, sizeof(char) * 6);\n UpProbe(format, sizeof(char) * 11);\n }\n {\n UpDeclareProbe probe = {2, 0, 0, 0, 0};\n UpProbe(&probe, sizeof(probe));\n }\n}\n\n#endif\n\n" }, { "alpha_fraction": 0.5492021441459656, "alphanum_fraction": 0.5516717433929443, "avg_line_length": 33.625, "blob_id": "53971fd8c6bd794366b43928d45dba9f2e5f6811", "content_id": "f324c3e6a3a3ae0d4e642b2a72e3a6f13ff1f528", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5264, "license_type": "permissive", "max_line_length": 100, "num_lines": 152, "path": "/make/test/TestRunner.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom .TestFileFunc import TestFileFunc\nfrom .utils import FunctionType, sourceLink\nfrom ..Message import testBegin, testComplete, testFailure, testPassed, testMetadata, testNYI\n\n# **************************************************************************************************\n\nclass TestRunner:\n def __init__(self, fixtureName, fixtureClass, writer=None):\n self.fixtureName = fixtureName\n self.fixtureClass = fixtureClass\n self.writer = writer\n self.order 
= getattr(fixtureClass, \"order\", None)\n\n def getTestNames(self):\n for attrName in dir(self.fixtureClass):\n if attrName.find(\"test\") != 0 and attrName.find(\"inspect\") != 0:\n continue\n\n attrValue = getattr(self.fixtureClass, attrName)\n if callable(attrValue):\n if hasattr(attrValue, \"__status__\"):\n testStatus = attrValue.__status__\n if testStatus == \"ignore\":\n continue\n else:\n testStatus = \"\"\n\n if isinstance(attrValue, TestFileFunc) and attrValue.fixture != self.fixtureClass:\n continue\n yield attrName, FunctionType, testStatus\n\n def run(self, targetName=None, options=None):\n failureCount = 0\n self.testCount = 0\n self.nyiCount = 0\n self.skipCount = 0\n\n for testName, testType, testStatus in self.getTestNames():\n if not testStatus == \"skip\" and (not targetName or testName == targetName):\n self.fixture = self.fixtureClass(testName, self.writer, **options)\n\n testCase = getattr(self.fixture, testName)\n\n self.writer << testBegin(\"Run\", testName, testCase.testFilePath)\n\n failures = []\n\n try:\n failures = self.runTestCase(testCase)\n failureCount += len(failures)\n\n except Exception, exc:\n self.writer << testComplete()\n raise\n\n self.writer << testComplete()\n\n return failureCount, self.testCount, self.nyiCount, self.skipCount\n\n def runTestCase(self, testCase):\n failures = []\n\n if hasattr(testCase, \"testContainer\"):\n for childTest, fileName, lineNo in testCase():\n failure = self.callTestCase(childTest, failures, fileName, lineNo)\n if failure:\n failures.append(failure)\n\n else:\n failure = self.callTestCase(testCase)\n if failure:\n failures.append(failure)\n\n return failures\n\n def callTestCase(self, testCase, failures, fileName=None, lineNo=None):\n from .TestFixture import TestException, RunException, TestAbortException\n\n if getattr(testCase, \"nyi\", None):\n self.nyiCount += 1\n self.writer << testNYI()\n elif getattr(testCase, \"skip\", None):\n self.skipCount += 1\n self.writer << 
testResult(\"skip\")\n else:\n self.testCount += 1\n\n self.fixture.setUp()\n\n try:\n testCase()\n self.writer << testPassed()\n\n except TestAbortException, exc:\n raise\n\n except TestException, exc:\n fixupException(exc, fileName, lineNo)\n self.writer << testFailure(\"failed\", fileName, lineNo,\n exc.vars['Expected'], exc.vars['Actual'])\n return exc\n\n except RunException, exc:\n fixupException(exc, fileName, lineNo)\n self.writer << testFailure(\"exception\", fileName, lineNo, None,\n exc.errors, exc.args, exc.source)\n raise\n\n except Exception,exc:\n fixupException(exc, fileName, lineNo)\n self.writer << testFailure(\"exception\", fileName, lineNo)\n raise\n finally:\n if self.fixture.metadata:\n self.writer << testMetadata(self.fixture.metadata)\n self.fixture.tearDown()\n\ndef fixupException(exc, fileName, lineNo):\n if not fileName:\n fileName, lineNo = getTracebackSource()\n\n exc.fileName = fileName\n exc.lineNo = lineNo\n\ndef sideBySide(left, right, leftHeader=None, rightHeader=None):\n leftLines = left.split(\"\\n\")\n rightLines = right.split(\"\\n\")\n ll = len(leftLines)\n rl = len(rightLines)\n\n maxWidthL = len(leftHeader) if leftHeader else 0\n for line in leftLines:\n maxWidthL = max(len(line), maxWidthL)\n\n maxWidthR = len(rightHeader) if rightHeader else 0\n for line in rightLines:\n maxWidthR = max(len(line), maxWidthR)\n\n format = \"\".join([\"| %-\", str(maxWidthL), \"s | %-\", str(maxWidthR), \"s |\"])\n lines = []\n\n if leftHeader and rightHeader:\n lines.append(format % (leftHeader, rightHeader))\n lines.append(\"=\" * (maxWidthL + maxWidthR + 7))\n\n for i in xrange(0, max(ll, rl)):\n leftLine = leftLines[i] if i < ll else \"\"\n rightLine = rightLines[i] if i < rl else \"\"\n\n lines.append(format % (leftLine, rightLine))\n\n return \"\\n\".join(lines)\n" }, { "alpha_fraction": 0.5443170070648193, "alphanum_fraction": 0.5464025139808655, "avg_line_length": 26.371429443359375, "blob_id": 
"070bac6403841489fe5a9e76023c80fc8f20f5b8", "content_id": "439d321375aa5f6f6638e64c15e34a58434f16dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "permissive", "max_line_length": 74, "num_lines": 35, "path": "/make/JSONWriter.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport json, sys\n\nclass JSONWriter(object):\n def __init__(self, stream=sys.stdout):\n self.stream = stream\n self.dataUIDs = 0\n self.dataQueue = {}\n\n def write(self, content):\n if self.dataQueue:\n dataSummary = {}\n for key,value in self.dataQueue.iteritems():\n if value:\n dataSummary[key] = len(value)\n\n packet = json.dumps({\"data\": dataSummary, \"content\": content})\n else:\n packet = json.dumps({\"content\": content})\n\n self.stream.write('%s\\n' % len(packet))\n self.stream.write(packet)\n\n for key,value in self.dataQueue.iteritems():\n if value:\n self.stream.write(str(value))\n\n self.stream.flush()\n\n self.dataQueue.clear()\n\n def enqueueData(self, data, typeName=None):\n self.dataUIDs += 1\n uid = self.dataUIDs\n self.dataQueue[uid] = data\n return uid\n" }, { "alpha_fraction": 0.5932452082633972, "alphanum_fraction": 0.5976505279541016, "avg_line_length": 25.153846740722656, "blob_id": "2af9ecefe5375b2493369824c4fffad2036547f8", "content_id": "9a9b166f61b3595a06defd20e1c05ff3cfbdb773", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 681, "license_type": "permissive", "max_line_length": 99, "num_lines": 26, "path": "/src/vm/UpCPointer.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpCPointer.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"UpBuffer.h\"\n#include \"Up/UpArena.h\"\n\n// 
************************************************************************************************\n\nUpCPointer* UpCPointerCreate(void* ptr) {\n UpCPointer* self = (UpCPointer*)UpObjectCreateWithClass(UP_BUILTIN(cpointer));\n self->ptr = ptr;\n return self;\n}\n\nchar* UpCPointerGetAddress(UpCPointer* self) {\n UpString* str = UpStringFormat(\"0x%i64\", self->ptr);\n return str->value;\n}\n\nbool UpCPointerIsNull(UpCPointer* self) {\n return !!self->ptr;\n}\n" }, { "alpha_fraction": 0.5350725650787354, "alphanum_fraction": 0.5384913086891174, "avg_line_length": 35.6716423034668, "blob_id": "78c1041907ee0900a67d19e127ad7fe37811c05e", "content_id": "9ba1e71a9bdd9ec84af8c4accedcfc3f08a2641c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 49141, "license_type": "permissive", "max_line_length": 100, "num_lines": 1340, "path": "/src/vm/UpDebug.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpDebug.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpChannel.h\"\n#include \"Up/UpException.h\"\n#include \"Up/UpFunction.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpCPointer.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpBool.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n#include \"Up/UpString.h\"\n#include \"UpSyntax.h\"\n#include \"UpBuffer.h\"\n#include \"UpStrTable.h\"\n#include \"UpIntTable.h\"\n#include \"UpArray.h\"\n#include \"Up/UpArena.h\"\n#include \"UpTestFunctions.h\"\n\n#include <mach/mach_time.h>\n\n// ************************************************************************************************\n\nstatic const char* _OperatorToString(UpOperator op) {\n switch (op) {\n case UpEqOp:\n return \"=\";\n case UpConcatStringOp:\n return 
\"concat\";\n case UpLookupOp:\n return \".[]\";\n case UpIndexOp:\n return \"[]\";\n case UpSliceOp:\n return \"[to]\";\n case UpOrOp:\n return \"|\";\n case UpAndOp:\n return \"&\";\n case UpNotOp:\n return \"!\";\n case UpEqualsOp:\n return \"==\";\n case UpNotEqualsOp:\n return \"!=\";\n case UpGreaterThanOp:\n return \">\";\n case UpGreaterThanEqualsOp:\n return \">=\";\n case UpLessThanOp:\n return \"<\";\n case UpLessThanEqualsOp:\n return \"<=\";\n case UpHasOp:\n return \"has\";\n case UpHasNotOp:\n return \"has not\";\n case UpIsOp:\n return \"is\";\n case UpIsNotOp:\n return \"is not\";\n case UpIsInOp:\n return \"is in\";\n case UpNotInOp:\n return \"not in\";\n case UpAddOp:\n return \"+\";\n case UpSubtractOp:\n return \"-\";\n case UpMultiplyOp:\n return \"*\";\n case UpDivideOp:\n return \"/\";\n case UpModOp:\n return \"//\";\n case UpPowOp:\n return \"**\";\n case UpConcatOp:\n return \"++\";\n case UpAddEqOp:\n return \"+=\";\n case UpSubtractEqOp:\n return \"-=\";\n case UpMultiplyEqOp:\n return \"*=\";\n case UpDivideEqOp:\n return \"/=\";\n case UpModEqOp:\n return \"//=\";\n case UpPowEqOp:\n return \"**=\";\n case UpConcatEqOp:\n return \"++=\";\n case UpPositiveOp:\n return \"+\";\n case UpNegativeOp:\n return \"-\";\n case UpDeleteOp:\n return \"-=\";\n case UpInOp:\n return \"in\";\n case UpReadOp:\n return \"<<\";\n case UpWriteOp:\n return \">>\";\n case UpWriteAllOp:\n return \"*>>\";\n case UpBindOp:\n return \";\";\n default: {\n return \"\";\n }\n }\n}\n\nstatic const char* _CPrimitiveToString(UpCPrimitive primitive) {\n switch (primitive) {\n case UpCVoidType:\n return \"void\";\n case UpCUpObjectType:\n return \"UpObject\";\n case UpCUpContextType:\n return \"UpContext\";\n case UpCStructType:\n return \"struct\";\n case UpCBoolType:\n return \"bool\";\n case UpCCharType:\n return \"char\";\n case UpCUCharType:\n return \"unsigned char\";\n case UpCShortType:\n return \"short\";\n case UpCUShortType:\n return \"unsigned short\";\n 
case UpCIntType:\n return \"int\";\n case UpCUIntType:\n return \"unsigned int\";\n case UpCLongType:\n return \"long\";\n case UpCULongType:\n return \"unsigned long\";\n case UpCLongLongType:\n return \"long long\";\n case UpCULongLongType:\n return \"unsigned long long\";\n case UpCFloatType:\n return \"float\";\n case UpCDoubleType:\n return \"unsigned double\";\n case UpCLongDoubleType:\n return \"long double\";\n default:\n return \"(unknown)\";\n }\n}\n\nstatic const char* _AccessModetoString(UpAccessMode mode) {\n switch (mode) {\n case UpNoAccess:\n return \"\";\n case UpPrivate:\n return \" access=\\\"private\\\"\";\n case UpPublic:\n return \" access=\\\"public\\\"\";\n case UpExtra:\n return \" access=\\\"extra\\\"\";\n default:\n return \"\";\n }\n}\n\nvoid _SyntaxToXML(UpBuffer* out, UpSyntax* node) {\n if (!node)\n return;\n switch (node->type) {\n case UpSetSyntaxType: {\n for (UpSyntaxItem* item = ((UpSetSyntax*)node)->first; item; item = item->next) {\n _SyntaxToXML(out, item->value);\n }\n break;\n }\n case UpDeclarationSyntaxType: {\n UpDeclarationSyntax* n = (UpDeclarationSyntax*)node;\n const char* access =\n n->access == UpPublic ? \" access=\\\"public\\\"\"\n : n->access == UpPrivate ? \" access=\\\"private\\\"\"\n : n->access == UpExtra ? 
\" access=\\\"extra\\\"\" : \"\";\n UpBufferOpen(out, \"<Declaration%s>\", access);\n _SyntaxToXML(out, n->head);\n _SyntaxToXML(out, n->body);\n if (n->where) {\n UpBufferOpen(out, \"<where>\");\n _SyntaxToXML(out, n->where);\n UpBufferClose(out, \"</where>\");\n }\n UpBufferClose(out, \"</Declaration>\");\n break;\n }\n case UpGroupSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<GroupExpression>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</GroupExpression>\");\n break;\n }\n case UpPrintSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<PrintExpression>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</PrintExpression>\");\n break;\n }\n case UpAssignmentSyntaxType: {\n UpAssignmentSyntax* n = (UpAssignmentSyntax*)node;\n UpBufferOpen(out, \"<AssignmentExpression op=\\\"%s\\\">\", _OperatorToString(n->op));\n _SyntaxToXML(out, n->left);\n _SyntaxToXML(out, n->right);\n UpBufferClose(out, \"</AssignmentExpression>\");\n break;\n }\n case UpBinarySyntaxType: {\n UpBinarySyntax* n = (UpBinarySyntax*)node;\n UpBufferOpen(out, \"<BinaryExpression op=\\\"%s\\\">\", _OperatorToString(n->op));\n _SyntaxToXML(out, n->left);\n _SyntaxToXML(out, n->right);\n UpBufferClose(out, \"</BinaryExpression>\");\n break;\n }\n case UpUnarySyntaxType: {\n UpUnarySyntax* n = (UpUnarySyntax*)node;\n UpBufferOpen(out, \"<UnaryExpression op=\\\"%s\\\">\", _OperatorToString(n->op));\n _SyntaxToXML(out, n->operand);\n UpBufferClose(out, \"</UnaryExpression>\");\n break;\n }\n case UpImportSyntaxType: {\n UpImportSyntax* n = (UpImportSyntax*)node;\n UpBufferOpen(out, \"<ImportExpression\");\n UpSyntax* set = n->names;\n for (UpSyntaxItem* item = ((UpSetSyntax*)set)->first; item; item = item->next) {\n UpBufferOpen(out, \"<module>\");\n _SyntaxToXML(out, item->value);\n UpBufferClose(out, \"</module>\");\n }\n UpBufferClose(out, \"</ImportExpression>\");\n break;\n }\n case UpCallSyntaxType: {\n UpCallSyntax* n = 
(UpCallSyntax*)node;\n const char* imperative = n->isImperative ? \" imperative=\\\"true\\\"\" : \"\";\n UpBufferOpen(out, \"<CallExpression%s>\", imperative);\n if (n->schedule) {\n UpBufferOpen(out, \"<schedule>\");\n _SyntaxToXML(out, n->schedule);\n UpBufferClose(out, \"</schedule>\");\n }\n _SyntaxToXML(out, n->callable);\n _SyntaxToXML(out, n->args);\n UpBufferClose(out, \"</CallExpression>\");\n break;\n }\n case UpArgumentSyntaxType: {\n for (UpArgumentSyntax* n = (UpArgumentSyntax*)node; n; n = (UpArgumentSyntax*)n->next) {\n const char* variadic = n->isVariadic ? \" variadic=\\\"true\\\"\" : \"\";\n if (n->name && n->externalName) {\n UpBufferOpen(out, \"<Argument name=\\\"%s\\\" external=\\\"%s\\\"%s>\", n->name,\n n->externalName, variadic);\n } else if (n->name) {\n UpBufferOpen(out, \"<Argument name=\\\"%s\\\">\", n->name, variadic);\n } else if (n->externalName) {\n UpBufferOpen(out, \"<Argument external=\\\"%s\\\">\", n->externalName, variadic);\n } else {\n UpBufferOpen(out, \"<Argument%s>\", variadic);\n }\n\n if (n->expr) {\n _SyntaxToXML(out, n->expr);\n }\n UpBufferClose(out, \"</Argument>\");\n }\n break;\n }\n case UpPropertySyntaxType: {\n UpPropertySyntax* n = (UpPropertySyntax*)node;\n UpBufferOpen(out, \"<PropertyExpression name=\\\"%s\\\">\", n->right);\n _SyntaxToXML(out, n->left);\n UpBufferClose(out, \"</PropertyExpression>\");\n break;\n }\n case UpIdSyntaxType: {\n UpIdSyntax* n = (UpIdSyntax*)node;\n UpBufferLine(out, \"<Id name=\\\"%s\\\"/>\", n->name);\n break;\n }\n case UpTypeIdSyntaxType: {\n UpIdSyntax* n = (UpIdSyntax*)node;\n UpBufferLine(out, \"<TypeId name=\\\"%s\\\"/>\", n->name);\n break;\n }\n case UpRangeSyntaxType: {\n UpRangeSyntax* n = (UpRangeSyntax*)node;\n UpBufferOpen(out, \"<Range%s>\", n->isThrough ? 
\" through=\\\"true\\\"\" : \"\");\n _SyntaxToXML(out, n->from);\n _SyntaxToXML(out, n->to);\n _SyntaxToXML(out, n->by);\n UpBufferClose(out, \"</Range>\");\n break;\n }\n case UpDefaultSyntaxType: {\n UpSyntax2* n = (UpSyntax2*)node;\n UpBufferOpen(out, \"<Default>\");\n _SyntaxToXML(out, n->value);\n _SyntaxToXML(out, n->value2);\n UpBufferClose(out, \"</Default>\");\n break;\n }\n case UpUndefinedSyntaxType: {\n UpBufferLine(out, \"<Undefined/>\");\n break;\n }\n case UpIntSyntaxType: {\n UpNumberSyntax* num = (UpNumberSyntax*)node;\n UpBufferLine(out, \"<Integer value=\\\"%i\\\"\", num->value.i);\n if (num->unit) {\n UpBufferPrint(out, \" unit=\\\"%s\\\"\", num->unit);\n }\n UpBufferPrint(out, \"/>\");\n break;\n }\n case UpLongSyntaxType: {\n UpNumberSyntax* num = (UpNumberSyntax*)node;\n UpBufferLine(out, \"<Long value=\\\"%lli\\\"\", num->value.l);\n if (num->unit) {\n UpBufferPrint(out, \" unit=\\\"%s\\\"\", num->unit);\n }\n UpBufferPrint(out, \"/>\");\n break;\n }\n case UpFloatSyntaxType: {\n UpNumberSyntax* num = (UpNumberSyntax*)node;\n UpBufferLine(out, \"<Float value=\\\"%.14g\\\"\", num->value.f);\n if (num->unit) {\n UpBufferPrint(out, \" unit=\\\"%s\\\"\", num->unit);\n }\n UpBufferPrint(out, \"/>\");\n break;\n }\n case UpStringSyntaxType: {\n UpStringSyntax* str = (UpStringSyntax*)node;\n UpBufferLine(out, \"<String value=\\\"%s\\\"/>\", str->value);\n break;\n }\n case UpListSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<List>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</List>\");\n break;\n }\n case UpMapSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<Map>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</Map>\");\n break;\n }\n case UpChannelSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n if (n->value) {\n UpBufferOpen(out, \"<Channel>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</Channel>\");\n } else {\n UpBufferLine(out, \"<Channel/>\");\n }\n 
break;\n }\n case UpReturnSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<Return>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</Return>\");\n break;\n }\n case UpBreakSyntaxType: {\n UpBufferOpen(out, \"<Break/>\");\n break;\n }\n case UpContinueSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n if (n->value) {\n UpBufferOpen(out, \"<Continue>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</Continue>\");\n } else {\n UpBufferLine(out, \"<Continue/>\");\n }\n break;\n }\n case UpThrowSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<Throw>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</Throw>\");\n break;\n }\n case UpFunctionSyntaxType: {\n UpFunctionSyntax* n = (UpFunctionSyntax*)node;\n const char* expr = n->isExpression ? \" expression=\\\"true\\\"\" : \"\";\n UpBufferOpen(out, \"<Function%s>\", expr);\n if (n->head) {\n UpBufferOpen(out, \"<head>\");\n _SyntaxToXML(out, n->head);\n UpBufferClose(out, \"</head>\");\n }\n _SyntaxToXML(out, n->body);\n UpBufferClose(out, \"</Function>\");\n break;\n }\n case UpIteratorSyntaxType: {\n UpIteratorSyntax* n = (UpIteratorSyntax*)node;\n const char* ison = n->isOn ? \" on=\\\"true\\\"\" : \"\";\n const char* iswhile = n->isWhile ? 
\" while=\\\"true\\\"\" : \"\";\n UpBufferOpen(out, \"<Iterator%s%s>\", ison, iswhile);\n UpBufferOpen(out, \"<left>\");\n _SyntaxToXML(out, n->left);\n UpBufferClose(out, \"</left>\");\n if (n->iterable) {\n UpBufferOpen(out, \"<iterable>\");\n _SyntaxToXML(out, n->iterable);\n UpBufferClose(out, \"</iterable>\");\n }\n if (n->clause) {\n UpBufferOpen(out, \"<clause>\");\n _SyntaxToXML(out, n->clause);\n UpBufferClose(out, \"</clause>\");\n }\n _SyntaxToXML(out, n->body);\n UpBufferClose(out, \"</Iterator>\");\n break;\n }\n case UpCFunctionSyntaxType: {\n UpCFunctionSyntax* n = (UpCFunctionSyntax*)node;\n if (n->name) {\n UpBufferOpen(out, \"<CFunction name=\\\"%s\\\"\", n->name);\n } else {\n UpBufferOpen(out, \"<CFunction\");\n }\n if (n->library) {\n UpBufferPrint(out, \" library=\\\"%s\\\"\", n->library);\n }\n UpBufferPrint(out, \">\");\n UpSetSyntax* args = (UpSetSyntax*)n->args;\n if (args) {\n for (UpSyntaxItem* item = args->first; item; item = item->next) {\n UpCArgumentSyntax* arg = (UpCArgumentSyntax*)item->value;\n UpBufferLine(out, \"<argument\");\n if (arg->name) {\n UpBufferPrint(out, \" name=\\\"%s\\\"\", arg->name);\n }\n _SyntaxToXML(out, arg->type);\n }\n }\n UpBufferLine(out, \"<return\");\n _SyntaxToXML(out, n->returns);\n UpBufferClose(out, \"</CFunction>\");\n break;\n }\n case UpCTypeSyntaxType: {\n UpCTypeSyntax* n = (UpCTypeSyntax*)node;\n if (n) {\n UpBufferPrint(out, \" type=\\\"%s\\\"\", n->name);\n if (n->pointerCount) {\n UpBufferPrint(out, \" pointers=\\\"%d\\\"\", n->pointerCount);\n }\n UpBufferPrint(out, \"/>\");\n } else {\n UpBufferLine(out, \" type=\\\"void\\\"/>\");\n }\n break;\n }\n case UpIfSyntaxType: {\n UpIfSyntax* n = (UpIfSyntax*)node;\n UpBufferOpen(out, \"<If>\");\n _SyntaxToXML(out, n->condition);\n\n _SyntaxToXML(out, n->transforms);\n\n if (n->elsex) {\n UpBufferOpen(out, \"<else>\");\n _SyntaxToXML(out, n->elsex);\n UpBufferClose(out, \"</else>\");\n }\n\n UpBufferClose(out, \"</If>\");\n break;\n }\n case 
UpWhileSyntaxType: {\n UpWhileSyntax* n = (UpWhileSyntax*)node;\n UpBufferOpen(out, \"<While>\");\n _SyntaxToXML(out, n->condition);\n _SyntaxToXML(out, n->body);\n UpBufferClose(out, \"</While>\");\n break;\n }\n case UpForSyntaxType: {\n UpForSyntax* n = (UpForSyntax*)node;\n UpBufferOpen(out, \"<For>\");\n _SyntaxToXML(out, n->left);\n\n UpBufferOpen(out, \"<In>\");\n _SyntaxToXML(out, n->right);\n UpBufferClose(out, \"</In>\");\n\n _SyntaxToXML(out, n->body);\n\n UpBufferClose(out, \"</For>\");\n break;\n }\n case UpTrySyntaxType: {\n UpTrySyntax* n = (UpTrySyntax*)node;\n UpBufferOpen(out, \"<Try>\");\n _SyntaxToXML(out, n->tryBlock);\n _SyntaxToXML(out, n->catchBlocks);\n _SyntaxToXML(out, n->finallyBlock);\n UpBufferClose(out, \"</Try>\");\n break;\n }\n case UpCatchSyntaxType: {\n UpCatchSyntax* n = (UpCatchSyntax*)node;\n UpBufferOpen(out, \"<Catch>\");\n _SyntaxToXML(out, n->binding);\n _SyntaxToXML(out, n->statements);\n UpBufferClose(out, \"</Catch>\");\n break;\n }\n case UpFinallySyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n UpBufferOpen(out, \"<Finally>\");\n _SyntaxToXML(out, n->value);\n UpBufferClose(out, \"</Finally>\");\n break;\n }\n case UpCastSyntaxType: {\n UpCastSyntax* n = (UpCastSyntax*)node;\n UpBufferOpen(out, \"<Cast>\");\n _SyntaxToXML(out, n->expr);\n _SyntaxToXML(out, n->typeSig);\n UpBufferClose(out, \"</Cast>\");\n break;\n }\n case UpTypeSyntaxType: {\n UpTypeSyntax* n = (UpTypeSyntax*)node;\n UpBufferOpen(out, \"<Type>\");\n _SyntaxToXML(out, n->params);\n UpBufferClose(out, \"</Type>\");\n break;\n }\n case UpSubtypeSyntaxType: {\n UpSyntax2* n = (UpSyntax2*)node;\n UpBufferOpen(out, \"<Subtype>\");\n _SyntaxToXML(out, n->value);\n _SyntaxToXML(out, n->value2);\n UpBufferClose(out, \"</Subtype>\");\n break;\n }\n case UpWhereSyntaxType: {\n UpSyntax2* n = (UpSyntax2*)node;\n UpBufferOpen(out, \"<Where>\");\n _SyntaxToXML(out, n->value);\n _SyntaxToXML(out, n->value2);\n UpBufferClose(out, \"</Where>\");\n break;\n }\n case 
UpTransformSyntaxType: {\n UpTransformSyntax* n = (UpTransformSyntax*)node;\n UpBufferOpen(out, \"<Transform>\");\n _SyntaxToXML(out, n->condition);\n _SyntaxToXML(out, n->body);\n UpBufferClose(out, \"</Transform>\");\n _SyntaxToXML(out, n->next);\n break;\n }\n case UpIsSyntaxType: {\n UpSyntax2* n = (UpSyntax2*)node;\n UpBufferOpen(out, \"<Is>\");\n _SyntaxToXML(out, n->value);\n _SyntaxToXML(out, n->value2);\n UpBufferClose(out, \"</Is>\");\n break;\n }\n case UpHasSyntaxType: {\n UpSyntax2* n = (UpSyntax2*)node;\n UpBufferOpen(out, \"<Has>\");\n _SyntaxToXML(out, n->value);\n _SyntaxToXML(out, n->value2);\n UpBufferClose(out, \"</Has>\");\n break;\n }\n default:\n UpBufferLine(out, \"<Unknown %d/>\", node->type);\n break;\n }\n}\n\nstatic void _PrintOp(UpBuffer* out, const char* op, int line, int loc) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s\\n\", loc, op);\n}\n\nstatic void _PrintOp1(UpBuffer* out, const char* op, int line, int loc, int arg1) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s %d\\n\", loc, op, arg1);\n}\n\nstatic void _PrintOp1ll(UpBuffer* out, const char* op, int line, int loc, long long arg1) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s %lld\\n\", loc, op, arg1);\n}\n\nstatic void _PrintOp1f(UpBuffer* out, const char* op, int line, int loc, double arg1) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s %f\\n\", loc, op, arg1);\n}\n\nstatic void _PrintOp1s(UpBuffer* out, const char* op, int line, int loc, int arg1,\n const char* name) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n 
UpBufferPrint(out, \"%4d %-20s %-6d (%s)\\n\", loc, op, arg1, name);\n}\n\nstatic void _PrintOp1vs(UpBuffer* out, const char* op, int line, int loc, void* arg1,\n const char* name) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s %-12x (%s)\\n\", loc, op, arg1, name);\n}\n\nstatic void _PrintOp2(UpBuffer* out, const char* op, int line, int loc, int arg1, int arg2) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s %d %d\\n\", loc, op, arg1, arg2);\n}\n\nstatic void _PrintOp2s(UpBuffer* out, const char* op, int line, int loc, int arg1, int arg2,\n const char* name) {\n if (line == -1) {\n UpBufferPrint(out, \"%-6s\", \" \");\n } else {\n UpBufferPrint(out, \"%-6d\", line);\n }\n UpBufferPrint(out, \"%4d %-20s %d %-6d (%s)\\n\", loc, op, arg1, arg2, name);\n}\n\nstatic char* _ReadInt(char* buffer, int* value) {\n memcpy(value, buffer, sizeof(int));\n return buffer += sizeof(int);\n}\n\nstatic char* _ReadLongLong(char* buffer, long long* value) {\n memcpy(value, buffer, sizeof(long long));\n return buffer += sizeof(long long);\n}\n\nstatic char* _ReadDouble(char* buffer, double* value) {\n memcpy(value, buffer, sizeof(double));\n return buffer += sizeof(double);\n}\n\nstatic char* _ReadPointer(char* buffer, void** value) {\n memcpy(value, buffer, sizeof(void*));\n return buffer += sizeof(void*);\n}\n\nvoid _UpBytecodeToString(UpBuffer* out, UpFunctionDef* def) {\n unsigned int bytecodeLen = UpBufferCount(def->ops);\n char* bytecode = def->ops->buf;\n char* maxBytecode = def->ops->buf + bytecodeLen;\n char* cursor = def->ops->buf;\n\n UpLineItem nextLineItem;\n UpArrayGet(def->lines, 0, &nextLineItem);\n int line = -1;\n int lineItemIndex = 0;\n\n while (cursor < maxBytecode) {\n int loc = (int)(cursor-bytecode);\n char* lineCursor = cursor;\n\n int opcode;\n cursor = 
_ReadInt(cursor, &opcode);\n\n if ((lineCursor-bytecode) == nextLineItem.opOffset) {\n line = nextLineItem.lineOffset;\n ++lineItemIndex;\n UpArrayGet(def->lines, lineItemIndex, &nextLineItem);\n } else {\n line = -1;\n }\n\n switch (opcode) {\n case UpInstructionPop: {\n _PrintOp(out, \"POP\", line, loc);\n break;\n }\n case UpInstructionJump: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n _PrintOp1(out, \"JUMP\", line, loc, offset);\n break;\n }\n case UpInstructionJumpIf: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n _PrintOp1(out, \"JUMP_IF\", line, loc, offset);\n break;\n }\n case UpInstructionJumpIfNot: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n _PrintOp1(out, \"JUMP_IF_NOT\", line, loc, offset);\n break;\n }\n case UpInstructionJumpIfDefined: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n _PrintOp1(out, \"JUMP_IF_DEFINED\", line, loc, offset);\n break;\n }\n case UpInstructionJumpIfNotDefined: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n _PrintOp1(out, \"JUMP_IF_NOT_DEFINED\", line, loc, offset);\n break;\n }\n case UpInstructionJumpIfHas: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &offset);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp2s(out, \"JUMP_IF_HAS\", line, loc, offset, symbol, name);\n break;\n }\n case UpInstructionJumpIfHasNot: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp2s(out, \"JUMP_IF_HAS_NOT\", line, loc, offset, symbol, name);\n break;\n }\n case UpInstructionLoadObject: {\n UpObject* value;\n cursor = _ReadPointer(cursor, (void**)&value);\n\n char* str = UpObjectToString(value);\n _PrintOp1vs(out, \"LOAD_OBJECT\", line, loc, value, str);\n break;\n }\n case UpInstructionLoadInteger: {\n int value;\n cursor = _ReadInt(cursor, &value);\n cursor += sizeof(void*) - sizeof(int);\n\n 
_PrintOp1(out, \"LOAD_INTEGER\", line, loc, value);\n break;\n }\n case UpInstructionLoadLong: {\n long long value;\n cursor = _ReadLongLong(cursor, &value);\n cursor += sizeof(void*) - sizeof(long long);\n\n _PrintOp1ll(out, \"LOAD_LONG\", line, loc, value);\n break;\n }\n case UpInstructionLoadFloat: {\n double value;\n cursor = _ReadDouble(cursor, &value);\n cursor += sizeof(void*) - sizeof(double);\n\n _PrintOp1f(out, \"LOAD_FLOAT\", line, loc, value);\n break;\n }\n case UpInstructionGetLocal: {\n int frameIndex, localIndex;\n cursor = _ReadInt(cursor, &frameIndex);\n cursor = _ReadInt(cursor, &localIndex);\n\n _PrintOp2(out, \"GET_LOCAL\", line, loc, frameIndex, localIndex);\n break;\n }\n case UpInstructionSetLocal: {\n int frameIndex, localIndex;\n cursor = _ReadInt(cursor, &frameIndex);\n cursor = _ReadInt(cursor, &localIndex);\n\n _PrintOp2(out, \"SET_LOCAL\", line, loc, frameIndex, localIndex);\n break;\n }\n case UpInstructionDeleteLocal: {\n int frameIndex, localIndex;\n cursor = _ReadInt(cursor, &frameIndex);\n cursor = _ReadInt(cursor, &localIndex);\n\n _PrintOp2(out, \"DELETE_LOCAL\", line, loc, frameIndex, localIndex);\n break;\n }\n case UpInstructionSetArgument: {\n int localIndex;\n cursor = _ReadInt(cursor, &localIndex);\n\n _PrintOp1(out, \"SET_ARGUMENT\", line, loc, localIndex);\n break;\n }\n case UpInstructionGetProperty: {\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp1s(out, \"GET_PROPERTY\", line, loc, symbol, name);\n break;\n }\n case UpInstructionSetProperty: {\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp1s(out, \"SET_PROPERTY\", line, loc, symbol, name);\n break;\n }\n case UpInstructionDeleteProperty: {\n break;\n }\n case UpInstructionValidateProperty: {\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp1s(out, 
\"VALIDATE_PROPERTY\", line, loc, symbol, name);\n break;\n }\n case UpInstructionCallProperty: {\n int argsCount;\n cursor = _ReadInt(cursor, &argsCount);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp2s(out, \"CALL_PROPERTY\", line, loc, argsCount, symbol, name);\n break;\n }\n case UpInstructionCallOperator: {\n int argsCount;\n cursor = _ReadInt(cursor, &argsCount);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp2s(out, \"CALL_OPERATOR\", line, loc, argsCount, symbol, name);\n break;\n }\n case UpInstructionCallOperator2: {\n int argsCount;\n cursor = _ReadInt(cursor, &argsCount);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* name = UpGetSymbolName(symbol);\n _PrintOp2s(out, \"CALL_OPERATOR2\", line, loc, argsCount, symbol, name);\n break;\n }\n case UpInstructionIs: {\n _PrintOp(out, \"IS\", line, loc);\n break;\n }\n case UpInstructionIsDefined: {\n _PrintOp(out, \"IS_DEFINED\", line, loc);\n break;\n }\n case UpInstructionImport: {\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* fullName = UpGetSymbolName(symbol);\n _PrintOp1s(out, \"IMPORT\", line, loc, symbol, fullName);\n break;\n }\n case UpInstructionSetImport: {\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n const char* fullName = UpGetSymbolName(symbol);\n _PrintOp1s(out, \"SET_IMPORT\", line, loc, symbol, fullName);\n break;\n }\n case UpInstructionCall: {\n int argsCount;\n cursor = _ReadInt(cursor, &argsCount);\n\n _PrintOp1(out, \"CALL\", line, loc, argsCount);\n break;\n }\n case UpInstructionReturn: {\n _PrintOp(out, \"RETURN\", line, loc);\n break;\n }\n case UpInstructionThrow: {\n _PrintOp(out, \"THROW\", line, loc);\n break;\n }\n case UpInstructionNewObject: {\n UpClassDef* classDef;\n cursor = _ReadPointer(cursor, (void*)&classDef);\n\n _PrintOp1vs(out, \"NEW_OBJECT\", line, loc, classDef, 
classDef ? classDef->name : \"\");\n break;\n }\n case UpInstructionNewList: {\n int itemCount;\n cursor = _ReadInt(cursor, &itemCount);\n\n _PrintOp1(out, \"NEW_LIST\", line, loc, itemCount);\n break;\n }\n case UpInstructionNewMap: {\n _PrintOp(out, \"NEW_MAP\", line, loc);\n break;\n }\n case UpInstructionNewClass: {\n UpClassDef* def;\n cursor = _ReadPointer(cursor, (void**)&def);\n\n _PrintOp1vs(out, \"NEW_CLASS\", line, loc, def, def->name);\n break;\n }\n case UpInstructionBindClass: {\n UpObject* value;\n cursor = _ReadPointer(cursor, (void**)&value);\n\n char* str = UpObjectToString(value);\n _PrintOp1vs(out, \"BIND_CLASS\", line, loc, value, str);\n break;\n }\n case UpInstructionNewGenerator: {\n UpFunctionDef* def;\n cursor = _ReadPointer(cursor, (void**)&def);\n\n _PrintOp1vs(out, \"NEW_GENERATOR\", line, loc, def, def->name);\n break;\n }\n case UpInstructionNewFunction: {\n UpFunctionDef* def;\n cursor = _ReadPointer(cursor, (void**)&def);\n\n _PrintOp1vs(out, \"NEW_FUNCTION\", line, loc, def, def->name);\n break;\n }\n case UpInstructionNewCFunction: {\n UpCFunctionDef* def;\n cursor = _ReadPointer(cursor, (void**)&def);\n\n _PrintOp1vs(out, \"NEW_C_FUNCTION\", line, loc, def, UpCFunctionDefGetName(def));\n break;\n }\n case UpInstructionSchedule: {\n _PrintOp(out, \"SCHEDULE\", line, loc);\n break;\n }\n case UpInstructionSubscribe: {\n _PrintOp(out, \"SUBSCRIBE\", line, loc);\n break;\n }\n case UpInstructionRead: {\n _PrintOp(out, \"READ\", line, loc);\n break;\n }\n case UpInstructionWrite: {\n _PrintOp(out, \"WRITE\", line, loc);\n break;\n }\n case UpInstructionWriteAwaitRead: {\n _PrintOp(out, \"WRITE_AWAIT_READ\", line, loc);\n break;\n }\n case UpInstructionWriteAwaitResponse: {\n _PrintOp(out, \"WRITE_AWAIT_RESPONSE\", line, loc);\n break;\n }\n case UpInstructionRespond: {\n _PrintOp(out, \"RESPOND\", line, loc);\n break;\n }\n case UpInstructionJumpIfChannelClosed: {\n int jumpOffset;\n cursor = _ReadInt(cursor, &jumpOffset);\n 
_PrintOp1(out, \"JUMP_IF_CHANNEL_CLOSED\", line, loc, jumpOffset);\n break;\n }\n case UpInstructionBeginTry: {\n int catchOpOffset, finallyOpOffset;\n cursor = _ReadInt(cursor, &catchOpOffset);\n cursor = _ReadInt(cursor, &finallyOpOffset);\n\n _PrintOp2(out, \"BEGIN_TRY\", line, loc, catchOpOffset, finallyOpOffset);\n break;\n }\n case UpInstructionEndTry: {\n _PrintOp(out, \"END_TRY\", line, loc);\n break;\n }\n case UpInstructionCatch: {\n _PrintOp(out, \"CATCH\", line, loc);\n break;\n }\n case UpInstructionFinally: {\n _PrintOp(out, \"FINALLY\", line, loc);\n break;\n }\n default:\n _PrintOp1(out, \"UNKNOWN\", line, loc, opcode);\n cursor = maxBytecode+1;\n break;\n }\n }\n}\n\n// ************************************************************************************************\n\nuint64_t UpGetProbeTime() {\n return mach_absolute_time();\n}\n\nchar* UpSyntaxToXML(UpSyntax* node) {\n UpBuffer* out = UpBufferCreate(UpGetHeap());\n _SyntaxToXML(out, node);\n return UpBufferString(out);\n}\n\nchar* UpBytecodeToString(UpFunctionDef* functionDef) {\n UpBuffer* out = UpBufferCreate(UpGetHeap());\n _UpBytecodeToString(out, functionDef);\n return UpBufferString(out);\n}\n\nchar* UpObjectToString(UpObject* self) {\n UpArena* heap = UpGetHeap();\n\n UpBuiltinClasses* builtins = UpGetBuiltinClasses();\n\n if (UpIsClass(self)) {\n UpClass* cls = (UpClass*)self;\n return UpArenaFormatString(heap, \"<Class %s>\", cls->def->name);\n } else if (UpIsTypeOf(self, builtins->exceptionClass)) {\n UpException* exc = (UpException*)self;\n if (exc->description) {\n return UpArenaFormatString(heap, \"<Exception '%s'>\", exc->description);\n } else {\n return \"<Exception>\";\n }\n } else if (UpIsTypeOf(self, builtins->channelClass)) {\n // UpChannel* channel = (UpChannel*)self;\n return UpArenaFormatString(heap, \"<Channel>\");\n } else if (UpIsTypeOf(self, builtins->functionClass)) {\n UpFunction* fn = (UpFunction*)self;\n return UpArenaFormatString(heap, \"<Function %s>\", 
fn->def->name);\n } else if (UpIsTypeOf(self, builtins->cfunctionClass)) {\n UpCFunction* fn = (UpCFunction*)self;\n return UpArenaFormatString(heap, \"<CFunction %s>\", UpCFunctionGetName(fn));\n } else if (UpIsTypeOf(self, builtins->cpointerClass)) {\n UpCPointer* ptr = (UpCPointer*)self;\n return UpArenaFormatString(heap, \"<CPointer 0x%i64>\", ptr->ptr);\n } else if (UpIsTypeOf(self, builtins->nullClass)) {\n return self == UpNull() ? \"null\" : \"undefined\";\n } else if (UpIsTypeOf(self, builtins->boolClass)) {\n return self == UpTrue() ? \"true\" : \"false\";\n } else if (UpIsTypeOf(self, builtins->integerClass)) {\n return UpIntegerToString((UpInteger*)self, 10)->value;\n } else if (UpIsTypeOf(self, builtins->longClass)) {\n return UpLongToString((UpLong*)self, 10)->value;\n } else if (UpIsTypeOf(self, builtins->floatClass)) {\n return UpFloatToString((UpFloat*)self)->value;\n } else if (UpIsTypeOf(self, builtins->stringClass)) {\n return ((UpString*)self)->value;\n } else if (UpIsTypeOf(self, builtins->listClass)) {\n return \"<List>\";\n } else if (UpIsTypeOf(self, builtins->mapClass)) {\n return \"<Map>\";\n } else {\n\n if (self->cls) {\n const char* typeName = self->cls->def->name;\n return UpArenaFormatString(heap, \"<Object %s>\", typeName);\n } else {\n return \"<Object>\";\n }\n }\n\n return \"\";\n}\n\nstatic void _GetterWriter(UpStrTable* self, UpSymbol key, void* value, UpBuffer* buf) {\n UpGetterDef* getter = (UpGetterDef*)value;\n UpBufferLine(buf, \"%s\", getter->name);\n}\n\nstatic void _FuncWriter(UpBuffer* buf, UpFunctionDef* funcDef);\n\nstatic void _ClassWriter(UpBuffer* buf, UpClassDef* classDef) {\n UpBufferOpen(buf, \"%s\", classDef->name);\n\n if (classDef->members) {\n for (int i = 0; i < UpArrayCount(classDef->members); ++i) {\n UpFunctionDef* def;\n UpArrayGet(classDef->members, i, &def);\n _FuncWriter(buf, def);\n }\n }\n UpBufferClose(buf, \"\");\n}\n\nstatic void _FuncWriter(UpBuffer* buf, UpFunctionDef* funcDef) {\n 
UpBufferOpen(buf, \"%s =\", funcDef->name);\n\n if (funcDef->arguments) {\n UpArray* args = funcDef->arguments;\n for (int i = 0; i < UpArrayCount(args); ++i) {\n UpArgumentDef* arg;\n UpArrayGet(args, i, &arg);\n UpBufferLine(buf, \"%s:Any\", UpGetSymbolName(arg->name));\n }\n }\n\n UpScope* scope = funcDef->scope;\n if (scope->funcDefs) {\n for (int i = 0; i < UpArrayCount(scope->funcDefs); ++i) {\n UpFunctionDef* def;\n UpArrayGet(scope->funcDefs, i, &def);\n _FuncWriter(buf, def);\n }\n }\n if (scope->classDefs) {\n for (int i = 0; i < UpArrayCount(scope->classDefs); ++i) {\n UpClassDef* def;\n UpArrayGet(scope->classDefs, i, &def);\n _ClassWriter(buf, def);\n }\n }\n\n UpBufferClose(buf, NULL);\n}\n\nchar* UpFunctionDefToString(UpFunctionDef* funcDef) {\n UpBuffer* buf = UpBufferCreate(UpGetHeap());\n\n _FuncWriter(buf, funcDef);\n\n return UpBufferString(buf);\n}\n\nchar* UpClassDefToString(UpClassDef* classDef) {\n UpBuffer* buf = UpBufferCreate(UpGetHeap());\n\n UpIntTableIterate(classDef->getters, (UpIntTableIterator)_GetterWriter, buf);\n\n return UpBufferString(buf);\n}\n\nchar* UpObjectClassDefsToString(UpObject* object) {\n UpBuffer* buf = UpBufferCreate(UpGetHeap());\n\n UpBufferLine(buf, \"OBJECT %x========\", object);\n for (UpClass* cls = object->cls; cls; cls = cls->baseClass) {\n UpBufferLine(buf, \"* CLASS %x %s\", cls->def, cls->def->name);\n UpIntTableIterate(cls->def->getters, (UpIntTableIterator)_GetterWriter, buf);\n }\n\n UpBufferLine(buf, \"========\");\n\n return UpBufferString(buf);\n}\n\nvoid _StrTableWriter(UpStrTable* self, const char* key, void* value, UpBuffer* buf) {\n UpBufferLine(buf, \"%s\", key);\n}\n\nchar* UpStrTableToString(UpStrTable* self) {\n UpBuffer* buf = UpBufferCreate(self->heap);\n\n UpStrTableIterate(self, (UpStrTableIterator)_StrTableWriter, buf);\n\n return UpBufferString(buf);\n}\n\nvoid UpMapMappings() {\n UP_MAPPING(UpProbeMappingOpcodes, \"Null\", UpInstructionNull);\n UP_MAPPING(UpProbeMappingOpcodes, 
\"Pop\", UpInstructionPop);\n UP_MAPPING(UpProbeMappingOpcodes, \"Jump\", UpInstructionJump);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIf\", UpInstructionJumpIf);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIfNot\", UpInstructionJumpIfNot);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIfDefined\", UpInstructionJumpIfDefined);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIfNotDefined\", UpInstructionJumpIfNotDefined);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIfHas\", UpInstructionJumpIfHas);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIfHasNot\", UpInstructionJumpIfHasNot);\n UP_MAPPING(UpProbeMappingOpcodes, \"LoadObject\", UpInstructionLoadObject);\n UP_MAPPING(UpProbeMappingOpcodes, \"LoadInteger\", UpInstructionLoadInteger);\n UP_MAPPING(UpProbeMappingOpcodes, \"LoadLong\", UpInstructionLoadLong);\n UP_MAPPING(UpProbeMappingOpcodes, \"LoadFloat\", UpInstructionLoadFloat);\n UP_MAPPING(UpProbeMappingOpcodes, \"GetLocal\", UpInstructionGetLocal);\n UP_MAPPING(UpProbeMappingOpcodes, \"SetLocal\", UpInstructionSetLocal);\n UP_MAPPING(UpProbeMappingOpcodes, \"DeleteLocal\", UpInstructionDeleteLocal);\n UP_MAPPING(UpProbeMappingOpcodes, \"SetArgument\", UpInstructionSetArgument);\n UP_MAPPING(UpProbeMappingOpcodes, \"GetProperty\", UpInstructionGetProperty);\n UP_MAPPING(UpProbeMappingOpcodes, \"LookupProperty\", UpInstructionLookupProperty);\n UP_MAPPING(UpProbeMappingOpcodes, \"SetProperty\", UpInstructionSetProperty);\n UP_MAPPING(UpProbeMappingOpcodes, \"ValidateProperty\", UpInstructionValidateProperty);\n UP_MAPPING(UpProbeMappingOpcodes, \"DeleteProperty\", UpInstructionDeleteProperty);\n UP_MAPPING(UpProbeMappingOpcodes, \"CallProperty\", UpInstructionCallProperty);\n UP_MAPPING(UpProbeMappingOpcodes, \"CallOperator\", UpInstructionCallOperator);\n UP_MAPPING(UpProbeMappingOpcodes, \"CallOperator2\", UpInstructionCallOperator2);\n UP_MAPPING(UpProbeMappingOpcodes, \"Is\", UpInstructionIs);\n UP_MAPPING(UpProbeMappingOpcodes, \"IsDefined\", UpInstructionIsDefined);\n 
UP_MAPPING(UpProbeMappingOpcodes, \"Import\", UpInstructionImport);\n UP_MAPPING(UpProbeMappingOpcodes, \"SetImport\", UpInstructionSetImport);\n UP_MAPPING(UpProbeMappingOpcodes, \"Call\", UpInstructionCall);\n UP_MAPPING(UpProbeMappingOpcodes, \"Return\", UpInstructionReturn);\n UP_MAPPING(UpProbeMappingOpcodes, \"Throw\", UpInstructionThrow);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewObject\", UpInstructionNewObject);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewList\", UpInstructionNewList);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewMap\", UpInstructionNewMap);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewClass\", UpInstructionNewClass);\n UP_MAPPING(UpProbeMappingOpcodes, \"BindClass\", UpInstructionBindClass);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewFunction\", UpInstructionNewGenerator);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewFunction\", UpInstructionNewFunction);\n UP_MAPPING(UpProbeMappingOpcodes, \"NewCFunction\", UpInstructionNewCFunction);\n UP_MAPPING(UpProbeMappingOpcodes, \"Schedule\", UpInstructionSchedule);\n UP_MAPPING(UpProbeMappingOpcodes, \"Subscribe\", UpInstructionSubscribe);\n UP_MAPPING(UpProbeMappingOpcodes, \"Read\", UpInstructionRead);\n UP_MAPPING(UpProbeMappingOpcodes, \"Write\", UpInstructionWrite);\n UP_MAPPING(UpProbeMappingOpcodes, \"WriteAwaitRead\", UpInstructionWriteAwaitRead);\n UP_MAPPING(UpProbeMappingOpcodes, \"WriteAwaitResponse\", UpInstructionWriteAwaitResponse);\n UP_MAPPING(UpProbeMappingOpcodes, \"Respond\", UpInstructionRespond);\n UP_MAPPING(UpProbeMappingOpcodes, \"JumpIfChannelClosed\", UpInstructionJumpIfChannelClosed);\n UP_MAPPING(UpProbeMappingOpcodes, \"BeginTry\", UpInstructionBeginTry);\n UP_MAPPING(UpProbeMappingOpcodes, \"EndTry\", UpInstructionEndTry);\n UP_MAPPING(UpProbeMappingOpcodes, \"Catch\", UpInstructionCatch);\n UP_MAPPING(UpProbeMappingOpcodes, \"Finally\", UpInstructionFinally);\n\n UP_MAPPING(UpProbeMappingSyntax, \"No\", UpNoSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Set\", 
UpSetSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Declaration\", UpDeclarationSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Group\", UpGroupSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Print\", UpPrintSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Assignment\", UpAssignmentSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Binary\", UpBinarySyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Unary\", UpUnarySyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Import\", UpImportSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"ImportWildcard\", UpImportWildcardSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Call\", UpCallSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Argument\", UpArgumentSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Property\", UpPropertySyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Id\", UpIdSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"TypeId\", UpTypeIdSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Range\", UpRangeSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Default\", UpDefaultSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Undefined\", UpUndefinedSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Int\", UpIntSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Long\", UpLongSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Float\", UpFloatSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"String\", UpStringSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"List\", UpListSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Map\", UpMapSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Channel\", UpChannelSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Return\", UpReturnSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Break\", UpBreakSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Continue\", UpContinueSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Throw\", UpThrowSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Function\", UpFunctionSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Iterator\", 
UpIteratorSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"CFunction\", UpCFunctionSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"CType\", UpCTypeSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"CArgument\", UpCArgumentSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"If\", UpIfSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"While\", UpWhileSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"For\", UpForSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Try\", UpTrySyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Catch\", UpCatchSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Finally\", UpFinallySyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Cast\", UpCastSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Type\", UpTypeSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Subtype\", UpSubtypeSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Is\", UpIsSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Has\", UpHasSyntaxType);\n UP_MAPPING(UpProbeMappingSyntax, \"Where\", UpWhereSyntaxType);\n}\n" }, { "alpha_fraction": 0.48343750834465027, "alphanum_fraction": 0.4859375059604645, "avg_line_length": 33.7717399597168, "blob_id": "14e2fd9a293a61678f1547c7d4c41e6ad8fb6809", "content_id": "00fa47d3168bfd05aebbd68c6a87c4ca1a50f38c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3200, "license_type": "permissive", "max_line_length": 103, "num_lines": 92, "path": "/tests/core/uptests.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom make.test import *\nimport os, subprocess, re\n\n# **************************************************************************************************\n# Constants\n\noutputPath = testOutputPath(__file__)\ntestLibPath = os.path.abspath(os.path.join(outputPath, \"lib\"))\nsharedLibPath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"lib\"))\n\nupExePath = \"/usr/bin/env up\"\n\n# 
**************************************************************************************************\n\nclass UpTestFixture(TestFixture):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def launch(self, command, cwd=None, env=None, args=None, source=None):\n if env:\n env2 = dict(os.environ)\n env2.update(env)\n env = env2\n\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n cwd=cwd, env=env)\n error = p.wait()\n\n errors = p.stderr.read().strip()\n out = p.stdout.read().strip()\n\n # Remove stack trace entries from inside the standard library, since\n # line numbers and files there will be moving around often\n reLibTrace = re.compile(\"^\\.\\/(up|json|regex)\\.up, line.*?$\\n\", re.M)\n errors = reLibTrace.sub(\"\", errors)\n\n if out and errors:\n out = \"%s\\n---\\n%s\" % (out, errors)\n elif errors:\n out = errors\n\n if error == -11:\n raise RunException(command, error, errors, out, args, source)\n else:\n return out\n\n def runTest(self, args, source, eventLoop=False, **kwds):\n if not eventLoop:\n extraArgs = [\"--disableEventLoop\"]\n else:\n extraArgs = []\n\n escapedSource = source.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\").replace(\"\\n\", \"\\\\n\")\n exePath = \"%s %s %s -c $'%s'\" % (upExePath, \" \".join(args), \" \".join(extraArgs), escapedSource)\n\n if self.buildOnly:\n raise TestAbortException(exePath)\n\n upPath = \":\".join([os.environ['UPPATH'], sharedLibPath, testLibPath])\n sysPath = os.environ['PATH']\n env = {\"UPPATH\": upPath, \"PATH\": sysPath}\n\n metaArgs = args + extraArgs + ['-c', source]\n self.metadata = {\"command\": upExePath, \"args\": metaArgs, \"env\": env}\n return self.launch(exePath, env=env, args=args, source=source).strip()\n\n# **************************************************************************************************\n\nclass ParseTests(UpTestFixture):\n order = 1\n\n def testParse(self, source, expected, mode=\"ast\", **kwds):\n return 
self.runTest([\"--debug\", mode], source, **kwds)\n\n# **************************************************************************************************\n\n# class CompileTests(UpTestFixture):\n# order = 1\n#\n# def testCompile(self, source, expected, **kwds):\n# return self.runTest([\"--debug\", \"compile\"], source, **kwds)\n#\n# # **************************************************************************************************\n#\n# class RuntimeTests(ParseTests):\n# order = 2\n#\n# def testRuntime(self, source, expected, **kwds):\n# return self.runTest([], source, **kwds)\n" }, { "alpha_fraction": 0.5438871383666992, "alphanum_fraction": 0.5438871383666992, "avg_line_length": 22.629629135131836, "blob_id": "5b1a4cfb00dc27316656cc4a021e228f7ef3f722", "content_id": "cf6c31eefbfd00166c0ae6ffd717c703f92a2c13", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 638, "license_type": "permissive", "max_line_length": 100, "num_lines": 27, "path": "/src/vm/UpProperty.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpProperty.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpArena.h\"\n\n// *************************************************************************************************\n\nUpProperty* UpNullProperty() {\n UpProperty* prop = NULL;\n if (!prop) {\n prop = UpPropertyCreate();\n }\n return prop;\n}\n\nUpProperty* UpPropertyCreate() {\n UpProperty* self = UpArenaNew(UpGetHeap(), UpProperty);\n self->value = UpUndefined();\n return self;\n}\n\nvoid UpPropertyInvalidate(UpProperty* self, UpObject* object) {\n if (!self->isOverriden) {\n self->isValid = false;\n }\n}" }, { "alpha_fraction": 0.6194751262664795, "alphanum_fraction": 0.6215469837188721, "avg_line_length": 26.320755004882812, "blob_id": "f3a988c9011b291808bd6c97d37b2f5ae05b80fd", "content_id": "1372145a9098b3303e3b89078ee89317333ca153", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1448, "license_type": "permissive", "max_line_length": 99, "num_lines": 53, "path": "/src/vm/include/UpClass.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPCLASS_H\n#define UP_UPCLASS_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpClassDef {\n UP_PROBE_ID;\n char* name;\n char* sourcePath;\n char* moduleName;\n UpSyntax* ast;\n UpArray* members;\n UpFunctionDef* constructorDef;\n UpIntTable* getters;\n UpIntTable* setters;\n};\n\nstruct UpGetterDef {\n char* name;\n UpFunctionDef* funcDef;\n bool isContextual:1;\n bool isMember:1;\n bool isCached:1;\n};\n\nstruct UpClass {\n UpObject __base;\n UpClassDef* def;\n UpVariables* closure;\n UpClass* baseClass;\n size_t size;\n};\n\n// ************************************************************************************************\n\nUpClass* UpClassCreate(UpClassDef* def, UpVariables* closure,\n UpClass* baseClass, size_t size);\nUpObject* UpClassInstantiate(UpClass* self);\n\nbool UpClassIsInstance(UpClass* self, UpObject* object);\n\nchar* UpClassGetName(UpClass* self);\nUpClass* UpClassGetBaseClass(UpClass* self);\n\nUpProperty* UpClassGetGetter(UpClass* self, UpSymbol name, UpObject* objectToBind,\n bool direct, bool create, UpGetterDef** outGetterDef,\n UpClass** outClass, UpClass** outDefClass, UpFunctionDef** outDef);\nUpProperty* UpClassGetSetter(UpClass* self, UpSymbol name, bool direct,\n UpClass** outDefClass, UpFunctionDef** outDef);\n\nUpList* UpClassGetGetters(UpClass* self);\n\n#endif // UP_UPCLASS_H\n" }, { "alpha_fraction": 0.5714018940925598, "alphanum_fraction": 0.5736419558525085, "avg_line_length": 29.959537506103516, "blob_id": "eabcd87e945b83808cd85614c7567030a5a534d5", "content_id": "d2ff2434b2136a1aa9f33d9a8c1207004b81e7ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5357, 
"license_type": "permissive", "max_line_length": 100, "num_lines": 173, "path": "/src/vm/UpIntTable.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpIntTable.h\"\n#include \"Up/UpArena.h\"\n#include \"lookup3.h\"\n\n// *************************************************************************************************\n\nstatic unsigned int _UpIntTableBucketAllocationSize = 100;\n\nstruct UpIntTableItem {\n UpIntTableItem* next;\n unsigned int hash;\n int key;\n void* value;\n};\n\nstatic unsigned int _UpIntTableBucketForHash(UpIntTable* self, unsigned int hash) {\n return hash & (self->bucketsAllocated-1);\n}\n\nstatic unsigned int _UpIntTableBucketForKey(UpIntTable* self, int key) {\n unsigned int hash = key;\n return _UpIntTableBucketForHash(self, hash);\n}\n\nstatic UpIntTableItem* _UpIntTableCreateItem(UpIntTable* self, unsigned int hash, int key,\n void* value) {\n // XXXjoe It would be faster to allocate the buckets and item storage inline, instead\n // of allocating new items every time\n UpIntTableItem* item = UpArenaAllocate(self->heap, self->size+sizeof(UpIntTableItem), \"\");\n item->hash = hash;\n item->key = key;\n item->value = (&item->value)+1;\n memcpy(item->value, value, self->size);\n return item;\n}\n\nstatic void _UpIntTableSetWithHash(UpIntTable* self, unsigned int hash, int key, void* value) {\n unsigned int bucket = _UpIntTableBucketForHash(self, hash);\n UpIntTableItem* item = self->buckets[bucket];\n if (!item) {\n self->buckets[bucket] = _UpIntTableCreateItem(self, hash, key, value);\n ++self->bucketsUsed;\n ++self->itemCount;\n } else {\n while (item) {\n if (item->key == key) {\n memcpy(item->value, value, self->size);\n break;\n } else if (!item->next) {\n item->next = _UpIntTableCreateItem(self, hash, key, value);\n ++self->itemCount;\n break;\n }\n item = item->next;\n }\n }\n}\n\nstatic void _UpIntTableGrowBuckets(UpIntTable* self) {\n unsigned int 
oldBucketCount = self->bucketsAllocated;\n UpIntTableItem** oldBuckets = self->buckets;\n \n self->bucketsUsed = 0;\n self->bucketsAllocated = self->bucketsAllocated + _UpIntTableBucketAllocationSize;\n self->buckets = UpArenaNewArray(self->heap, UpIntTableItem, self->bucketsAllocated);\n \n for (unsigned int i = 0; i < oldBucketCount; ++i) {\n UpIntTableItem* item = oldBuckets[i];\n while (item) {\n UpIntTableItem* next = item->next;\n _UpIntTableSetWithHash(self, item->hash, item->key, item->value);\n item = next;\n }\n }\n}\n\n// *************************************************************************************************\n\nUpIntTable* UpIntTableCreate(UpArena* heap, size_t size) {\n UpIntTable* self = UpArenaNew(heap, UpIntTable);\n self->heap = heap;\n self->bucketsAllocated = _UpIntTableBucketAllocationSize;\n self->bucketsUsed = 0;\n self->itemCount = 0;\n self->size = size;\n self->buckets = UpArenaNewArray(heap, UpIntTableItem, self->bucketsAllocated);\n return self;\n}\n\nunsigned int UpIntTableCount(UpIntTable* self) {\n return self->itemCount;\n}\n\nbool UpIntTableGet(UpIntTable* self, int key, void* value) {\n unsigned int bucket = _UpIntTableBucketForKey(self, key);\n UpIntTableItem* item = self->buckets[bucket];\n\n while (item) {\n if (item->key == key) {\n if (value) {\n memcpy(value, item->value, self->size);\n }\n return true;\n }\n item = item->next;\n }\n\n return false;\n}\n\nint UpIntTableReverseGet(UpIntTable* self, void* value) {\n for (unsigned int i = 0; i < self->bucketsAllocated; ++i) {\n UpIntTableItem* item = self->buckets[i];\n while (item && item->value != value) {\n item = item->next;\n }\n if (item) {\n return item->key;\n }\n }\n return UpNullSymbol;\n}\n\nvoid UpIntTableSet(UpIntTable* self, int key, void* value) {\n if (self->bucketsUsed == self->bucketsAllocated) {\n _UpIntTableGrowBuckets(self);\n }\n\n unsigned int hash = key;\n _UpIntTableSetWithHash(self, hash, key, value);\n}\n\nvoid UpIntTableRemove(UpIntTable* 
self, int key) {\n unsigned int bucket = _UpIntTableBucketForKey(self, key);\n UpIntTableItem* item = self->buckets[bucket];\n UpIntTableItem* previousItem = NULL;\n while (item) {\n UpIntTableItem* next = item->next;\n if (!next || item->key == key) {\n if (previousItem) {\n previousItem->next = next;\n } else {\n self->buckets[bucket] = next;\n if (!next) {\n --self->bucketsUsed;\n }\n }\n --self->itemCount;\n break;\n }\n previousItem = item;\n item = next;\n }\n}\n\nvoid UpIntTableIterate(UpIntTable* self, UpIntTableIterator callback, void* context) {\n unsigned int bucketCount = self->bucketsAllocated;\n UpIntTableItem** buckets = self->buckets;\n\n for (unsigned int i = 0; i < bucketCount; ++i) {\n UpIntTableItem* item = buckets[i];\n while (item) {\n void* value;\n void* ptr = &value;\n memcpy(ptr, item->value, self->size);\n\n callback(self, item->key, value, context);\n item = item->next;\n }\n }\n}\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 17.05555534362793, "blob_id": "3bbc64421ec7daca918289cd5519c7d101b8654e", "content_id": "10a3c9663e4ed20a3634ce5bf47b17e2a267ce96", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 325, "license_type": "permissive", "max_line_length": 57, "num_lines": 18, "path": "/src/vm/UpCLibrary.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPCLIBRARY_H\n#define UP_UPCLIBRARY_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpCLibrary {\n char* name;\n char* path;\n void* library;\n};\n\nUpCLibrary* UpCLibraryCreate(const char* name);\n\nvoid UpCLibraryClose(UpCLibrary* self);\n\nvoid* UpCLibraryLoad(UpCLibrary* self, const char* name);\n\n#endif // UP_UPCLIBRARY_H\n" }, { "alpha_fraction": 0.6110367178916931, "alphanum_fraction": 0.6144600510597229, "avg_line_length": 25.76593589782715, "blob_id": "93351e384a075ca34119a2a150eb2b64d561cf8e", "content_id": 
"923612c5db558a501f90d1f178b116db2860fb90", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 26874, "license_type": "permissive", "max_line_length": 100, "num_lines": 1004, "path": "/src/vm/UpContext.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/Up.h\"\n#include \"Up/UpChannel.h\"\n#include \"Up/UpException.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpCPointer.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpBool.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n#include \"UpScheduler.h\"\n#include \"UpTask.h\"\n#include \"UpCompiler.h\"\n#include \"UpParser.h\"\n#include \"UpCLibrary.h\"\n#include \"Up/UpArena.h\"\n#include \"UpArray.h\"\n#include \"UpStrTable.h\"\n#include \"UpIntTable.h\"\n#include \"UpBuffer.h\"\n#include \"UpEventLoop.h\"\n\n#include <fcntl.h>\n#include <sys/mman.h>\n#include <sys/stat.h>\n\n// ************************************************************************************************\n\nconst size_t kTempProbesSize = 104096;\n\nstatic UpStatus _Init();\n\nUpContext* UpGlobalContext = NULL;\n\n// ************************************************************************************************\n\ntypedef struct {\n UpObject* module;\n UpFunctionDef* functionDef;\n UpClassDef* classDef;\n} UpModule;\n\nstruct UpContext {\n UpArena* heap;\n UpStrTable* symbolMap;\n UpArray* searchPaths;\n int recursionLimit;\n\n FILE* logStream;\n int probesFile;\n char* probes;\n char* probesCursor;\n char* probeDumpPath;\n int probeBufferSize;\n int probeNumeral;\n\n UpIntTable* modules;\n UpStrTable* clibraries;\n UpObject* builtinModule;\n UpBuiltinClasses builtinTypes;\n UpFunctionDef* mainFunctionDef;\n UpCompileFrame* compileFrame;\n UpException* 
exception;\n\n UpObject* bfalse;\n UpObject* btrue;\n UpObject* bnull;\n UpObject* bundefined;\n UpObject* bclosed;\n UpObject* beval;\n UpModule* modulePlaceholder;\n UpString* emptyString;\n UpInteger** sharedIntegers;\n UpLong** sharedLongs;\n UpFloat** sharedFloats;\n\n bool didInit:1;\n bool useEventLoop:1;\n};\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint32_t length;\n} UpLogProbe;\n\n// ************************************************************************************************\n\nstatic bool UpSymbolCompare(void* a, void* b) {\n int ia = *(UpSymbol*)a;\n int ib = *(UpSymbol*)b;\n return ia == ib;\n}\n\nstatic void _LoadSearchPaths(UpArena* heap, UpArray* searchPaths) {\n const char* envPath = getenv(\"UPPATH\");\n if (envPath) {\n char* name = UpArenaCopyString(heap, envPath);\n for (char* s = name; *s; ++s) {\n if (*s == ':') {\n *s = 0;\n char* nameCopy = UpArenaCopyString(heap, name);\n UpArrayAppend(searchPaths, &nameCopy);\n *s = ';';\n name = s+1;\n }\n }\n\n if (*name) {\n UpArrayAppend(searchPaths, &name);\n }\n }\n\n char wd[2048];\n getcwd(wd, 2048);\n char* wdCopy = UpArenaCopyString(heap, wd);\n UpArrayAppend(searchPaths, &wdCopy);\n}\n\nstatic char* _ReadSource(UpArena* heap, const char* sourcePath) {\n FILE* f = fopen(sourcePath, \"r\");\n if (!f) {\n return NULL;\n }\n\n fseek(f, 0, SEEK_END);\n int fileSize = ftell(f);\n fseek(f, 0, SEEK_SET);\n\n char* buffer = UpArenaAlloc(heap, fileSize*+1);\n char* buf = buffer;\n int count = 0;\n while (count < fileSize) {\n int read = fread(buf, 1, 1024, f);\n if (read > 0) {\n count += read;\n buf += read;\n }\n\n }\n\n buffer[fileSize] = 0;\n fclose(f);\n\n return buffer;\n}\n\nstatic UpStatus _CompileSource(const char* source, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef) {\n if (!_Init(UPCTX)) {\n return UpFailure;\n }\n\n UpPushCompilation(sourcePath, moduleName);\n\n UpSyntax* ast = NULL;\n if (!UpParseSource(source, &ast)) {\n 
UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n BEGIN_COMPILE(moduleName);\n\n UpCompiler* compiler = UpCompilerCreate(UPCTX);\n if (!UpCompilerCompileImperative(compiler, ast, sourcePath, moduleName, outDef)) {\n UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n END_COMPILE(moduleName);\n\n UpPopCompilation(UPCTX);\n return UpSuccess;\n}\n\nstatic UpStatus _CompileFile(const char* sourcePath, const char* moduleName,\n char writeToDisk, UpFunctionDef** outDef, UpClassDef** outClassDef) {\n // 1. If extension is .hic, just read the binary directly\n // 2. If extension is .hi, look for .hic file, read it if timestamp is newer than source\n if (!_Init(UPCTX)) {\n return UpFailure;\n }\n\n UpPushCompilation(sourcePath, moduleName);\n\n UpSyntax* ast;\n if (!UpParseFile(sourcePath, &ast)) {\n UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n BEGIN_COMPILE(moduleName);\n\n UpCompiler* compiler = UpCompilerCreate(UPCTX);\n if (!UpCompilerCompileDeclarative(compiler, ast, sourcePath, moduleName, outDef, outClassDef)) {\n UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n END_COMPILE(moduleName);\n\n if (writeToDisk) {\n // NYI\n }\n\n UpPopCompilation(UPCTX);\n\n return UpSuccess;\n}\n\nstatic UpStatus _ExecuteModule(UpFunctionDef* def, UpObject** outModule) {\n UpScheduler* scheduler = UpSchedulerCreate(UPCTX->heap);\n\n UpTask* task = UpTaskCreateWithFunctionDef(UPCTX->heap, def, NULL, NULL);\n UpTaskExpectReturn(task);\n UpSchedulerScheduleTask(scheduler, task);\n\n BEGIN_RUN(def->moduleName);\n UpStatus ok = UpSchedulerRun(scheduler);\n END_RUN(def->moduleName);\n\n if (ok) {\n return UpTaskGetReturnValue(task, outModule);\n } else {\n return UpFailure;\n }\n}\n\nstatic UpStatus _RunModule(UpFunctionDef* def, UpObject** outModule) {\n UPCTX->mainFunctionDef = def;\n\n UpStatus status = UpContextRun(outModule);\n return status;\n}\n\nvoid _SymbolMapWriter(UpStrTable* table, const char* key, void* value, UpBuffer* buf) {\n UpBufferPrint(buf, \"%d,%s\\1\", 
(uint32_t)(uint64_t)value, key);\n}\n\nstatic void _InitProbesHeader(UpContext* self) {\n uint32_t zero = 0;\n memcpy(self->probesCursor, &zero, sizeof(zero));\n self->probesCursor += sizeof(zero);\n\n uint8_t done = 0;\n memcpy(self->probesCursor, &done, sizeof(done));\n self->probesCursor += sizeof(done);\n}\n\nstatic void _InitProbes(UpContext* self) {\n self->probeBufferSize = kTempProbesSize;\n\n self->probes = malloc(kTempProbesSize);\n memset(self->probes, 0, kTempProbesSize);\n self->probesCursor = self->probes;\n\n _InitProbesHeader(self);\n UpInitProbes();\n UpMapMappings();\n}\n\nstatic void _InitProbesFile(const char* dumpPath) {\n UPCTX->probesFile = open(dumpPath, O_CREAT|O_RDWR, 0644);\n if (UPCTX->probesFile == -1) {\n UpWarn(\"Unable to dump probes at %s\", dumpPath);\n return;\n }\n\n struct stat stats;\n int err = stat(dumpPath, &stats);\n if (!err && stats.st_size) {\n UPCTX->probeBufferSize = stats.st_size;\n } else if (UPCTX->probeBufferSize) {\n ftruncate(UPCTX->probesFile, UPCTX->probeBufferSize);\n } else {\n // XXXjoe This should never happen\n }\n\n char* probes = mmap(NULL, UPCTX->probeBufferSize, PROT_READ|PROT_WRITE, MAP_FILE|MAP_SHARED,\n UPCTX->probesFile, 0);\n if (probes == MAP_FAILED) {\n UpWarn(\"Unable to map probes file\");\n return;\n }\n\n if (UPCTX->probes) {\n uint32_t size = UPCTX->probesCursor - UPCTX->probes;\n memcpy(probes, UPCTX->probes, size);\n free(UPCTX->probes);\n\n UPCTX->probes = probes;\n UPCTX->probesCursor = probes + size;\n } else {\n UPCTX->probesCursor = UPCTX->probes = probes;\n }\n}\n\nstatic void _CreateBuiltins(UpContext* self) {\n self->builtinTypes.objectClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpObject));\n self->builtinTypes.classClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpClass));\n self->builtinTypes.functionClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpFunction));\n self->builtinTypes.cfunctionClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpCFunction));\n 
self->builtinTypes.cpointerClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpCPointer));\n self->builtinTypes.channelClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpChannel));\n self->builtinTypes.exceptionClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpException));\n self->builtinTypes.nullClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpObject));\n self->builtinTypes.boolClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpObject));\n self->builtinTypes.integerClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpInteger));\n self->builtinTypes.longClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpLong));\n self->builtinTypes.floatClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpFloat));\n self->builtinTypes.stringClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpString));\n self->builtinTypes.listClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpList));\n self->builtinTypes.mapClass = UpClassCreate(NULL, NULL, NULL, sizeof(UpMap));\n}\n\nstatic void _CreateSharedNumbers(UpContext* self) {\n UpInteger** sharedIntegers = UpArenaNewArray(self->heap, UpInteger,\n UpSharedIntegerMax-UpSharedIntegerMin);\n for (int i = UpSharedIntegerMin; i <= UpSharedIntegerMax; ++i) {\n sharedIntegers[i-UpSharedIntegerMin] = UpIntegerCreateWithUnit(i, UpNullSymbol);\n }\n self->sharedIntegers = sharedIntegers;\n\n UpLong** sharedLongs = UpArenaNewArray(self->heap, UpLong, UpSharedLongMax-UpSharedLongMin);\n for (int i = UpSharedLongMin; i <= UpSharedLongMax; ++i) {\n sharedLongs[i-UpSharedLongMin] = UpLongCreateWithUnit(i, UpNullSymbol);\n }\n self->sharedLongs = sharedLongs;\n\n UpFloat** sharedFloats = UpArenaNewArray(self->heap, UpFloat,\n UpSharedFloatMax-UpSharedFloatMin);\n for (int i = UpSharedFloatMin; i <= UpSharedFloatMax; ++i) {\n sharedFloats[i-UpSharedFloatMin] = UpFloatCreateWithUnit(i, UpNullSymbol);\n }\n self->sharedFloats = sharedFloats;\n}\n\nstatic UpStatus _Init() {\n if (UPCTX->didInit) {\n return UpSuccess;\n }\n\n UPCTX->didInit = true;\n\n 
_CreateSharedNumbers(UPCTX);\n\n // UpFunctionDef* builtinDef;\n // UpClassDef* classDef;\n // if (!UpCompileModule(\"up\", &builtinDef, &classDef)) {\n // return UpFailure;\n // }\n //\n // UpObject* builtin;\n // if (_ExecuteModule(builtinDef, &builtin)) {\n // UpSetBuiltinModule(builtin);\n // return UpSuccess;\n // } else {\n // return UpFailure;\n // }\n return UpSuccess;\n}\n\n// ************************************************************************************************\n\nUpContext* UpContextCreate() {\n UpContext* self = malloc(sizeof(UpContext));\n memset(self, 0, sizeof(UpContext));\n\n UpContext* previousContext = UpSwitchContext(self);\n self->heap = UpArenaCreate();\n self->symbolMap = UpStrTableCreate(self->heap, sizeof(UpSymbol));\n self->modules = UpIntTableCreate(self->heap, sizeof(UpModule*));\n self->searchPaths = UpArrayCreate(self->heap, sizeof(char*));\n self->modulePlaceholder = UpArenaNew(self->heap, UpModule);\n self->logStream = stdout;\n self->recursionLimit = 10000;\n self->useEventLoop = true;\n\n _InitProbes(self);\n\n _LoadSearchPaths(self->heap, self->searchPaths);\n\n // Take up the first spot with null symbol\n UpGetSymbol(\"\");\n\n _CreateBuiltins(self);\n\n UpSwitchContext(previousContext);\n return self;\n}\n\nvoid UpContextShutdown(UpContext* self, UpStatus status) {\n if (!status) {\n UpException* exc = UpGetError();\n if (exc) {\n if (UpProbeExcEnabled) {\n UpExceptionLog(exc);\n } else {\n char* trace = UpExceptionGetStackTrace(exc);\n fputs(trace, stderr);\n }\n } else {\n fputs(\"Unknown error\", stderr);\n }\n }\n\n if (self->probes) {\n uint8_t done = 1;\n memcpy(self->probes+sizeof(uint32_t), &done, sizeof(done));\n\n uint8_t zero = 0;\n UpProbe(&zero, 1);\n\n munmap(self->probes, self->probeBufferSize);\n close(self->probesFile);\n }\n\n UpArenaFree(self->heap);\n free(self);\n}\n\nUpContext* UpSwitchContext(UpContext* context) {\n UpContext* previous = UpGlobalContext;\n UpGlobalContext = context;\n return 
previous;\n}\n\nUpObject* UpFalse() {\n if (!UPCTX->bfalse) {\n UPCTX->bfalse = UpBoolCreate(UPCTX);\n }\n return UPCTX->bfalse;\n}\n\nUpObject* UpTrue() {\n if (!UPCTX->btrue) {\n UPCTX->btrue = UpBoolCreate(UPCTX);\n }\n return UPCTX->btrue;\n}\n\nUpObject* UpNull() {\n if (!UPCTX->bnull) {\n UPCTX->bnull = UpNullCreate(UPCTX);\n }\n return UPCTX->bnull;\n}\n\nUpObject* UpUndefined() {\n if (!UPCTX->bundefined) {\n UPCTX->bundefined = UpNullCreate(UPCTX);\n }\n return UPCTX->bundefined;\n}\n\nUpObject* UpClosed() {\n if (!UPCTX->bclosed) {\n UPCTX->bclosed = (UpObject*)UpExceptionCreate(\"closed\");\n }\n return UPCTX->bclosed;\n}\n\nUpObject* UpEval() {\n if (!UPCTX->beval) {\n UPCTX->beval = (UpObject*)UpFunctionCreate(NULL, NULL, NULL);\n }\n return UPCTX->beval;\n}\n\nUpObject* UpTrueOrFalse(bool truth) {\n return truth ? UpTrue() : UpFalse();\n}\n\nUpString* UpEmptyString() {\n if (!UPCTX->emptyString) {\n UPCTX->emptyString = UpStringCreate(\"\");\n }\n return UPCTX->emptyString;\n}\n\nchar* UpFormatString(const char* str, ...) 
{\n va_list args;\n\n va_start(args, str);\n int count = vsnprintf(NULL, 0, str, args);\n va_end(args);\n\n char* buf = UpArenaNewString(UPCTX->heap, count);\n\n va_start(args, str);\n vsnprintf(buf, count+1, str, args);\n va_end(args);\n\n return buf;\n}\n\nUpArena* UpGetHeap() {\n return UPCTX->heap;\n}\n\nchar* UpGetVersion() {\n #ifdef GIT_VERSION\n #define STRINGIZE(x) #x\n #define STRINGIZE_VALUE_OF(x) STRINGIZE(x)\n return STRINGIZE_VALUE_OF(GIT_VERSION);\n #else\n return \"(no version)\";\n #endif\n}\n\nUpSymbol UpGetSymbol(const char* name) {\n if (!name) {\n return UpNullSymbol;\n } else {\n UpSymbol symbol;\n if (!UpStrTableGet(UPCTX->symbolMap, name, &symbol)) {\n symbol = UpStrTableCount(UPCTX->symbolMap)+1;\n UpStrTableSet(UPCTX->symbolMap, name, &symbol);\n\n UP_MAPPING(UpProbeMappingSymbol, name, symbol);\n\n }\n return symbol;\n }\n}\n\nconst char* UpGetSymbolName(UpSymbol symbol) {\n return UpStrTableReverseGet(UPCTX->symbolMap, &symbol, UpSymbolCompare);\n}\n\nvoid UpReleaseSymbols() {\n UPCTX->symbolMap = NULL;\n}\n\nint UpGetRecursionLimit() {\n return UPCTX->recursionLimit;\n}\n\nvoid UpSetRecursionLimit(int limit) {\n UPCTX->recursionLimit = limit;\n}\n\nUpArray* UpGetSearchPaths() {\n return UPCTX->searchPaths;\n}\n\nUpBuiltinClasses* UpGetBuiltinClasses() {\n return &UPCTX->builtinTypes;\n}\n\nUpInteger** UpGetSharedIntegers() {\n return UPCTX->sharedIntegers;\n}\n\nUpLong** UpGetSharedLongs() {\n return UPCTX->sharedLongs;\n}\n\nUpFloat** UpGetSharedFloats() {\n return UPCTX->sharedFloats;\n}\n\nvoid UpDisableEventLoop(bool disabled) {\n UPCTX->useEventLoop = !disabled;\n}\n\nvoid* UpGetLogStream() {\n return UPCTX->logStream;\n}\n\nvoid UpSetLogStream(void* stream) {\n UPCTX->logStream = (FILE*)stream;\n}\n\nvoid UpLog(const char* text) {\n #ifdef UP_ENABLE_PROBES\n int len = sizeof(char) * strlen(text);\n UpLogProbe probe = {4, len};\n UpProbe(&probe, sizeof(probe));\n UpProbe((void*)text, len);\n #endif\n}\n\nvoid UpPrintf(const char* 
fmt, ...) {\n if (UPCTX->logStream) {\n va_list args;\n va_start(args, fmt);\n vfprintf(UPCTX->logStream, fmt, args);\n fprintf(UPCTX->logStream, \"\\n\");\n va_end(args);\n }\n}\n\nvoid UpWarn(const char* fmt, ...) {\n if (UPCTX->logStream) {\n va_list args;\n va_start(args, fmt);\n fprintf(UPCTX->logStream, \"WARNING: \");\n vfprintf(UPCTX->logStream, fmt, args);\n fprintf(UPCTX->logStream, \"\\n\");\n va_end(args);\n }\n}\n\nvoid UpEnableProbe(const char* probeName, bool enabled) {\n#ifdef UP_ENABLE_PROBES\n bool* probeSymbol = UpGetProbeFlag(probeName);\n if (probeSymbol) {\n *probeSymbol = enabled;\n } else {\n UpWarn(\"Unsupported probe %s\", probeName);\n }\n#endif\n}\n\nvoid UpProbe(void* probe, size_t size) {\n if (UPCTX->probes) {\n char* buf2 = UPCTX->probesCursor + size;\n\n if (size+(buf2-UPCTX->probes) >= UPCTX->probeBufferSize) {\n char* oldProbes = UPCTX->probes;\n int oldProbesFile = UPCTX->probesFile;\n UPCTX->probes = NULL;\n\n uint8_t zero = 0;\n memcpy(UPCTX->probesCursor, &zero, sizeof(zero));\n\n int num = ++UPCTX->probeNumeral;\n char* newDumpPath = UpArenaFormatString(UPCTX->heap, \"%s%d\", UPCTX->probeDumpPath, num);\n _InitProbesFile(newDumpPath);\n _InitProbesHeader(UPCTX);\n\n uint8_t done = 2;\n memcpy(oldProbes+sizeof(uint32_t), &done, sizeof(done));\n munmap(oldProbes, UPCTX->probeBufferSize);\n close(oldProbesFile);\n\n memcpy(UPCTX->probesCursor, probe, size);\n UPCTX->probesCursor += size;\n\n uint32_t totalSize = UPCTX->probesCursor - UPCTX->probes;\n memcpy(UPCTX->probes, &totalSize, sizeof(totalSize));\n } else {\n memcpy(UPCTX->probesCursor, probe, size);\n UPCTX->probesCursor = buf2;\n\n uint32_t totalSize = buf2 - UPCTX->probes;\n memcpy(UPCTX->probes, &totalSize, sizeof(totalSize));\n }\n }\n}\n\nvoid UpSetProbeDumpPath(const char* path) {\n if (path) {\n UPCTX->probeDumpPath = UpArenaCopyString(UPCTX->heap, path);\n } else {\n UPCTX->probeDumpPath = NULL;\n }\n\n if (path) {\n #ifdef UP_ENABLE_PROBES\n 
_InitProbesFile(path);\n #endif\n }\n}\n\nchar* UpGetFileConstructorName(const char* path) {\n const char* lastSlash = strrchr(path, '/');\n if (lastSlash) {\n path = lastSlash+1;\n }\n\n char* name;\n const char* lastDot = strrchr(path, '.');\n if (lastDot) {\n name = UpArenaCopyStringN(UpGetHeap(), path, lastDot-path);\n } else {\n name = UpArenaCopyString(UpGetHeap(), path);\n }\n\n char* c = name;\n if (*c >= '0' && *c <= '9') {\n *c = '_';\n }\n\n for (; *c; ++c) {\n if (!((*c >= '0' && *c <= '9')\n || (*c >= 'a' && *c <= 'z')\n || (*c >= 'A' && *c <= 'Z'))) {\n *c = '_';\n }\n }\n\n return name;\n}\n\nUpCompileFrame* UpGetCompileFrame() {\n return UPCTX->compileFrame;\n}\n\nvoid UpPushCompilation(const char* path, const char* moduleName) {\n UpCompileFrame* frame = UpArenaNew(UPCTX->heap, UpCompileFrame);\n frame->file = path ? UpArenaCopyString(UPCTX->heap, path) : NULL;\n frame->module = moduleName ? UpArenaCopyString(UPCTX->heap, moduleName) : NULL;\n frame->previous = UPCTX->compileFrame;\n UPCTX->compileFrame = frame;\n}\n\nvoid UpPopCompilation(UpContext* self) {\n self->compileFrame = self->compileFrame->previous;\n}\n\nvoid UpSetCompileLocation(int line, int column) {\n if (UPCTX->compileFrame) {\n UPCTX->compileFrame->line = line;\n UPCTX->compileFrame->column = column;\n }\n}\n\nUpException* UpGetError() {\n return UPCTX->exception;\n}\n\nUpException* UpClaimError() {\n UpException* exc = UPCTX->exception;\n UPCTX->exception = NULL;\n return exc;\n}\n\nvoid UpSetException(UpException* exception) {\n if (exception && UPCTX->exception) {\n UpWarn(\"Setting exception when one already exists! (%s)\",\n UPCTX->exception->description);\n }\n\n UPCTX->exception = exception;\n\n if (exception) {\n if (!exception->compileFrame) {\n exception->compileFrame = UPCTX->compileFrame;\n }\n }\n}\n\nUpException* UpSetError(const char* description, ...) 
{\n if (UPCTX->exception) {\n return UPCTX->exception;\n }\n\n va_list args;\n\n va_start(args, description);\n int count = vsnprintf(NULL, 0, description, args);\n va_end(args);\n\n char* buf = UpArenaNewString(UPCTX->heap, count);\n\n va_start(args, description);\n vsnprintf(buf, count+1, description, args);\n va_end(args);\n\n UpException* exc = UpExceptionCreate(buf);\n UpSetException(exc);\n return exc;\n}\n\nvoid UpSetErrorFrame(UpCallFrame* frame) {\n if (UPCTX->exception) {\n UPCTX->exception->callFrame = frame;\n }\n}\n\n\nUpIntTable* UpGetModuleMap() {\n return UPCTX->modules;\n}\n\nUpObject* UpGetBuiltinModule() {\n return UPCTX->builtinModule;\n}\n\nvoid UpSetBuiltinModule(UpObject* module) {\n UPCTX->builtinModule = module;\n}\n\nconst char* UpFindModulePath(const char* moduleName) {\n char* names = UpArenaCopyString(UPCTX->heap, moduleName);\n for (char* n = names; *n; ++n) {\n if (*n == '.') {\n *n = '/';\n }\n }\n\n int searchCount = UpArrayCount(UPCTX->searchPaths);\n for (int i = 0; i < searchCount; ++i) {\n const char* searchPath;\n UpArrayGet(UPCTX->searchPaths, i, &searchPath);\n char* modulePath = UpArenaFormatString(UPCTX->heap, \"%s/%s.up\", searchPath, names);\n FILE* f = fopen(modulePath, \"r\");\n if (f) {\n fclose(f);\n return modulePath;\n }\n }\n\n return NULL;\n}\n\nUpStatus UpImport(UpSymbol name, UpObject** outModule, UpFunctionDef** outDef,\n UpClassDef** outClassDef) {\n UpModule* entry;\n if (UpIntTableGet(UPCTX->modules, name, &entry)) {\n if (entry == UPCTX->modulePlaceholder) {\n UpSetError(\"Circular import\");\n return UpFailure;\n } else {\n *outModule = entry->module;\n *outDef = entry->functionDef;\n *outClassDef = entry->classDef;\n return UpSuccess;\n }\n } else {\n UpIntTableSet(UPCTX->modules, name, &(UPCTX->modulePlaceholder));\n\n UpFunctionDef* functionDef;\n UpClassDef* classDef;\n const char* moduleName = UpGetSymbolName(name);\n if (UpCompileModule(moduleName, &functionDef, &classDef)) {\n entry = 
UpArenaNew(UPCTX->heap, UpModule);\n *outModule = NULL;\n *outDef = entry->functionDef = functionDef;\n *outClassDef = entry->classDef = classDef;\n\n UpIntTableSet(UPCTX->modules, name, &entry);\n return UpSuccess;\n } else {\n UpIntTableRemove(UPCTX->modules, name);\n return UpFailure;\n }\n }\n}\n\nUpObject* UpGetImport(UpSymbol name) {\n UpModule* entry;\n if (UpIntTableGet(UPCTX->modules, name, &entry)) {\n return entry->module;\n } else {\n return NULL;\n }\n}\n\nvoid UpSetImport(UpSymbol name, UpObject* module) {\n UpModule* entry;\n if (UpIntTableGet(UPCTX->modules, name, &entry)) {\n entry->module = module;\n }\n}\n\nUpCLibrary* UpGetCLibrary(const char* name) {\n if (!UPCTX->clibraries) {\n UPCTX->clibraries = UpStrTableCreate(UPCTX->heap, sizeof(UpCLibrary*));\n }\n\n const char* key = name ? name : \"\";\n\n UpCLibrary* outLibrary;\n if (UpStrTableGet(UPCTX->clibraries, key, &outLibrary)) {\n return outLibrary;\n } else {\n outLibrary = UpCLibraryCreate(name);\n UpStrTableSet(UPCTX->clibraries, key, &outLibrary);\n return outLibrary;\n }\n}\n\nUpStatus UpExecuteModule(const char* moduleName, UpObject** outModule) {\n UpFunctionDef* moduleDef;\n UpClassDef* classDef;\n if (!UpCompileModule(moduleName, &moduleDef, &classDef)) {\n return UpFailure;\n }\n\n return _RunModule(moduleDef, outModule);\n}\n\nUpStatus UpExecuteFile(const char* sourcePath, const char* moduleName,\n char writeToDisk, UpObject** outModule) {\n UpFunctionDef* def;\n UpClassDef* classDef;\n if (!_CompileFile(sourcePath, moduleName, writeToDisk, &def, &classDef)) {\n return UpFailure;\n }\n\n return _RunModule(def, outModule);\n}\n\nUpStatus UpExecuteSource(const char* source, const char* sourcePath,\n const char* moduleName, UpObject** outModule) {\n UpFunctionDef* def;\n if (_CompileSource(source, sourcePath, moduleName, &def)) {\n return _RunModule(def, outModule);\n } else {\n return UpFailure;\n }\n}\n\nUpStatus UpContextRun(UpObject** outModule) {\n return 
_ExecuteModule(UPCTX->mainFunctionDef, outModule);\n}\n\nUpStatus UpCompileModule(const char* moduleName, UpFunctionDef** outDef,\n UpClassDef** outClassDef) {\n const char* modulePath = UpFindModulePath(moduleName);\n if (!modulePath) {\n UpSetError(\"Module '%s' not found\", moduleName);\n return UpFailure;\n }\n\n if (_CompileFile(modulePath, moduleName, true, outDef, outClassDef)) {\n return UpSuccess;\n } else {\n return UpFailure;\n }\n}\n\nUpStatus UpCompileFile(const char* sourcePath, const char* moduleName,\n char writeToDisk, UpFunctionDef** outDef, UpClassDef** outClassDef) {\n return _CompileFile(sourcePath, moduleName, writeToDisk, outDef, outClassDef);\n}\n\nUpStatus UpCompileSource(const char* source, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef) {\n return _CompileSource(source, sourcePath, moduleName, outDef);\n}\n\nUpStatus UpCompileEval(const char* source, UpFunctionDef* caller,\n UpFunctionDef** outDef) {\n UpPushCompilation(\"<eval>\", \"<eval>\");\n\n UpSyntax* ast = NULL;\n if (!UpParseSource(source, &ast)) {\n UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n BEGIN_COMPILE(\"eval\");\n\n UpCompiler* compiler = UpCompilerCreate(UPCTX);\n if (!UpCompilerCompileEval(compiler, ast, caller, outDef)) {\n UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n END_COMPILE(\"eval\");\n\n UpPopCompilation(UPCTX);\n return UpSuccess;\n}\n\nUpStatus UpParseModule(const char* moduleName, UpSyntax** out) {\n const char* modulePath = UpFindModulePath(moduleName);\n if (!modulePath) {\n UpSetError(\"Module '%s' not found\", moduleName);\n return UpFailure;\n }\n\n UpPushCompilation(modulePath, moduleName);\n\n UpSyntax* ast;\n if (!UpParseFile(modulePath, &ast)) {\n UpPopCompilation(UPCTX);\n return UpFailure;\n }\n\n UpPopCompilation(UPCTX);\n\n *out = ast;\n return UpSuccess;\n}\n\nUpStatus UpParseFile(const char* sourcePath, UpSyntax** out) {\n BEGIN_PARSE(sourcePath);\n\n char* source = _ReadSource(UPCTX->heap, 
sourcePath);\n if (!source) {\n UpSetError(\"File not found\");\n return UpFailure;\n }\n\n UpStatus status = UpParseSource(source, out);\n END_PARSE(sourcePath);\n\n return status;\n}\n\nUpStatus UpParseSource(const char* source, UpSyntax** out) {\n\n return UpParse(source, out);\n}\n" }, { "alpha_fraction": 0.686181366443634, "alphanum_fraction": 0.6874600648880005, "avg_line_length": 26.101484298706055, "blob_id": "52cd6460bd612bbb1ac20f1011b18743bd18d015", "content_id": "383f198adcfeb6d56a6bd35ffd1698dcf0ac6975", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10949, "license_type": "permissive", "max_line_length": 101, "num_lines": 404, "path": "/src/vm/UpSyntax.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPSYNTAX_H\n#define UP_UPSYNTAX_H\n\n#include \"Up/UpGlobal.h\"\n\ntypedef enum {\n UpNoSyntaxType,\n UpSetSyntaxType,\n UpDeclarationSyntaxType,\n UpGroupSyntaxType,\n UpPrintSyntaxType,\n UpAssignmentSyntaxType,\n UpBinarySyntaxType,\n UpUnarySyntaxType,\n UpImportSyntaxType,\n UpImportWildcardSyntaxType,\n UpCallSyntaxType,\n UpArgumentSyntaxType,\n UpPropertySyntaxType,\n UpIdSyntaxType,\n UpTypeIdSyntaxType,\n UpRangeSyntaxType,\n UpDefaultSyntaxType,\n UpUndefinedSyntaxType,\n UpIntSyntaxType,\n UpLongSyntaxType,\n UpFloatSyntaxType,\n UpStringSyntaxType,\n UpListSyntaxType,\n UpMapSyntaxType,\n UpChannelSyntaxType,\n UpReturnSyntaxType,\n UpBreakSyntaxType,\n UpContinueSyntaxType,\n UpThrowSyntaxType,\n UpFunctionSyntaxType,\n UpIteratorSyntaxType,\n UpCFunctionSyntaxType,\n UpCTypeSyntaxType,\n UpCArgumentSyntaxType,\n UpIfSyntaxType,\n UpWhileSyntaxType,\n UpForSyntaxType,\n UpTrySyntaxType,\n UpCatchSyntaxType,\n UpFinallySyntaxType,\n UpCastSyntaxType,\n UpTypeSyntaxType,\n UpSubtypeSyntaxType,\n UpWhereSyntaxType,\n UpIsSyntaxType,\n UpHasSyntaxType,\n UpTransformSyntaxType,\n} UpSyntaxType;\n\ntypedef enum {\n UpNoOp,\n UpEqOp,\n 
UpConcatStringOp,\n UpLookupOp,\n UpIndexOp,\n UpSliceOp,\n UpOrOp,\n UpAndOp,\n UpNotOp,\n UpEqualsOp,\n UpNotEqualsOp,\n UpGreaterThanOp,\n UpGreaterThanEqualsOp,\n UpLessThanOp,\n UpLessThanEqualsOp,\n UpIsOp,\n UpIsNotOp,\n UpHasOp,\n UpHasNotOp,\n UpIsInOp,\n UpNotInOp,\n UpAddOp,\n UpSubtractOp,\n UpMultiplyOp,\n UpDivideOp,\n UpModOp,\n UpPowOp,\n UpConcatOp,\n UpAddEqOp,\n UpSubtractEqOp,\n UpMultiplyEqOp,\n UpDivideEqOp,\n UpModEqOp,\n UpPowEqOp,\n UpConcatEqOp,\n UpPositiveOp,\n UpNegativeOp,\n UpDeleteOp,\n UpInOp,\n UpReadOp,\n UpWriteOp,\n UpWriteAllOp,\n UpBindOp,\n} UpOperator;\n\ntypedef enum {\n UpNoAccess,\n UpPrivate,\n UpPublic,\n UpExtra,\n} UpAccessMode;\n\ntypedef enum {\n UpCallImmediate,\n UpCallConcurrent,\n UpCallParallel\n} UpCallSchedule;\n\n// ************************************************************************************************\n\ntypedef union {\n bool b;\n int i;\n unsigned int u;\n long long l;\n unsigned long long ul;\n double f;\n} UpSyntaxNumber;\n\nstruct UpSyntax {\n UpSyntaxType type;\n int line;\n int col;\n};\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* value;\n} UpSyntax1;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* value;\n UpSyntax* value2;\n} UpSyntax2;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntaxNumber value;\n char* unit;\n} UpNumberSyntax;\n\ntypedef struct {\n UpSyntax __base;\n char* value;\n char* specialty;\n} UpStringSyntax;\n\ntypedef struct UpSyntaxItem UpSyntaxItem;\n\nstruct UpSyntaxItem {\n UpSyntax* value;\n UpSyntaxItem* next;\n};\n\ntypedef struct {\n UpSyntax __base;\n UpSyntaxItem* first;\n UpSyntaxItem* last;\n} UpSetSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* head;\n UpSyntax* body;\n UpSyntax* where;\n UpAccessMode access;\n} UpDeclarationSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpOperator op;\n UpSyntax* left;\n UpSyntax* right;\n} UpAssignmentSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpOperator op;\n UpSyntax* left;\n UpSyntax* right;\n} 
UpBinarySyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpOperator op;\n UpSyntax* operand;\n} UpUnarySyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* names;\n bool wildcard;\n} UpImportSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* callable;\n UpSyntax* args;\n UpSyntax* schedule;\n bool isImperative:1;\n} UpCallSyntax;\n\ntypedef struct {\n UpSyntax __base;\n const char* externalName;\n const char* name;\n UpSyntax* expr;\n UpSyntax* next;\n bool isVariadic:1;\n} UpArgumentSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* left;\n const char* right;\n} UpPropertySyntax;\n\ntypedef struct {\n UpSyntax __base;\n const char* name;\n} UpIdSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* from;\n UpSyntax* by;\n UpSyntax* to;\n bool isThrough:1;\n} UpRangeSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* head;\n UpSyntax* body;\n bool isGenerator:1;\n bool isExpression:1;\n} UpFunctionSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* left;\n UpSyntax* iterable;\n UpSyntax* clause;\n UpSyntax* body;\n const char* TEMPchannelName;\n bool isOn:1;\n bool isWhile:1;\n bool isMapper:1;\n} UpIteratorSyntax;\n\ntypedef struct {\n UpSyntax __base;\n const char* name;\n const char* library;\n UpSyntax* returns;\n UpSyntax* args;\n} UpCFunctionSyntax;\n\ntypedef struct {\n UpSyntax __base;\n const char* name;\n int pointerCount;\n} UpCTypeSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* type;\n const char* name;\n} UpCArgumentSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* condition;\n UpSyntax* transforms;\n UpSyntax* elsex;\n} UpIfSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* condition;\n UpSyntax* body;\n} UpWhileSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* left;\n UpSyntax* right;\n UpSyntax* body;\n} UpForSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* tryBlock;\n UpSyntax* catchBlocks;\n UpSyntax* finallyBlock;\n} UpTrySyntax;\n\ntypedef struct {\n UpSyntax 
__base;\n UpSyntax* binding;\n UpSyntax* statements;\n} UpCatchSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* expr;\n UpSyntax* typeSig;\n} UpCastSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* params;\n} UpTypeSyntax;\n\ntypedef struct {\n UpSyntax __base;\n UpSyntax* condition;\n UpSyntax* body;\n UpSyntax* next;\n} UpTransformSyntax;\n\n// ************************************************************************************************\n\nsize_t UpGetSyntaxTypeSize(UpSyntaxType type);\nconst char* UpGetOperatorOverrideName(UpOperator op);\n\nUpSyntax* UpCreateSyntaxWithType(UpArena* heap, UpSyntaxType type, int line, int col);\n\nUpSyntax* UpParse1(UpArena* heap, UpSyntax* node, UpSyntax* a);\nUpSyntax* UpParse2(UpArena* heap, UpSyntax* node, UpSyntax* a, UpSyntax* B);\nUpSyntax* UpSyntaxSetEnsure(UpArena* heap, UpSyntax* maybeSet);\nvoid UpSyntaxSetAppend(UpArena* heap, UpSyntax* list, UpSyntax* item);\nUpSyntax* UpParseWildcard(UpArena* heap, UpSyntax* node);\nUpSyntax* UpParseAssignment(UpArena* heap, UpScanner* scanner, UpSyntax* node,\n UpOperator op, UpSyntax* left, UpSyntax* right);\nUpSyntax* UpParseBinary(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpOperator op,\n UpSyntax* left, UpSyntax* right);\nUpSyntax* UpParseUnary(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpOperator op,\n UpSyntax* operand);\nUpSyntax* UpParseImport(UpArena* heap, UpSyntax* node, UpSyntax* name);\nUpSyntax* UpParseCall(UpArena* heap, UpSyntax* node, UpSyntax* callable, UpSyntax* arguments,\n bool isImperative, UpSyntax* schedule);\nUpSyntax* UpParseProperty(UpArena* heap, UpSyntax* node, UpSyntax* left, const char* right);\nUpSyntax* UpParseId(UpArena* heap, UpSyntax* node, const char* name);\nUpSyntax* UpParseRange(UpArena* heap, UpSyntax* node, UpSyntax* from, UpSyntax* to, UpSyntax* by,\n bool isThrough);\nUpSyntax* UpParseDefault(UpArena* heap, UpSyntax* node, UpSyntax* value, UpSyntax* value2);\nUpSyntax* UpParseBool(UpArena* heap, 
UpSyntax* node, bool value);\nUpSyntax* UpParseInt(UpArena* heap, UpSyntax* node, int value, const char* unit);\nUpSyntax* UpParseUInt(UpArena* heap, UpSyntax* node, unsigned int value, const char* unit);\nUpSyntax* UpParseLong(UpArena* heap, UpSyntax* node, long long value, const char* unit);\nUpSyntax* UpParseULong(UpArena* heap, UpSyntax* node, unsigned long long value, const char* unit);\nUpSyntax* UpParseFloat(UpArena* heap, UpSyntax* node, double value, const char* unit);\nUpSyntax* UpParseStr(UpArena* heap, const char* value, const char* specialty, int line, int column);\nUpSyntax* UpParseList(UpArena* heap, UpSyntax* list, UpSyntax* items);\nUpSyntax* UpParseMap(UpArena* heap, UpSyntax* node, UpSyntax* items);\nUpSyntax* UpParseChannel(UpArena* heap, UpSyntax* node, UpSyntax* typeSig);\n\nUpSyntax* UpParseDeclaration(UpArena* heap, UpSyntax* node, UpAccessMode access, UpSyntax* head,\n UpSyntax* body, UpSyntax* where);\nUpSyntax* UpParseFunction(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpSyntax* head,\n UpSyntax* body, bool isImperative);\nUpSyntax* UpParseIterator(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpSyntax* left,\n UpSyntax* iterable, UpSyntax* clause, UpSyntax* body, bool isOn,\n bool isWhile, bool isMapper);\nUpSyntax* UpParseCFunction(UpArena* heap, UpSyntax* node, const char* name, UpSyntax* returns,\n UpSyntax* args);\nUpSyntax* UpParseCType(UpArena* heap, UpSyntax* node, const char* name);\nUpSyntax* UpParseCArgument(UpArena* heap, UpSyntax* node, UpSyntax* type, const char* name);\nUpSyntax* UpAddCTypePointer(UpSyntax* type);\nUpSyntax* UpMakeCFunctionsPrivate(UpSyntax* node);\nUpSyntax* UpParseIf(UpArena* heap, UpSyntax* node, UpSyntax* transforms, UpSyntax* elsex);\nUpSyntax* UpParseTransform(UpArena* heap, UpSyntax* node, UpSyntax* condition, UpSyntax* body);\nUpSyntax* UpAppendTransform(UpSyntax* left, UpSyntax* right);\nUpSyntax* UpParseWhile(UpArena* heap, UpSyntax* node, UpSyntax* condition, UpSyntax* 
body);\nUpSyntax* UpParseFor(UpArena* heap, UpSyntax* node, UpSyntax* left, UpSyntax* right, UpSyntax* body);\nUpSyntax* UpParseArgument(UpArena* heap, UpSyntax* node, const char* name, UpSyntax* body,\n bool isVariadic, UpSyntax* next);\nUpSyntax* UpParseTry(UpArena* heap, UpSyntax* node, UpSyntax* tryBlock,\n UpSyntax* catchBlocks, UpSyntax* finallyBlock);\nUpSyntax* UpParseCatch(UpArena* heap, UpSyntax* node, UpSyntax* binding, UpSyntax* statements);\n\nUpSyntax* UpParseGroup(UpArena* heap, UpSyntax* expr, int line, int col);\n\nUpSyntax* UpParseWhere(UpArena* heap, UpSyntax* node, UpSyntax* body, UpSyntax* assignments);\n\nUpSyntax* UpAppendArgs(UpArena* heap, UpScanner* scanner, UpSyntax* head, UpSyntax* args);\nUpSyntax* UpAppendArg(UpArena* heap, UpSyntax* arg, UpSyntax* args);\n\nUpSyntax* UpParseIs(UpArena* heap, UpSyntax* node, UpSyntax* subject, UpSyntax* transform);\nUpSyntax* UpParseHas(UpArena* heap, UpSyntax* node, UpSyntax* subject, UpSyntax* transform);\n\nUpSyntax* UpParseCast(UpArena* heap, UpSyntax* node, UpSyntax* expr, UpSyntax* typeSig);\nUpSyntax* UpAppendType(UpArena* heap, UpSyntax* left, UpSyntax* right);\nUpSyntax* UpParseSubtype(UpArena* heap, UpSyntax* node, UpSyntax* left, UpSyntax* right);\n\n#endif // UP_UPSYNTAX_H\n" }, { "alpha_fraction": 0.6420560479164124, "alphanum_fraction": 0.6420560479164124, "avg_line_length": 32.40625, "blob_id": "ba89ded995e6441c824058eef9605b98f8b3aa0d", "content_id": "a124fa0b0006168c18b69a2b4e25dc3c7db1bad2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1070, "license_type": "permissive", "max_line_length": 100, "num_lines": 32, "path": "/src/vm/UpScheduler.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPSCHEDULER_H\n#define UP_UPSCHEDULER_H\n\n#include \"Up/UpGlobal.h\"\n\n// *************************************************************************************************\n\ntypedef struct 
UpScheduleItem UpScheduleItem;\n\nstruct UpScheduler {\n UpThreadId threadId;\n UpScheduleItem* nextEvent;\n UpScheduleItem* lastEvent;\n};\n\n// *************************************************************************************************\n\nUpScheduler* UpSchedulerCreate(UpArena* heap);\n\nUpEvent* UpCreateNullEvent(UpArena* heap);\nUpEvent* UpCreateResumeEvent(UpArena* heap, UpTask* task, char* cursor, bool isImmediate);\nUpEvent* UpCreateStartEvent(UpArena* heap, UpFunction* function, bool isImmediate);\n\nvoid UpSchedulerResumeTask(UpScheduler* self, UpTask* task);\nvoid UpSchedulerScheduleTask(UpScheduler* self, UpTask* task);\nvoid UpSchedulerScheduleEvent(UpScheduler* self, UpEvent* event, UpObject* input);\n\nUpStatus UpSchedulerDoEvent(UpScheduler* self, UpEvent* event, UpObject* input);\n\nUpStatus UpSchedulerRun(UpScheduler* self);\n\n#endif // UP_UPSCHEDULER_H\n" }, { "alpha_fraction": 0.4938271641731262, "alphanum_fraction": 0.4938271641731262, "avg_line_length": 25.91666603088379, "blob_id": "c35b56ba4c3f0d8cffd3ab29a8198b4340cc8205", "content_id": "f70ef94f3bc4e0a3a1f19f242ddfd6960dd9ae9c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 324, "license_type": "permissive", "max_line_length": 99, "num_lines": 12, "path": "/src/vm/UpNull.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nUpObject* UpNullCreate() {\n return UpObjectCreateWithClass(UP_BUILTIN(null));\n}\n" }, { "alpha_fraction": 0.3913043439388275, "alphanum_fraction": 0.3913043439388275, "avg_line_length": 21.899999618530273, "blob_id": "bead9bac8f4a40d3b1d48409e2d65f56f32e20bb", "content_id": "0564e79aedb55c9fa8ecd87f5cc04458ede014c0", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 230, "license_type": "permissive", "max_line_length": 100, "num_lines": 10, "path": "/src/json/include/UpJSON.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPJSON_H\n#define UP_UPJSON_H\n\n#include \"Up/Up.h\"\n\n// *************************************************************************************************\n\nUpObject* UpJSONParse(const char* source);\n\n#endif // UP_UPJSON_H\n" }, { "alpha_fraction": 0.37442922592163086, "alphanum_fraction": 0.37442922592163086, "avg_line_length": 20.799999237060547, "blob_id": "4f7e35096186e2d8b1db28bd0d02388ba5cda8eb", "content_id": "c12968159443b952845492a2e1289d14d3ee3e2c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 219, "license_type": "permissive", "max_line_length": 100, "num_lines": 10, "path": "/src/vm/include/UpNull.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPNULL_H\n#define UP_UPNULL_H\n\n#include \"Up/UpObject.h\"\n\n// *************************************************************************************************\n\nUpObject* UpNullCreate();\n\n#endif // UP_UPNULL_H\n" }, { "alpha_fraction": 0.5553519129753113, "alphanum_fraction": 0.5568181872367859, "avg_line_length": 26.738983154296875, "blob_id": "1105393ba2709586f985bd7df117dbcc191fb6c0", "content_id": "cc4fd364ad21ae5f88336822684f4c98baff2697", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8184, "license_type": "permissive", "max_line_length": 99, "num_lines": 295, "path": "/src/vm/UpMap.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpMap.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpList.h\"\n#include 
\"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic unsigned int _BucketAllocationSize = 8;\n\nstruct UpMapItem {\n UpHash hash;\n UpObject* key;\n UpObject* value;\n UpMapItem* next;\n};\n\nstatic unsigned int _BucketForHash(UpMap* self, UpHash hash) {\n return hash & (self->bucketsAllocated-1);\n}\n\nstatic UpMapItem* _CreateMapItem(UpHash hash, UpObject* key, UpObject* value) {\n if (hash == -1) {\n UpSetError(\"Illegal key\");\n }\n\n UpMapItem* item = UpArenaNew(UpGetHeap(), UpMapItem);\n item->hash = hash;\n item->key = key;\n item->value = value; \n item->next = NULL;\n return item;\n}\n\nstatic void _SetUnique(UpMap* self, UpObject* key, UpHash hash, UpObject* value) {\n unsigned int bucket = _BucketForHash(self, hash);\n UpMapItem* item = self->buckets[bucket];\n if (!item) {\n self->buckets[bucket] = _CreateMapItem(hash, key, value);\n ++self->bucketsUsed;\n ++self->itemCount;\n } else {\n while (item) {\n if (!item->next) {\n item->next = _CreateMapItem(hash, key, value);\n ++self->itemCount;\n break; \n }\n item = item->next;\n } \n }\n}\n\nstatic void _GrowBuckets(UpMap* self) {\n UpArena* heap = UpGetHeap();\n unsigned int oldBucketCount = self->bucketsAllocated;\n UpMapItem** oldBuckets = self->buckets;\n \n self->bucketsUsed = 0;\n self->bucketsAllocated = self->bucketsAllocated + _BucketAllocationSize;\n self->buckets = UpArenaNewArray(heap, UpMapItem, self->bucketsAllocated);\n \n for (unsigned int i = 0; i < oldBucketCount; ++i) {\n UpMapItem* oldItem = oldBuckets[i];\n while (oldItem) {\n _SetUnique(self, oldItem->key, oldItem->hash, oldItem->value);\n oldItem = oldItem->next;\n }\n }\n}\n\nstatic UpMapItem* _CloneBucket(UpMapItem* item) {\n if (item) {\n UpMapItem* newItem = _CreateMapItem(item->hash, item->key, item->value);\n newItem->next = _CloneBucket(item->next);\n return newItem;\n } else {\n return NULL;\n }\n}\n\nstatic void _CloneBuckets(UpMap* self, UpMap* 
target) {\n int bucketCount = self->bucketsAllocated;\n for (unsigned int i = 0; i < bucketCount; ++i) {\n UpMapItem* item = self->buckets[i];\n target->buckets[i] = _CloneBucket(item);\n }\n}\n\n// ************************************************************************************************\n\nUpMap* UpMapCreate() {\n UpMap* self = (UpMap*)UpObjectCreateWithClass(UP_BUILTIN(map));\n self->bucketsAllocated = _BucketAllocationSize;\n self->bucketsUsed = 0;\n self->itemCount = 0;\n self->buckets = UpArenaNewArray(UpGetHeap(), UpMapItem, self->bucketsAllocated);\n return self;\n}\n\nUpMap* UpMapClone(UpMap* other) {\n UpMap* self = (UpMap*)UpObjectCreateWithClass(UP_BUILTIN(map));\n self->bucketsAllocated = other->bucketsAllocated;\n self->bucketsUsed = other->bucketsUsed;\n self->itemCount = other->itemCount;\n self->buckets = UpArenaNewArray(UpGetHeap(), UpMapItem, self->bucketsAllocated);\n _CloneBuckets(other, self);\n return self;\n}\n\nunsigned int UpMapCount(UpMap* self) {\n return self->itemCount;\n}\n\nUpObject* UpMapKeys(UpMap* self) {\n UpList* list = UpListCreate();\n for (unsigned int i = 0; i < self->bucketsAllocated; ++i) {\n UpMapItem* item = self->buckets[i];\n while (item) {\n UpListAppend(list, item->key);\n item = item->next;\n }\n }\n return (UpObject*)list;\n}\n\nUpObject* UpMapValues(UpMap* self) {\n UpList* list = UpListCreate();\n for (unsigned int i = 0; i < self->bucketsAllocated; ++i) {\n UpMapItem* item = self->buckets[i];\n while (item) {\n UpListAppend((UpList*)list, item->value);\n item = item->next;\n }\n }\n return (UpObject*)list;\n}\n\nUpObject* UpMapBeginGet(UpMap* self, UpHash hash) {\n unsigned int bucket = _BucketForHash(self, hash);\n UpMapItem* item = self->buckets[bucket];\n while (item) {\n if (item->hash == hash) {\n self->searchItem = item;\n return item->key;\n }\n item = item->next;\n }\n\n return UpUndefined();\n}\n\nUpObject* UpMapNextGet(UpMap* self, UpHash hash) {\n UpMapItem* item = self->searchItem;\n while 
(item) {\n if (item->hash == hash) {\n self->searchItem = item;\n return item->key;\n }\n item = item->next;\n }\n\n return UpUndefined();\n}\n\nUpObject* UpMapEndGet(UpMap* self) {\n if (self->searchItem) {\n UpObject* value = self->searchItem->value;\n self->searchItem = NULL;\n return value;\n } else {\n return UpUndefined();\n }\n}\n\nUpObject* UpMapBeginSet(UpMap* self, UpObject* key, UpHash hash, UpObject* value) {\n if (self->bucketsUsed+1 == self->bucketsAllocated) {\n _GrowBuckets(self);\n }\n\n unsigned int bucket = _BucketForHash(self, hash);\n UpMapItem* item = self->buckets[bucket];\n if (!item) {\n self->buckets[bucket] = _CreateMapItem(hash, key, value);\n ++self->bucketsUsed;\n ++self->itemCount;\n } else {\n while (item) {\n if (item->hash == hash) {\n self->searchItem = item;\n return item->key;\n } else if (!item->next) {\n item->next = _CreateMapItem(hash, key, value);\n ++self->itemCount;\n break; \n }\n item = item->next;\n } \n }\n\n return UpUndefined();\n}\n\nUpObject* UpMapNextSet(UpMap* self, UpObject* key, UpHash hash, UpObject* value) {\n UpMapItem* lastItem = self->searchItem;\n UpMapItem* item = lastItem->next;\n while (item) {\n if (item->hash == hash) {\n self->searchItem = item;\n return item->key;\n } else if (!item->next) {\n item->next = _CreateMapItem(hash, key, value);\n ++self->itemCount;\n break; \n }\n lastItem = item;\n item = item->next;\n }\n\n self->searchItem = NULL;\n return UpUndefined();\n}\n\nvoid UpMapEndSet(UpMap* self, UpObject* value) {\n if (self->searchItem) {\n self->searchItem->value = value;\n self->searchItem = NULL;\n }\n}\n\nUpObject* UpMapBeginDelete(UpMap* self, UpHash hash) {\n unsigned int bucket = _BucketForHash(self, hash);\n UpMapItem* item = self->buckets[bucket];\n UpMapItem* lastItem = NULL;\n while (item) {\n if (item->hash == hash) {\n self->searchItem = lastItem;\n return item->key;\n }\n lastItem = item;\n item = item->next;\n }\n\n return UpUndefined();\n}\n\nUpObject* 
UpMapNextDelete(UpMap* self, UpHash hash) {\n UpMapItem* lastItem = self->searchItem->next;\n UpMapItem* item = lastItem->next;\n while (item) {\n if (item->hash == hash) {\n self->searchItem = lastItem;\n return item->key;\n }\n lastItem = item;\n item = item->next;\n }\n\n return UpUndefined();\n}\n\nbool UpMapEndDelete(UpMap* self, UpHash hash) {\n UpMapItem* lastItem = self->searchItem;\n if (lastItem) {\n UpMapItem* item = lastItem->next;\n if (item) {\n lastItem->next = item->next;\n --self->itemCount;\n return true;\n }\n } else {\n unsigned int bucket = _BucketForHash(self, hash);\n UpMapItem* lastItem = self->buckets[bucket];\n if (lastItem) {\n UpMapItem* next = self->buckets[bucket] = lastItem->next;\n if (!next) {\n --self->bucketsUsed;\n }\n --self->itemCount;\n return true;\n }\n }\n return false;\n}\n\nvoid UpMapAppend(UpMap* self, UpObject* key, UpObject* value) {\n if (self->bucketsUsed+1 == self->bucketsAllocated) {\n _GrowBuckets(self);\n }\n\n UpHash hash = UpGetBuiltinHash(key);\n _SetUnique(self, key, hash, value);\n}\n" }, { "alpha_fraction": 0.6121537089347839, "alphanum_fraction": 0.6121537089347839, "avg_line_length": 30.05555534362793, "blob_id": "ce5a32b729c456a2fc33d1659726fb68da9a675d", "content_id": "f85c5e6e4c38b1200042aa317a479c2c45d89858", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1119, "license_type": "permissive", "max_line_length": 100, "num_lines": 36, "path": "/src/vm/UpTask.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPTASK_H\n#define UP_UPTASK_H\n\n#include \"Up/UpGlobal.h\"\n\n// *************************************************************************************************\n\nstruct UpTask {\n UpThreadId threadId;\n UpChannel* returnChannel;\n UpChannel* generatorChannel;\n UpStorageFrame* storageFrame;\n UpCallFrame* callFrame;\n int stackDepth;\n};\n\n// 
*************************************************************************************************\n\nUpTask* UpTaskCreate(UpArena* heap);\nUpTask* UpTaskCreateWithFunctionDef(UpArena* heap, UpFunctionDef* functionDef, UpVariables* closure,\n UpObject* boundThis);\nUpTask* UpTaskCreateWithFunction(UpArena* heap, UpFunction* function);\n\nvoid UpTaskExpectReturn(UpTask* self);\n\nbool UpTaskIsGenerator(UpTask* self);\nvoid UpTaskMakeGenerator(UpTask* self, UpChannel* channel);\n\nbool UpTaskIsOnCurrentThread(UpTask* self);\n\nvoid UpTaskPushArgument(UpTask* task, UpObject* argument);\nUpStatus UpTaskGetReturnValue(UpTask* task, UpObject** out);\n\nUpStatus UpTaskResume(UpTask* task, UpScheduler* scheduler);\n\n#endif // UP_UPTASK_H\n" }, { "alpha_fraction": 0.555060863494873, "alphanum_fraction": 0.665924608707428, "avg_line_length": 21.756755828857422, "blob_id": "64519127b291c520659649f151ef16a326ee001f", "content_id": "45b38a722ed7f6a0c9c789c716109ec0be93837a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6738, "license_type": "permissive", "max_line_length": 75, "num_lines": 296, "path": "/src/vm/Up.tab.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "/* A Bison parser, made by GNU Bison 2.3. */\n\n/* Skeleton interface for Bison's Yacc-like parsers in C\n\n Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006\n Free Software Foundation, Inc.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2, or (at your option)\n any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program; if not, write to the Free Software\n Foundation, Inc., 51 Franklin Street, Fifth Floor,\n Boston, MA 02110-1301, USA. */\n\n/* As a special exception, you may create a larger work that contains\n part or all of the Bison parser skeleton and distribute that work\n under terms of your choice, so long as that work isn't itself a\n parser generator using the skeleton or a modified version thereof\n as a parser skeleton. Alternatively, if you modify or redistribute\n the parser skeleton itself, you may (at your option) remove this\n special exception, which will cause the skeleton and the resulting\n Bison output files to be licensed under the GNU General Public\n License without this special exception.\n\n This special exception was added by the Free Software Foundation in\n version 2.2 of Bison. */\n\n/* Tokens. */\n#ifndef YYTOKENTYPE\n# define YYTOKENTYPE\n /* Put the tokens into the symbol table, so that GDB and other debuggers\n know about them. 
*/\n enum yytokentype {\n IDENTIFIER = 258,\n UIDENTIFIER = 259,\n BIDENTIFIER = 260,\n STRING = 261,\n FSTRING = 262,\n STRING_LINE = 263,\n FSTRING_LINE = 264,\n INTEGER = 265,\n LONG = 266,\n FLOAT = 267,\n CPRIMITIVE = 268,\n CFUNCTION = 269,\n CLOSE_BLOCK = 270,\n OPEN_BLOCK = 271,\n NEWLINE = 272,\n INLINE_EXPR = 273,\n PRIVATE = 274,\n PUBLIC = 275,\n THROW = 276,\n FINALLY = 277,\n CATCH = 278,\n TRY = 279,\n DO = 280,\n CONTINUE = 281,\n BREAK = 282,\n WHILE = 283,\n ON = 284,\n FOR = 285,\n ORELSE = 286,\n ELSE = 287,\n IF = 288,\n UNDERSCORE = 289,\n AT = 290,\n CARET = 291,\n POUND = 292,\n RARROW2MUL = 293,\n LARROW3 = 294,\n RARROW3 = 295,\n RARROW2 = 296,\n LARROW2 = 297,\n RARROW = 298,\n LARROW = 299,\n FATARROW2 = 300,\n FATARROW = 301,\n SEMICOLON = 302,\n COLON2 = 303,\n COLON = 304,\n CONCAT_EQ = 305,\n STAR2_EQ = 306,\n SLASH2_EQ = 307,\n SLASH_EQ = 308,\n STAR_EQ = 309,\n SUBTRACT_EQ = 310,\n ADD_EQ = 311,\n EQ = 312,\n NOTIN = 313,\n ISIN = 314,\n IN = 315,\n HASNOT = 316,\n HAS = 317,\n ISNOT = 318,\n IS = 319,\n AS = 320,\n CONCATSTR = 321,\n CONCAT = 322,\n GTE = 323,\n GT = 324,\n LTE = 325,\n LT = 326,\n NEQ = 327,\n EQ2 = 328,\n STAR2 = 329,\n SLASH2 = 330,\n SLASH = 331,\n STAR = 332,\n SUBTRACT = 333,\n ADD = 334,\n PIPE2 = 335,\n PIPE = 336,\n AMPERSAND = 337,\n TILDE = 338,\n DASHDASH = 339,\n COMMA = 340,\n QUESTION = 341,\n EXCLAMATION = 342,\n DOT3 = 343,\n DOT2 = 344,\n DOT = 345,\n OPERATORQ = 346,\n OPERATOR = 347,\n CLOSE_OPERATOR = 348,\n OPEN_OPERATORQ = 349,\n OPEN_OPERATOR = 350,\n WHERE = 351,\n BY = 352,\n THROUGH = 353,\n TO = 354,\n BULLET = 355,\n BACKSLASH = 356,\n RCB = 357,\n LCB = 358,\n RB = 359,\n LB = 360,\n RP = 361,\n LP = 362,\n STRUCT = 363,\n CONST = 364,\n OPEN_C = 365\n };\n#endif\n/* Tokens. 
*/\n#define IDENTIFIER 258\n#define UIDENTIFIER 259\n#define BIDENTIFIER 260\n#define STRING 261\n#define FSTRING 262\n#define STRING_LINE 263\n#define FSTRING_LINE 264\n#define INTEGER 265\n#define LONG 266\n#define FLOAT 267\n#define CPRIMITIVE 268\n#define CFUNCTION 269\n#define CLOSE_BLOCK 270\n#define OPEN_BLOCK 271\n#define NEWLINE 272\n#define INLINE_EXPR 273\n#define PRIVATE 274\n#define PUBLIC 275\n#define THROW 276\n#define FINALLY 277\n#define CATCH 278\n#define TRY 279\n#define DO 280\n#define CONTINUE 281\n#define BREAK 282\n#define WHILE 283\n#define ON 284\n#define FOR 285\n#define ORELSE 286\n#define ELSE 287\n#define IF 288\n#define UNDERSCORE 289\n#define AT 290\n#define CARET 291\n#define POUND 292\n#define RARROW2MUL 293\n#define LARROW3 294\n#define RARROW3 295\n#define RARROW2 296\n#define LARROW2 297\n#define RARROW 298\n#define LARROW 299\n#define FATARROW2 300\n#define FATARROW 301\n#define SEMICOLON 302\n#define COLON2 303\n#define COLON 304\n#define CONCAT_EQ 305\n#define STAR2_EQ 306\n#define SLASH2_EQ 307\n#define SLASH_EQ 308\n#define STAR_EQ 309\n#define SUBTRACT_EQ 310\n#define ADD_EQ 311\n#define EQ 312\n#define NOTIN 313\n#define ISIN 314\n#define IN 315\n#define HASNOT 316\n#define HAS 317\n#define ISNOT 318\n#define IS 319\n#define AS 320\n#define CONCATSTR 321\n#define CONCAT 322\n#define GTE 323\n#define GT 324\n#define LTE 325\n#define LT 326\n#define NEQ 327\n#define EQ2 328\n#define STAR2 329\n#define SLASH2 330\n#define SLASH 331\n#define STAR 332\n#define SUBTRACT 333\n#define ADD 334\n#define PIPE2 335\n#define PIPE 336\n#define AMPERSAND 337\n#define TILDE 338\n#define DASHDASH 339\n#define COMMA 340\n#define QUESTION 341\n#define EXCLAMATION 342\n#define DOT3 343\n#define DOT2 344\n#define DOT 345\n#define OPERATORQ 346\n#define OPERATOR 347\n#define CLOSE_OPERATOR 348\n#define OPEN_OPERATORQ 349\n#define OPEN_OPERATOR 350\n#define WHERE 351\n#define BY 352\n#define THROUGH 353\n#define TO 354\n#define BULLET 
355\n#define BACKSLASH 356\n#define RCB 357\n#define LCB 358\n#define RB 359\n#define LB 360\n#define RP 361\n#define LP 362\n#define STRUCT 363\n#define CONST 364\n#define OPEN_C 365\n\n\n\n\n#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED\ntypedef union YYSTYPE\n#line 21 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n{\n int indentLevel;\n long long tagValue;\n char* stringValue;\n UpSyntax* objectValue;\n int operatorValue;\n}\n/* Line 1529 of yacc.c. */\n#line 277 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.tab.h\"\n\tYYSTYPE;\n# define yystype YYSTYPE /* obsolescent; will be withdrawn */\n# define YYSTYPE_IS_DECLARED 1\n# define YYSTYPE_IS_TRIVIAL 1\n#endif\n\n\n\n#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED\ntypedef struct YYLTYPE\n{\n int first_line;\n int first_column;\n int last_line;\n int last_column;\n} YYLTYPE;\n# define yyltype YYLTYPE /* obsolescent; will be withdrawn */\n# define YYLTYPE_IS_DECLARED 1\n# define YYLTYPE_IS_TRIVIAL 1\n#endif\n\n\n" }, { "alpha_fraction": 0.7058318853378296, "alphanum_fraction": 0.7058318853378296, "avg_line_length": 31.36111068725586, "blob_id": "7255255b79233794585a34fc49380690ec5bbc49", "content_id": "870bc11fb4ce0a65b4eeb0aefd0e4bbeb344e12b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1166, "license_type": "permissive", "max_line_length": 100, "num_lines": 36, "path": "/src/vm/include/UpFloat.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPFLOAT_H\n#define UP_UPFLOAT_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpFloat {\n UpObject __base;\n double value;\n UpSymbol unit;\n};\n\n// *************************************************************************************************\n\nUpFloat* UpFloatCreate(double value);\nUpFloat* UpFloatCreateWithUnit(double value, UpSymbol unit);\n\nconst char* UpFloatToCString(UpFloat* self);\nUpString* UpFloatToString(UpFloat* self);\n\nbool 
UpFloatEquals(UpFloat* self, UpObject* other);\nbool UpFloatGreaterThan(UpFloat* self, UpObject* other);\nbool UpFloatGreaterThanEquals(UpFloat* self, UpObject* other);\nbool UpFloatLessThan(UpFloat* self, UpObject* other);\nbool UpFloatLessThanEquals(UpFloat* self, UpObject* other);\n\nUpObject* UpFloatAdd(UpFloat* self, UpObject* other);\nUpObject* UpFloatSubtract(UpFloat* self, UpObject* other);\nUpObject* UpFloatMultiply(UpFloat* self, UpObject* other);\nUpObject* UpFloatDivide(UpFloat* self, UpObject* other);\nUpObject* UpFloatMod(UpFloat* self, UpObject* other);\nUpObject* UpFloatPow(UpFloat* self, UpObject* other);\nUpObject* UpFloatNegate(UpFloat* self);\n\nUpObject* UpFloatModf(UpFloat* self);\n\n#endif // UP_UPFLOAT_H\n" }, { "alpha_fraction": 0.8563535809516907, "alphanum_fraction": 0.8563535809516907, "avg_line_length": 59, "blob_id": "45d72d27a7a843ef5bc34801a0a995d9e8d199f1", "content_id": "cbe8d41b3b0233688f31891bdbfca7698aa8438f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "permissive", "max_line_length": 85, "num_lines": 3, "path": "/make/test/__init__.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom .run import runTests, makeTestCatalog, testOutputPath\nfrom .TestFixture import TestFixture, RunException, TestException, TestAbortException\nfrom .pages import renderTestPages\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 23, "blob_id": "9c9f04b238603254850512f6bbd92249b20a6409", "content_id": "26624319eb9172863a0220a1a0771f2068d7ccb9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 25, "license_type": "permissive", "max_line_length": 23, "num_lines": 1, "path": "/src/vm/pch.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"UpInternal.h\"\n" }, { "alpha_fraction": 
0.48510077595710754, "alphanum_fraction": 0.48510077595710754, "avg_line_length": 29.013158798217773, "blob_id": "ddd630be5f16ea1879f5dde4a6c23dc715dbcee9", "content_id": "d21662836b608a27fd0c1a55ea95e17e6257b8ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2282, "license_type": "permissive", "max_line_length": 99, "num_lines": 76, "path": "/src/json/UpJSON.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"UpJSON/UpJSON.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpBool.h\"\n\n#include <stdio.h>\n#include <string.h>\n#include \"cJSON.h\"\n\n// ************************************************************************************************\n\nstatic UpObject* _Convert(cJSON* json) {\n if (json->type == cJSON_False) {\n return UpFalse();\n } else if (json->type == cJSON_True) {\n return UpTrue();\n } else if (json->type == cJSON_NULL) {\n return UpNull();\n } else if (json->type == cJSON_Number) {\n if (json->valueint == json->valuedouble) {\n return (UpObject*)UpIntegerCreate(json->valueint);\n } else {\n return (UpObject*)UpFloatCreate(json->valuedouble);\n }\n return UpNull();\n } else if (json->type == cJSON_String) {\n return (UpObject*)UpStringCreate(json->valuestring);\n } else if (json->type == cJSON_Array) {\n UpList* list = UpListCreate();\n for (cJSON* child = json->child; child; child = child->next) {\n UpObject* value = _Convert(child);\n if (!value) {\n return NULL;\n }\n UpListAppend(list, value);\n }\n return (UpObject*)list;\n } else if (json->type == cJSON_Object) {\n UpMap* map = UpMapCreate();\n for (cJSON* child = json->child; child; child = child->next) {\n UpObject* key = (UpObject*)UpStringCreate(child->string);\n UpObject* value = _Convert(child);\n if (!value) {\n return NULL;\n }\n UpMapAppend(map, key, value);\n }\n return (UpObject*)map;\n } else {\n return 
UpNull();\n }\n}\n\n// ************************************************************************************************\n\nUpObject* UpJSONParse(const char* source) {\n cJSON* json = cJSON_Parse(source);\n if (!json) {\n free(json);\n UpSetError(\"JSON syntax error\");\n return NULL;\n } else {\n UpObject* result = _Convert(json);\n free(json);\n if (result) {\n return result;\n } else {\n UpSetError(\"JSON syntax error\");\n return NULL;\n }\n }\n\n return UpNull();\n}\n" }, { "alpha_fraction": 0.5720602869987488, "alphanum_fraction": 0.574371874332428, "avg_line_length": 32.614864349365234, "blob_id": "7a4fbde50d15a735119fd0c91a0576d14607b55d", "content_id": "d8e1431f4cf6dd61a9b87ac8c292f2f7887fc67b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9950, "license_type": "permissive", "max_line_length": 100, "num_lines": 296, "path": "/make/cmdline.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport time, os.path, sys, argparse, json, traceback\nfrom .build.Project import Project, projects\nfrom .util import BuildError, importLocalModule\nfrom .Message import Message, summary\nfrom .JSONWriter import JSONWriter\n\n# **************************************************************************************************\n\ndef parseArguments():\n parser = argparse.ArgumentParser(description='Make')\n parser.add_argument(\"action\", default=\"build\")\n\n # General options\n parser.add_argument('--showCommands', dest='showCommands', action='store_true',\n default=False,\n help='Show literal command line tasks as as they are performed')\n parser.add_argument('--formatOutput', dest='formatOutput', action='store_true',\n default=True,\n help='Parse command line output and reformat it.')\n\n # Build options\n parser.add_argument('--build', dest='buildPath', action='store',\n help='Path to store build files.')\n parser.add_argument('--config', 
dest='configName', action='store',\n default=\"MacOSX\",\n help='Name of configuration to build.')\n parser.add_argument('--optimize', dest='optimize', action='store',\n help='Optimize setting (speed or size)')\n parser.add_argument('--enableProbes', dest='enableProbes', action='store_true',\n default=False,\n help='Enable probes in the build')\n parser.add_argument('--osxDeveloper', dest='osxDeveloper', action='store',\n default='',\n help='Path to the Xcode Developer directory.')\n parser.add_argument('--androidSDK', dest='androidSDK', action='store',\n default='',\n help='Path to the Android SDK.')\n parser.add_argument('--androidNDK', dest='androidNDK', action='store',\n default='',\n help='Path to the Android NDK.')\n parser.add_argument('--androidSource', dest='androidSource', action='store',\n default='',\n help='Path to the Android source repository.')\n\n # Install options\n parser.add_argument('--install', dest='installPath', action='store',\n help='Path to install files to.')\n parser.add_argument('--link', dest='installLink', action='store_true',\n default=False,\n help='Install files as symbolic links.')\n\n # Test options\n parser.add_argument('--test', dest='test', action='store',\n default=None,\n help='Name of test to run.')\n parser.add_argument('--build-only', dest='buildOnly', action='store_true',\n default=False,\n help='Compile tests but do not run them.')\n\n # Metrics options\n parser.add_argument('--analyst', dest='analyst', action='store',\n default=None,\n help='Name of analyst to run.')\n parser.add_argument('--dump', dest='dump', action='store',\n default=None,\n help='Path of file to read probe data from.')\n\n parser.add_argument('--output', dest='outputPath', action='store',\n default=None,\n help='Path of directory to write generated pages to.')\n\n args = parser.parse_args()\n\n pairs = []\n for name,value in loadConf().iteritems():\n if isinstance(value, bool):\n if value:\n pairs +=[\"--%s\" % name]\n else:\n pairs +=[\"--%s\" % 
name, str(value)]\n\n # Parser requires action parameter, so restate the one we already have\n pairs.append(args.action)\n\n # Pass the json config dictionary through parser so the \"dest\" mappings are honored\n # This allows me to reuse argparser.\n args2 = parser.parse_known_args(pairs)\n for name,value in vars(args2[0]).iteritems():\n if value != parser.get_default(name):\n setattr(args, name, value)\n\n return args\n\n# **************************************************************************************************\n\ndef make(configurations, testInfo, metricInfo):\n from signal import signal, SIGTERM\n signal(SIGTERM, lambda num, frame: sys.exit(SIGTERM))\n\n out = JSONOutput(sys.stdout)\n\n args = parseArguments()\n args.tests = testInfo\n args.metrics = metricInfo\n\n if args.action == \"test\":\n makeTests(out, args)\n elif args.action == \"testCatalog\":\n makeTestCatalog(out, args)\n elif args.action == \"testCommands\":\n makeTestCommands(out, args)\n elif args.action == \"testPages\":\n makeTestPages(out, args)\n elif args.action == \"analyze\":\n makeAnalysis(out, args)\n elif args.action == \"metricsCatalog\":\n makeMetricsCatalog(out, args)\n else:\n makeProjects(out, args.action, args, configurations)\n\ndef makeProjects(out, action, args, configurations):\n if not args.buildPath:\n sys.stderr.write(\"Required 'build' argument is missing.\\n\")\n sys.exit(1)\n\n for name,value in vars(args).iteritems():\n setattr(Project, name, value)\n\n basicConfig = configurations.get(Project.configName)\n if not basicConfig:\n raise Exception, \"Configuration %s not found\" % args.config\n\n for name,value in basicConfig.iteritems():\n setattr(Project, name, value)\n\n c1 = time.time()\n\n Project.initWithConfig()\n\n for project in projects:\n project.fn(project)\n project.normalize()\n\n result = 0\n\n for project in projects:\n result = project.make(action, out)\n if result != 0:\n break\n\n c2 = time.time()\n\n if out.commandCount:\n message = \"Finished 
in %.2f seconds. \" % (c2-c1)\n if out.errorCount:\n message += \"%d errors. \" % out.errorCount\n if out.warningCount:\n message += \"%d warnings. \" % out.warningCount\n\n out << summary(message)\n else:\n out << summary(\"Nothing to do.\")\n\n sys.exit(result)\n\ndef makeTests(out, args):\n from .test import runTests\n\n modulesPath, moduleNames = args.tests\n errors = False\n for moduleName in moduleNames:\n testModule = importLocalModule(moduleName, modulesPath)\n if runTests(testModule, out, args.test, runAll=not args.test, buildOnly=args.buildOnly):\n errors = True\n\n sys.exit(1 if errors else 0)\n\ndef makeTestCommands(out, args):\n from .test import runTests\n\n modulesPath, moduleNames = args.tests\n errors = False\n for moduleName in moduleNames:\n testModule = importLocalModule(moduleName, modulesPath)\n if runTests(testModule, out, args.test, runAll=not args.test, buildOnly=args.buildOnly):\n errors = True\n\n sys.exit(1 if errors else 0)\n\ndef makeTestPages(out, args):\n from .test import renderTestPages\n\n modulesPath, moduleNames = args.tests\n errors = False\n for moduleName in moduleNames:\n testModule = importLocalModule(moduleName, modulesPath)\n if renderTestPages(testModule, args.outputPath):\n errors = True\n\n sys.exit(1 if errors else 0)\n\ndef makeTestCatalog(out, args):\n from .test import makeTestCatalog\n\n catalogs = []\n\n modulesPath, moduleNames = args.tests\n for moduleName in moduleNames:\n testModule = importLocalModule(moduleName, modulesPath)\n catalog = makeTestCatalog(testModule)\n catalogs += catalog\n\n print '{\"tests\": %s}' % json.dumps(catalogs)\n\ndef makeAnalysis(out, args):\n from .metric import Analyst\n\n if not args.analyst:\n raise Exception(\"Analyst name not specified\")\n\n if not args.dump:\n raise Exception(\"Probes file not specified\")\n\n modules = importModules(*args.metrics)\n analystClass = Analyst.scanForAnalyst(modules, args.analyst)\n if not analystClass:\n raise Exception(\"Analyst '%s' not 
found\" % args.analyst)\n\n try:\n analyst = analystClass()\n analyst.analyze(args.dump)\n except Exception,exc:\n print \"ERROR: %s\" % exc\n raise\n\ndef makeMetricsCatalog(out, args):\n from .metric import Analyst\n\n modules = importModules(*args.metrics)\n catalog = Analyst.scanCatalog(modules)\n print json.dumps(catalog)\n\n# **************************************************************************************************\n\ndef loadConf():\n confPath = os.environ.get(\"UPCONF\")\n if confPath and os.path.isfile(confPath):\n f = file(confPath)\n data = f.read()\n f.close()\n return json.loads(data)\n else:\n return {}\n\ndef importModules(modulesPath, moduleNames):\n for moduleName in moduleNames:\n yield importLocalModule(moduleName, modulesPath)\n\n# **************************************************************************************************\n\nclass JSONOutput(object):\n def __init__(self, stdout=sys.stdout):\n self.writer = JSONWriter(stdout)\n self.commandCount = 0\n self.errorCount = 0\n self.warningCount = 0\n\n def __lshift__(self, message):\n if isinstance(message, Message):\n message.affect(self)\n self.writer.write(message.getJSON())\n else:\n self.writer.write({\"type\": \"text\", \"text\": str(message)})\n\n return self\n\n def write(self, text):\n self << text\n\n# **************************************************************************************************\n\nclass TextOutput(object):\n def __init__(self, stdout=sys.stdout):\n self.stdout = stdout\n self.commandCount = 0\n self.errorCount = 0\n self.warningCount = 0\n\n def __lshift__(self, message):\n self.stdout.write(str(message)+ '\\n')\n return self\n\n def write(self, text):\n self.stdout.write(str(text) + '\\n')\n" }, { "alpha_fraction": 0.7111448049545288, "alphanum_fraction": 0.71266108751297, "avg_line_length": 31.950000762939453, "blob_id": "f1b38529a937fa7ee1c9a3016a48ee6d6d575515", "content_id": "84680274b98be9209ee22b83d30de4926c427f16", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1319, "license_type": "permissive", "max_line_length": 100, "num_lines": 40, "path": "/src/vm/include/UpString.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPSTRING_H\n#define UP_UPSTRING_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpString {\n UpObject __base;\n char* value;\n int length;\n UpHash hash;\n};\n\n// *************************************************************************************************\n\nUpString* UpStringCreate(const char* value);\nUpString* UpStringCreateWithLength(const char* value, int length);\nUpString* UpStringFormat(const char* value, ...);\nUpString* UpStringCreateWithConcatenation(const char* left, int len1, const char* right, int len2);\nUpString* UpStringCreateWithCode(char code);\n\nvoid UpStringInit(UpString* self, const char* value);\n\nUpObject* UpStringToNumber(UpString* self, bool validate);\nUpString* UpStringToLowercase(UpString* self);\nUpString* UpStringToUppercase(UpString* self);\n\nbool UpStringIsEmpty(UpString* self);\nint UpStringGetLength(UpString* self);\nUpHash UpStringHash(UpString* self);\n\nUpObject* UpStringConcat(UpString* self, UpObject* other);\nbool UpStringEquals(UpString* self, UpObject* other);\n\nUpString* UpStringIndex(UpString* self, int index);\nUpString* UpStringSubstring(UpString* self, int begin, int end);\n\nint UpStringFind(UpString* self, char* findString, int startIndex);\nUpString* UpStringReplace(UpString* self, char* findString, char* replaceString, bool global);\n\n#endif // UP_UPSTRING_H\n" }, { "alpha_fraction": 0.6751893758773804, "alphanum_fraction": 0.6751893758773804, "avg_line_length": 28.30555534362793, "blob_id": "0a53c5fa8fe1e2f569aec899b6de8bce24186d4f", "content_id": "a866c7bed37e0a55693d94dcd639151ce4a1e922", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1056, "license_type": 
"permissive", "max_line_length": 100, "num_lines": 36, "path": "/src/vm/UpArray.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPARRAY_H\n#define UP_UPARRAY_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpArray {\n UpArena* heap;\n size_t size;\n char* items;\n unsigned int itemCount;\n unsigned int itemsAllocated;\n};\n\n// *************************************************************************************************\n\nUpArray* UpArrayCreate(UpArena* heap, size_t size);\n\nunsigned int UpArrayCount(UpArray* self);\nvoid* UpArrayBuffer(UpArray* self);\nvoid UpArrayCopyBuffer(UpArray* self, void** buffer, unsigned int* outCount);\n\nbool UpArrayGet(UpArray* self, UpIndex index, void* out);\nvoid UpArrayPop(UpArray* self, void* out);\nvoid UpArrayShift(UpArray* self, void* out);\n\nUpIndex UpArrayFind(UpArray* self, void* value);\n\nvoid UpArraySet(UpArray* self, UpIndex index, void* value);\nUpIndex UpArrayAppend(UpArray* self, void* value);\nvoid UpArrayInsert(UpArray* self, UpIndex index, void* value);\n\nbool UpArrayRemove(UpArray* self, void* value);\nbool UpArrayRemoveAtIndex(UpArray* self, UpIndex index);\nvoid UpArrayRemoveAll(UpArray* self);\n\n#endif // UP_UPARRAY_H\n" }, { "alpha_fraction": 0.5718740224838257, "alphanum_fraction": 0.5766552090644836, "avg_line_length": 32.87323760986328, "blob_id": "2335c09545ffe8a235ddf6f48cbd47d708894f57", "content_id": "ec0cf5aa2c60e9469d9990b3f3514357e7c559f5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9621, "license_type": "permissive", "max_line_length": 99, "num_lines": 284, "path": "/src/vm/UpInteger.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpArena.h\"\n\n// 
************************************************************************************************\n\nstatic char* _EncodeInteger(uint32_t n, uint8_t base, bool pad, char* buf, int bufsize) {\n static char* bases = \"0123456789abcdefghijklmnopqrstuvwxyz\";\n\n char* p = buf + bufsize -1;\n *p-- = '\\0';\n\n while (n) {\n int x = n % base;\n *p-- = bases[x];\n n /= base;\n }\n\n if (pad) {\n return p;\n } else {\n return p+1;\n }\n}\n\nstatic char* _ToString(UpInteger* self, int base, char* buf, int bufsize) {\n if (base == 10 || base == 8 || base == 16) {\n if (self->unit) {\n sprintf(buf, base == 10 ? \"%i%s\" : (base == 8 ? \"%o%s\" : \"%x%s\"), self->value,\n UpGetSymbolName(self->unit));\n } else {\n sprintf(buf, base == 10 ? \"%d\" : (base == 8 ? \"%o\" : \"%x\"), self->value);\n }\n return buf;\n } else {\n static char buf2[sizeof(self->value)*8+1];\n char* encoded = _EncodeInteger(self->value, base, false, buf2, sizeof(buf2));\n if (self->unit) {\n sprintf(buf, \"%s%s\", encoded, UpGetSymbolName(self->unit));\n return buf;\n } else {\n return encoded;\n }\n }\n}\n\n// ************************************************************************************************\n\nUpInteger* UpIntegerCreate(int value) {\n return UpIntegerCreateWithUnit(value, UpNullSymbol);\n}\n\nUpInteger* UpIntegerCreateWithUnit(int value, UpSymbol unit) {\n if (!unit && value >= UpSharedIntegerMin && value <= UpSharedIntegerMax) {\n UpInteger** shared = UpGetSharedIntegers();\n if (shared) {\n return shared[value-UpSharedIntegerMin];\n }\n }\n\n UpClass* cls = UpGetBuiltinClasses()->integerClass;\n UpInteger* self = (UpInteger*)UpClassInstantiate(cls);\n self->value = value;\n self->unit = unit;\n \n COUNT_INTEGER(self->value);\n return self;\n}\n\nvoid UpIntegerInit(UpInteger* self, const char* value, int base, const char* unit) {\n self->value = strtol(value, NULL, base);\n if (unit) {\n self->unit = UpGetSymbol(unit);\n }\n}\n\nconst char* UpIntegerToCString(UpInteger* self, int base) 
{\n static char buf[sizeof(self->value)*8+1];\n return _ToString(self, base, buf, sizeof(buf));\n}\n\nUpString* UpIntegerToString(UpInteger* self, int base) {\n if (base > 36) {\n UpSetError(\"Base exceeds maximum of 36 for string conversion\");\n return NULL;\n }\n\n char buf[sizeof(self->value)*8+1];\n char* encoded = _ToString(self, base, buf, sizeof(buf));\n return UpStringCreate(encoded);\n}\n\nbool UpIntegerEquals(UpInteger* self, UpObject* other) {\n int result;\n if (UpAsInt(other, &result)) {\n return self->value == result;\n } else {\n return false;\n }\n}\n\nbool UpIntegerGreaterThan(UpInteger* self, UpObject* other) {\n int result;\n if (UpAsInt(other, &result)) {\n return self->value > result;\n } else {\n return false;\n }\n}\n\nbool UpIntegerGreaterThanEquals(UpInteger* self, UpObject* other) {\n int result;\n if (UpAsInt(other, &result)) {\n return self->value >= result;\n } else {\n return false;\n }\n}\n\nbool UpIntegerLessThan(UpInteger* self, UpObject* other) {\n int result;\n if (UpAsInt(other, &result)) {\n return self->value < result;\n } else {\n return false;\n }\n}\n\nbool UpIntegerLessThanEquals(UpInteger* self, UpObject* other) {\n int result;\n if (UpAsInt(other, &result)) {\n return self->value <= result;\n } else {\n return false;\n }\n}\n\nUpObject* UpIntegerAdd(UpInteger* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpIntegerCreateWithUnit(self->value + n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value + n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value + n->value, unit);\n } else {\n UpSetError(\"Invalid operand for +\");\n return NULL;\n }\n}\n\nUpObject* UpIntegerSubtract(UpInteger* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpIntegerCreateWithUnit(self->value - n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value - n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value - n->value, unit);\n } else {\n UpSetError(\"Invalid operand for -\");\n return NULL;\n }\n}\n\nUpObject* UpIntegerMultiply(UpInteger* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpIntegerCreateWithUnit(self->value * n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value * n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value * n->value, unit);\n } else {\n UpSetError(\"Invalid operand for *\");\n return NULL;\n }\n}\n\nUpObject* UpIntegerDivide(UpInteger* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpIntegerCreateWithUnit(self->value / n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value / n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value / n->value, unit);\n } else {\n UpSetError(\"Invalid operand for /\");\n return NULL;\n }\n}\n\nUpObject* UpIntegerMod(UpInteger* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpIntegerCreateWithUnit(self->value % n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value % n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(fmod(self->value, n->value), unit);\n } else {\n UpSetError(\"%s\", \"Invalid operand for //\");\n return NULL;\n }\n}\n\nUpObject* UpIntegerPow(UpInteger* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpIntegerCreateWithUnit(pow(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(pow(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(pow(self->value, n->value), unit);\n } else {\n UpSetError(\"Invalid operand for **\");\n return NULL;\n }\n}\n\nUpObject* UpIntegerNegate(UpInteger* self) {\n return (UpObject*)UpIntegerCreateWithUnit(-self->value, self->unit);\n}\n" }, { "alpha_fraction": 0.5129403471946716, "alphanum_fraction": 0.5152168869972229, "avg_line_length": 28.280702590942383, "blob_id": "f7119783ba19a513317e9f4b8343b37ad178a420", "content_id": "8df29b1c700ec8ecc028ab893159241fe0a673cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8346, "license_type": "permissive", "max_line_length": 100, "num_lines": 285, "path": "/metrics/benchmarks.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom make.metric import *\nfrom pandas import Series, DataFrame, MultiIndex\n\ndef formatName(name):\n parts = [name for name in name.split(\"|\") if name]\n if len(parts) > 1:\n return parts[-1], \":\".join(parts[0:-1])\n else:\n return name, name\n\n# **************************************************************************************************\n\n@probe('parse')\n@probe('compile')\n@probe('run')\nclass Phases(ProbeAnalyst):\n repeat = True\n\n def _group(self, phases):\n return phases.groupby(lambda index: index.split(':')[0])\n\n @column('PHASE', index=True)\n @column('COUNT', 'scalar')\n @column('MEAN', 'ms')\n @column('TOTAL', 'ms', sortDescending)\n @table\n def Phases(self):\n rows = []\n for prefix in ('parse', 
'compile', 'run'):\n for name,callTimes in self.times[prefix].iteritems():\n s = Series(callTimes)\n callCount = s.count()\n meanTime = s.mean()\n totalTime = s.sum()\n rows.append((\"%s:%s\" % (prefix,name),callCount,meanTime,totalTime))\n\n columns = ('PHASE', 'COUNT', 'MEAN', 'TOTAL')\n return DataFrame.from_records(rows, columns=columns, index=('PHASE'))\n\n @metric(format='ms')\n def parserTime(self, Phases, **tables):\n return self._group(Phases).sum().ix['parse']['TOTAL']\n\n @metric(format='ms')\n def compilerTime(self, Phases, **tables):\n return self._group(Phases).sum().ix['compile']['TOTAL']\n\n @metric(format='ms')\n def runTime(self, Phases, **tables):\n return self._group(Phases).sum().ix['run']['TOTAL']\n\n# **************************************************************************************************\n\n@probe('call')\nclass Calls(ProbeAnalyst):\n repeat = True\n\n @column('FUNCTION', index=True)\n @column('SOURCE', index=True)\n @column('COUNT', 'scalar')\n @column('MEAN', 'ms')\n @column('TOTAL', 'ms', sortDescending)\n @table\n def Calls(self):\n rows = []\n for name,callTimes in self.times['call'].iteritems():\n s = Series(callTimes)\n func,loc = formatName(name)\n callCount = s.count()\n meanTime = s.mean()\n totalTime = s.sum()\n rows.append((func,loc,callCount,meanTime,totalTime))\n\n columns = ('FUNCTION', 'SOURCE', 'COUNT', 'MEAN', 'TOTAL')\n return DataFrame.from_records(rows, columns=columns, index=('FUNCTION', 'SOURCE'))\n\n @metric(format='scalar')\n def calls(self, Calls, **tables):\n return Calls['COUNT'].sum()\n\n# **************************************************************************************************\n\n@probe('get')\n@probe('set')\n@probe('delete')\nclass Properties(ProbeAnalyst):\n def _sum(self, probe):\n rows = []\n for (name,loc),count in self.counts.get(probe, {}).iteritems():\n rows += [(name,loc,1) for data in count]\n\n if not rows:\n rows = [('NONE', 'NONE', 0)]\n\n index = ('PROPERTY', 'SOURCE')\n columns 
= ('PROPERTY', 'SOURCE', 'COUNT')\n df = DataFrame.from_records(rows, columns=columns, index=index)\n grouped = df.groupby(level=index).sum()\n return grouped\n\n @column('PROPERTY', index=True)\n @column('SOURCE', index=True)\n @column('COUNT', 'scalar', sortDescending)\n @table\n def PropertyGets(self):\n return self._sum('get')\n\n @column('PROPERTY', index=True)\n @column('COUNT', 'scalar', sortDescending)\n @table\n def PropertySets(self):\n return self._sum('set')\n\n @column('PROPERTY', index=True)\n @column('COUNT', 'scalar', sortDescending)\n @table\n def PropertyDeletes(self):\n return self._sum('delete')\n\n @metric(format='scalar')\n def propertyGets(self, PropertyGets, **tables):\n return PropertyGets['COUNT'].sum()\n\n @metric(format='scalar')\n def propertySets(self, PropertySets, **tables):\n return PropertySets['COUNT'].sum()\n\n @metric(format='scalar')\n def propertyDeletes(self, PropertyDeletes, **tables):\n return PropertyDeletes['COUNT'].sum()\n\n# **************************************************************************************************\n\n@probe('string')\nclass Strings(ProbeAnalyst):\n @column('STRING', index=True)\n @column('COUNT', 'scalar', sortDescending)\n @table\n def Strings(self):\n rows = []\n for probeName,counts in self.counts.iteritems():\n for probeId,count in counts.iteritems():\n rows.append((probeId[0:25], len(count)))\n\n columns = ('STRING', 'COUNT')\n return DataFrame.from_records(rows, columns=columns, index=('STRING'))\n\n @metric(format='scalar')\n def strings(self, Strings, **tables):\n return Strings['COUNT'].sum()\n\n# **************************************************************************************************\n\n@probe('number')\nclass Numbers(ProbeAnalyst):\n @column('NUMBER', index=True)\n @column('COUNT', 'scalar', sortDescending)\n @table\n def Numbers(self):\n rows = []\n for probeName,counts in self.counts.iteritems():\n for probeId,count in counts.iteritems():\n if probeId == 'float':\n convert 
= lambda n: str(float(n))\n else:\n convert = str\n\n rows += [(probeId, convert(data), 1) for data in count]\n\n def fn(index):\n return \": \".join(index)\n columns = ('TYPE', 'NUMBER', 'COUNT')\n\n df = DataFrame.from_records(rows, columns=columns, index=('TYPE', 'NUMBER'))\n df = df.groupby(fn).sum()\n df.index.name = 'NUMBER'\n return df\n\n @metric(format='scalar')\n def numbers(self, Numbers, **tables):\n return Numbers['COUNT'].sum()\n\n# **************************************************************************************************\n\nopNames = [\n \"Null\",\n \"Pop\",\n \"Jump\",\n \"JumpIf\",\n \"JumpIfNot\",\n \"JumpIfDefined\",\n \"JumpIfNotDefined\",\n \"LoadObject\",\n \"LoadInteger\",\n \"LoadLong\",\n \"LoadFloat\",\n \"GetLocal\",\n \"SetLocal\",\n \"DeleteLocal\",\n \"SetArgument\",\n \"GetProperty\",\n \"LookupProperty\",\n \"SetProperty\",\n \"ValidateProperty\",\n \"DeleteProperty\",\n \"CallProperty\",\n \"CallOperator\",\n \"CallOperator2\",\n \"Is\",\n \"IsDefined\",\n \"Import\",\n \"SetImport\",\n \"Call\",\n \"Return\",\n \"Yield\",\n \"Throw\",\n \"NewList\",\n \"NewMap\",\n \"NewClass\",\n \"BindObject\",\n \"NewFunction\",\n \"NewCFunction\",\n \"CallIterator\",\n \"JumpIfNoYield\",\n \"JumpBreak\",\n \"ResumeIterator\",\n \"BeginTry\",\n \"EndTry\",\n \"Catch\",\n \"Finally\",\n]\n\n@probe('instruction')\nclass Instructions(ProbeAnalyst):\n @column('INSTRUCTION', index=True)\n @column('COUNT', 'scalar', sortDescending)\n @table\n def Instructions(self):\n rows = []\n for probeName,counts in self.counts.iteritems():\n for probeId,count in counts.iteritems():\n rows += [(opNames[int(data)], 1) for data in count]\n\n def fn(index):\n return index\n columns = ('INSTRUCTION', 'COUNT')\n df = DataFrame.from_records(rows, columns=columns, index=('INSTRUCTION')).groupby(fn).sum()\n df.index.name = 'INSTRUCTION'\n return df\n\n @metric(format='scalar')\n def instructions(self, Instructions, **tables):\n return 
Instructions['COUNT'].sum()\n\n# **************************************************************************************************\n\n@probe('call')\n@probe('number')\nclass StackTrace(ProbeAnalyst):\n def breaker(self, probeType, entry):\n if probeType == 3:\n _, numberType, n = entry\n n = abs(n)\n if numberType == 'float' and n > 0.89 and n < 0.9:\n return 'call'\n\n def stackTrace(self, stack):\n print \"Stack trace:\"\n for name in stack:\n func,loc = formatName(name)\n if not loc:\n print \"* %s()\" % (func)\n else:\n print \"* %s() in %s\" % (func, loc)\n\n# **************************************************************************************************\n\n@probe('compilerNode')\n@probe('compilerFrame')\n@probe('compilerClass')\n@probe('compilerOp')\nclass OtherProbes(ProbeAnalyst):\n @table\n def NothingToSeeHere(self):\n pass\n" }, { "alpha_fraction": 0.4743458330631256, "alphanum_fraction": 0.5876525640487671, "avg_line_length": 29.359455108642578, "blob_id": "dca64d5dbea9e35aae95def822df509e57c153a6", "content_id": "a385b489d597d6567c5def99a1093669a3f49d6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 95696, "license_type": "permissive", "max_line_length": 126, "num_lines": 3152, "path": "/src/vm/Up.yy.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#line 2 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.yy.c\"\n\n#line 4 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.yy.c\"\n\n#define YY_INT_ALIGNED short int\n\n/* A lexical scanner generated by flex */\n\n#define FLEX_SCANNER\n#define YY_FLEX_MAJOR_VERSION 2\n#define YY_FLEX_MINOR_VERSION 5\n#define YY_FLEX_SUBMINOR_VERSION 35\n#if YY_FLEX_SUBMINOR_VERSION > 0\n#define FLEX_BETA\n#endif\n\n/* First, we deal with platform-specific or compiler-specific issues. */\n\n/* begin standard C headers. */\n#include <stdio.h>\n#include <string.h>\n#include <errno.h>\n#include <stdlib.h>\n\n/* end standard C headers. 
*/\n\n/* flex integer type definitions */\n\n#ifndef FLEXINT_H\n#define FLEXINT_H\n\n/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */\n\n#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L\n\n/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,\n * if you want the limit (max/min) macros for int types. \n */\n#ifndef __STDC_LIMIT_MACROS\n#define __STDC_LIMIT_MACROS 1\n#endif\n\n#include <inttypes.h>\ntypedef int8_t flex_int8_t;\ntypedef uint8_t flex_uint8_t;\ntypedef int16_t flex_int16_t;\ntypedef uint16_t flex_uint16_t;\ntypedef int32_t flex_int32_t;\ntypedef uint32_t flex_uint32_t;\ntypedef uint64_t flex_uint64_t;\n#else\ntypedef signed char flex_int8_t;\ntypedef short int flex_int16_t;\ntypedef int flex_int32_t;\ntypedef unsigned char flex_uint8_t; \ntypedef unsigned short int flex_uint16_t;\ntypedef unsigned int flex_uint32_t;\n#endif /* ! C99 */\n\n/* Limits of integral types. */\n#ifndef INT8_MIN\n#define INT8_MIN (-128)\n#endif\n#ifndef INT16_MIN\n#define INT16_MIN (-32767-1)\n#endif\n#ifndef INT32_MIN\n#define INT32_MIN (-2147483647-1)\n#endif\n#ifndef INT8_MAX\n#define INT8_MAX (127)\n#endif\n#ifndef INT16_MAX\n#define INT16_MAX (32767)\n#endif\n#ifndef INT32_MAX\n#define INT32_MAX (2147483647)\n#endif\n#ifndef UINT8_MAX\n#define UINT8_MAX (255U)\n#endif\n#ifndef UINT16_MAX\n#define UINT16_MAX (65535U)\n#endif\n#ifndef UINT32_MAX\n#define UINT32_MAX (4294967295U)\n#endif\n\n#endif /* ! FLEXINT_H */\n\n#ifdef __cplusplus\n\n/* The \"const\" storage-class-modifier is valid. */\n#define YY_USE_CONST\n\n#else\t/* ! __cplusplus */\n\n/* C99 requires __STDC__ to be defined as 1. */\n#if defined (__STDC__)\n\n#define YY_USE_CONST\n\n#endif\t/* defined (__STDC__) */\n#endif\t/* ! __cplusplus */\n\n#ifdef YY_USE_CONST\n#define yyconst const\n#else\n#define yyconst\n#endif\n\n/* Returned upon end-of-file. 
*/\n#define YY_NULL 0\n\n/* Promotes a possibly negative, possibly signed char to an unsigned\n * integer for use as an array index. If the signed char is negative,\n * we want to instead treat it as an 8-bit unsigned char, hence the\n * double cast.\n */\n#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)\n\n/* An opaque pointer. */\n#ifndef YY_TYPEDEF_YY_SCANNER_T\n#define YY_TYPEDEF_YY_SCANNER_T\ntypedef void* yyscan_t;\n#endif\n\n/* For convenience, these vars (plus the bison vars far below)\n are macros in the reentrant scanner. */\n#define yyin yyg->yyin_r\n#define yyout yyg->yyout_r\n#define yyextra yyg->yyextra_r\n#define yyleng yyg->yyleng_r\n#define yytext yyg->yytext_r\n#define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno)\n#define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column)\n#define yy_flex_debug yyg->yy_flex_debug_r\n\n/* Enter a start condition. This macro really ought to take a parameter,\n * but we do it the disgusting crufty way forced on us by the ()-less\n * definition of BEGIN.\n */\n#define BEGIN yyg->yy_start = 1 + 2 *\n\n/* Translate the current start state into a value that can be later handed\n * to BEGIN to return to the state. The YYSTATE alias is for lex\n * compatibility.\n */\n#define YY_START ((yyg->yy_start - 1) / 2)\n#define YYSTATE YY_START\n\n/* Action number for EOF rule of a given start state. */\n#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)\n\n/* Special action meaning \"start processing a new file\". */\n#define YY_NEW_FILE Uprestart(yyin ,yyscanner )\n\n#define YY_END_OF_BUFFER_CHAR 0\n\n/* Size of default input buffer. 
*/\n#ifndef YY_BUF_SIZE\n#define YY_BUF_SIZE 16384\n#endif\n\n/* The state buf must be large enough to hold one state per character in the main buffer.\n */\n#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))\n\n#ifndef YY_TYPEDEF_YY_BUFFER_STATE\n#define YY_TYPEDEF_YY_BUFFER_STATE\ntypedef struct yy_buffer_state *YY_BUFFER_STATE;\n#endif\n\n#ifndef YY_TYPEDEF_YY_SIZE_T\n#define YY_TYPEDEF_YY_SIZE_T\ntypedef size_t yy_size_t;\n#endif\n\n#define EOB_ACT_CONTINUE_SCAN 0\n#define EOB_ACT_END_OF_FILE 1\n#define EOB_ACT_LAST_MATCH 2\n\n #define YY_LESS_LINENO(n)\n \n/* Return all but the first \"n\" matched characters back to the input stream. */\n#define yyless(n) \\\n\tdo \\\n\t\t{ \\\n\t\t/* Undo effects of setting up yytext. */ \\\n int yyless_macro_arg = (n); \\\n YY_LESS_LINENO(yyless_macro_arg);\\\n\t\t*yy_cp = yyg->yy_hold_char; \\\n\t\tYY_RESTORE_YY_MORE_OFFSET \\\n\t\tyyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \\\n\t\tYY_DO_BEFORE_ACTION; /* set up yytext again */ \\\n\t\t} \\\n\twhile ( 0 )\n\n#define unput(c) yyunput( c, yyg->yytext_ptr , yyscanner )\n\n#ifndef YY_STRUCT_YY_BUFFER_STATE\n#define YY_STRUCT_YY_BUFFER_STATE\nstruct yy_buffer_state\n\t{\n\tFILE *yy_input_file;\n\n\tchar *yy_ch_buf;\t\t/* input buffer */\n\tchar *yy_buf_pos;\t\t/* current position in input buffer */\n\n\t/* Size of input buffer in bytes, not including room for EOB\n\t * characters.\n\t */\n\tyy_size_t yy_buf_size;\n\n\t/* Number of characters read into yy_ch_buf, not including EOB\n\t * characters.\n\t */\n\tyy_size_t yy_n_chars;\n\n\t/* Whether we \"own\" the buffer - i.e., we know we created it,\n\t * and can realloc() it to grow it, and should free() it to\n\t * delete it.\n\t */\n\tint yy_is_our_buffer;\n\n\t/* Whether this is an \"interactive\" input source; if so, and\n\t * if we're using stdio for input, then we want to use getc()\n\t * instead of fread(), to make sure we stop fetching input after\n\t * each newline.\n\t */\n\tint 
yy_is_interactive;\n\n\t/* Whether we're considered to be at the beginning of a line.\n\t * If so, '^' rules will be active on the next match, otherwise\n\t * not.\n\t */\n\tint yy_at_bol;\n\n int yy_bs_lineno; /**< The line count. */\n int yy_bs_column; /**< The column count. */\n \n\t/* Whether to try to fill the input buffer when we reach the\n\t * end of it.\n\t */\n\tint yy_fill_buffer;\n\n\tint yy_buffer_status;\n\n#define YY_BUFFER_NEW 0\n#define YY_BUFFER_NORMAL 1\n\t/* When an EOF's been seen but there's still some text to process\n\t * then we mark the buffer as YY_EOF_PENDING, to indicate that we\n\t * shouldn't try reading from the input source any more. We might\n\t * still have a bunch of tokens to match, though, because of\n\t * possible backing-up.\n\t *\n\t * When we actually see the EOF, we change the status to \"new\"\n\t * (via Uprestart()), so that the user can continue scanning by\n\t * just pointing yyin at a new input file.\n\t */\n#define YY_BUFFER_EOF_PENDING 2\n\n\t};\n#endif /* !YY_STRUCT_YY_BUFFER_STATE */\n\n/* We provide macros for accessing buffer states in case in the\n * future we want to put the buffer states in a more general\n * \"scanner state\".\n *\n * Returns the top of the stack, or NULL.\n */\n#define YY_CURRENT_BUFFER ( yyg->yy_buffer_stack \\\n ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] \\\n : NULL)\n\n/* Same as previous macro, but useful when we know that the buffer stack is not\n * NULL or when we need an lvalue. 
For internal use only.\n */\n#define YY_CURRENT_BUFFER_LVALUE yyg->yy_buffer_stack[yyg->yy_buffer_stack_top]\n\nvoid Uprestart (FILE *input_file ,yyscan_t yyscanner );\nvoid Up_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner );\nYY_BUFFER_STATE Up_create_buffer (FILE *file,int size ,yyscan_t yyscanner );\nvoid Up_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner );\nvoid Up_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner );\nvoid Uppush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner );\nvoid Uppop_buffer_state (yyscan_t yyscanner );\n\nstatic void Upensure_buffer_stack (yyscan_t yyscanner );\nstatic void Up_load_buffer_state (yyscan_t yyscanner );\nstatic void Up_init_buffer (YY_BUFFER_STATE b,FILE *file ,yyscan_t yyscanner );\n\n#define YY_FLUSH_BUFFER Up_flush_buffer(YY_CURRENT_BUFFER ,yyscanner)\n\nYY_BUFFER_STATE Up_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner );\nYY_BUFFER_STATE Up_scan_string (yyconst char *yy_str ,yyscan_t yyscanner );\nYY_BUFFER_STATE Up_scan_bytes (yyconst char *bytes,yy_size_t len ,yyscan_t yyscanner );\n\nvoid *Upalloc (yy_size_t ,yyscan_t yyscanner );\nvoid *Uprealloc (void *,yy_size_t ,yyscan_t yyscanner );\nvoid Upfree (void * ,yyscan_t yyscanner );\n\n#define yy_new_buffer Up_create_buffer\n\n#define yy_set_interactive(is_interactive) \\\n\t{ \\\n\tif ( ! YY_CURRENT_BUFFER ){ \\\n Upensure_buffer_stack (yyscanner); \\\n\t\tYY_CURRENT_BUFFER_LVALUE = \\\n Up_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \\\n\t} \\\n\tYY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \\\n\t}\n\n#define yy_set_bol(at_bol) \\\n\t{ \\\n\tif ( ! 
YY_CURRENT_BUFFER ){\\\n Upensure_buffer_stack (yyscanner); \\\n\t\tYY_CURRENT_BUFFER_LVALUE = \\\n Up_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \\\n\t} \\\n\tYY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \\\n\t}\n\n#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)\n\n/* Begin user sect3 */\n\n#define Upwrap(n) 1\n#define YY_SKIP_YYWRAP\n\ntypedef unsigned char YY_CHAR;\n\ntypedef int yy_state_type;\n\n#define yytext_ptr yytext_r\n\nstatic yy_state_type yy_get_previous_state (yyscan_t yyscanner );\nstatic yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner);\nstatic int yy_get_next_buffer (yyscan_t yyscanner );\nstatic void yy_fatal_error (yyconst char msg[] ,yyscan_t yyscanner );\n\n/* Done after the current pattern has been matched and before the\n * corresponding action - sets up yytext.\n */\n#define YY_DO_BEFORE_ACTION \\\n\tyyg->yytext_ptr = yy_bp; \\\n\tyyleng = (yy_size_t) (yy_cp - yy_bp); \\\n\tyyg->yy_hold_char = *yy_cp; \\\n\t*yy_cp = '\\0'; \\\n\tyyg->yy_c_buf_p = yy_cp;\n\n#define YY_NUM_RULES 133\n#define YY_END_OF_BUFFER 134\n/* This struct is not used in this scanner,\n but its presence is necessary. 
*/\nstruct yy_trans_info\n\t{\n\tflex_int32_t yy_verify;\n\tflex_int32_t yy_nxt;\n\t};\nstatic yyconst flex_int16_t yy_accept[370] =\n { 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 134, 101,\n 1, 88, 88, 79, 101, 73, 98, 75, 24, 25,\n 32, 30, 22, 31, 19, 33, 92, 92, 61, 23,\n 47, 51, 48, 78, 70, 99, 26, 80, 27, 71,\n 72, 3, 97, 97, 97, 97, 97, 97, 97, 97,\n 97, 97, 97, 97, 97, 28, 76, 29, 74, 98,\n 128, 102, 103, 103, 104, 105, 106, 107, 108, 127,\n 126, 126, 126, 126, 126, 126, 126, 126, 126, 126,\n 129, 129, 130, 132, 1, 25, 27, 29, 87, 0,\n 46, 0, 90, 90, 89, 0, 98, 24, 24, 35,\n\n 64, 0, 36, 63, 22, 22, 81, 65, 55, 20,\n 34, 66, 0, 93, 0, 92, 93, 62, 54, 56,\n 49, 45, 52, 50, 57, 0, 99, 0, 26, 26,\n 0, 2, 3, 97, 37, 97, 85, 97, 97, 14,\n 97, 97, 97, 97, 6, 42, 38, 97, 10, 97,\n 97, 83, 97, 97, 28, 28, 77, 0, 102, 127,\n 126, 126, 126, 126, 126, 126, 126, 126, 126, 126,\n 126, 126, 129, 0, 0, 131, 0, 24, 68, 60,\n 69, 0, 21, 67, 0, 0, 93, 93, 94, 92,\n 93, 91, 91, 91, 58, 0, 53, 59, 0, 26,\n\n 97, 97, 97, 97, 97, 9, 40, 0, 97, 0,\n 97, 97, 15, 97, 97, 28, 82, 126, 126, 126,\n 126, 126, 117, 126, 126, 126, 126, 126, 131, 131,\n 131, 0, 0, 96, 96, 91, 91, 0, 0, 97,\n 97, 97, 7, 97, 0, 0, 0, 0, 0, 97,\n 97, 97, 97, 100, 112, 113, 126, 126, 126, 119,\n 126, 126, 126, 111, 131, 131, 0, 95, 0, 12,\n 16, 97, 97, 0, 43, 0, 0, 0, 97, 97,\n 18, 86, 11, 100, 109, 126, 123, 0, 115, 126,\n 126, 0, 0, 97, 97, 0, 39, 44, 0, 8,\n\n 97, 124, 0, 0, 110, 126, 5, 0, 97, 17,\n 0, 8, 84, 0, 0, 126, 0, 0, 0, 13,\n 0, 0, 0, 126, 0, 0, 0, 0, 121, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 41, 125, 0, 0, 0, 0, 0, 0, 0, 118,\n 0, 0, 0, 0, 114, 120, 0, 0, 0, 0,\n 116, 4, 4, 4, 0, 0, 0, 122, 0\n } ;\n\nstatic yyconst flex_int32_t yy_ec[256] =\n { 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,\n 1, 1, 4, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 22,\n 22, 22, 22, 22, 22, 22, 22, 23, 24, 25,\n 26, 27, 28, 29, 30, 30, 30, 30, 30, 30,\n 31, 
31, 31, 31, 31, 31, 31, 31, 31, 31,\n 31, 31, 31, 31, 32, 31, 31, 31, 31, 31,\n 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,\n\n 43, 44, 45, 46, 47, 48, 49, 50, 48, 51,\n 52, 48, 48, 53, 54, 55, 56, 57, 58, 59,\n 60, 48, 61, 62, 63, 64, 1, 65, 1, 9,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 66, 9, 1, 9, 1, 1, 1, 1, 1,\n 1, 9, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 9, 1, 1, 1, 1, 1, 1,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 67, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1\n } ;\n\nstatic yyconst flex_int32_t yy_meta[68] =\n { 0,\n 1, 1, 2, 2, 1, 1, 3, 1, 4, 5,\n 1, 6, 1, 1, 1, 7, 1, 7, 8, 1,\n 9, 10, 1, 1, 1, 1, 1, 6, 11, 12,\n 13, 14, 1, 6, 1, 1, 15, 1, 16, 16,\n 17, 17, 17, 16, 18, 18, 18, 18, 18, 18,\n 19, 18, 19, 18, 19, 19, 19, 18, 19, 18,\n 1, 1, 6, 1, 1, 4, 4\n } ;\n\nstatic yyconst flex_int16_t yy_base[392] =\n { 0,\n 0, 0, 0, 0, 67, 0, 132, 133, 1029, 1099,\n 135, 142, 148, 1002, 151, 1099, 129, 1099, 169, 1099,\n 133, 149, 176, 164, 1008, 137, 211, 967, 997, 1099,\n 143, 160, 162, 1099, 1099, 177, 197, 205, 1099, 1099,\n 1099, 0, 272, 274, 273, 276, 279, 284, 185, 275,\n 286, 280, 282, 292, 277, 292, 954, 1099, 1099, 307,\n 1099, 192, 1099, 1099, 1099, 1099, 1099, 1099, 1099, 0,\n 0, 961, 246, 960, 961, 953, 948, 264, 929, 924,\n 939, 938, 952, 0, 341, 1099, 1099, 1099, 345, 349,\n 1099, 353, 1099, 358, 1099, 0, 332, 364, 386, 941,\n\n 1099, 939, 938, 1099, 391, 1099, 944, 1099, 1099, 942,\n 934, 1099, 327, 357, 360, 364, 388, 1099, 1099, 934,\n 1099, 931, 929, 1099, 926, 410, 278, 0, 417, 431,\n 435, 1099, 0, 293, 368, 417, 376, 418, 419, 434,\n 435, 436, 438, 442, 443, 445, 449, 446, 448, 474,\n 451, 454, 455, 459, 490, 495, 1099, 884, 480, 0,\n 0, 897, 908, 894, 888, 891, 887, 889, 887, 883,\n 876, 882, 894, 0, 0, 488, 509, 503, 1099, 1099,\n 1099, 909, 1099, 1099, 447, 272, 182, 
506, 491, 504,\n 514, 534, 516, 524, 1099, 900, 1099, 1099, 918, 546,\n\n 514, 542, 532, 551, 553, 555, 559, 563, 584, 588,\n 572, 574, 578, 588, 589, 595, 0, 874, 855, 845,\n 855, 849, 0, 839, 824, 814, 822, 826, 602, 606,\n 0, 849, 380, 394, 590, 621, 302, 840, 840, 623,\n 628, 637, 638, 639, 783, 783, 781, 600, 782, 641,\n 642, 647, 648, 0, 0, 0, 776, 780, 734, 783,\n 731, 743, 737, 0, 675, 0, 762, 548, 752, 646,\n 649, 651, 672, 723, 1099, 721, 724, 720, 674, 675,\n 679, 680, 681, 0, 0, 730, 0, 491, 0, 717,\n 720, 708, 689, 692, 706, 754, 1099, 1099, 726, 707,\n\n 709, 0, 700, 698, 0, 701, 1099, 595, 711, 712,\n 680, 1099, 716, 681, 683, 690, 612, 659, 721, 720,\n 632, 633, 624, 654, 624, 725, 588, 579, 1099, 692,\n 593, 727, 554, 561, 508, 501, 498, 485, 503, 753,\n 1099, 1099, 477, 431, 408, 405, 385, 755, 353, 1099,\n 332, 318, 338, 757, 1099, 353, 284, 759, 761, 274,\n 1099, 1099, 167, 764, 124, 113, 96, 1099, 1099, 790,\n 809, 826, 845, 862, 873, 884, 903, 915, 932, 949,\n 965, 973, 977, 989, 1005, 1022, 1026, 1038, 1051, 1060,\n 1079\n\n } ;\n\nstatic yyconst flex_int16_t yy_def[392] =\n { 0,\n 369, 1, 1, 1, 369, 5, 370, 370, 369, 369,\n 369, 369, 369, 369, 371, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 27, 369, 369,\n 369, 369, 369, 369, 369, 372, 369, 369, 369, 369,\n 369, 373, 374, 374, 374, 374, 374, 374, 374, 374,\n 374, 374, 374, 374, 374, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 375,\n 376, 376, 376, 376, 376, 376, 376, 376, 376, 376,\n 377, 377, 378, 379, 369, 369, 369, 369, 369, 369,\n 369, 371, 369, 371, 369, 380, 369, 369, 369, 369,\n\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 381, 369, 27, 381, 369, 369, 369,\n 369, 369, 369, 369, 369, 371, 372, 382, 369, 369,\n 369, 369, 373, 374, 374, 374, 374, 374, 374, 374,\n 374, 374, 374, 374, 374, 374, 374, 374, 374, 374,\n 374, 374, 374, 374, 369, 369, 369, 369, 369, 375,\n 376, 376, 376, 376, 376, 376, 376, 376, 376, 376,\n 
376, 376, 377, 379, 383, 384, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 381, 381, 385, 27,\n 381, 369, 117, 117, 369, 369, 369, 369, 386, 369,\n\n 374, 374, 374, 374, 374, 374, 374, 369, 374, 369,\n 374, 374, 374, 374, 374, 369, 387, 376, 376, 376,\n 376, 376, 376, 376, 376, 376, 376, 376, 388, 384,\n 230, 369, 369, 385, 385, 369, 389, 369, 386, 374,\n 374, 374, 374, 374, 369, 369, 369, 369, 369, 374,\n 374, 374, 374, 390, 376, 376, 376, 376, 376, 376,\n 376, 376, 376, 376, 388, 265, 369, 369, 369, 374,\n 374, 374, 374, 369, 369, 369, 369, 369, 374, 374,\n 374, 374, 374, 390, 376, 376, 376, 369, 376, 376,\n 376, 369, 369, 374, 374, 369, 369, 369, 369, 374,\n\n 374, 376, 369, 369, 376, 376, 369, 391, 374, 374,\n 369, 369, 374, 369, 369, 376, 391, 369, 391, 374,\n 369, 369, 369, 376, 369, 391, 369, 369, 369, 369,\n 369, 391, 369, 369, 369, 369, 369, 369, 369, 391,\n 369, 369, 369, 369, 369, 369, 369, 391, 369, 369,\n 369, 369, 369, 391, 369, 369, 369, 369, 391, 369,\n 369, 369, 369, 391, 369, 369, 369, 369, 0, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369\n\n } ;\n\nstatic yyconst flex_int16_t yy_nxt[1167] =\n { 0,\n 10, 11, 12, 13, 11, 14, 15, 16, 17, 17,\n 18, 10, 19, 20, 21, 22, 23, 24, 25, 26,\n 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,\n 36, 36, 37, 38, 39, 40, 41, 42, 43, 44,\n 45, 46, 47, 48, 49, 50, 51, 49, 49, 49,\n 52, 53, 49, 49, 54, 49, 49, 55, 49, 49,\n 56, 57, 58, 59, 10, 17, 60, 61, 62, 63,\n 64, 62, 61, 61, 61, 61, 61, 61, 61, 65,\n 66, 67, 61, 68, 61, 61, 61, 61, 61, 61,\n 69, 61, 61, 61, 61, 61, 70, 70, 70, 61,\n\n 61, 61, 61, 61, 61, 71, 72, 73, 74, 71,\n 75, 71, 71, 76, 71, 71, 77, 71, 71, 71,\n 78, 71, 79, 80, 71, 71, 71, 61, 61, 61,\n 61, 61, 61, 61, 82, 82, 85, 97, 97, 85,\n 368, 83, 83, 89, 90, 90, 89, 100, 86, 89,\n 90, 90, 89, 93, 94, 86, 111, 95, 101, 102,\n 119, 86, 112, 367, 103, 84, 84, 120, 121, 87,\n 98, 99, 99, 98, 104, 366, 87, 105, 106, 106,\n 105, 107, 87, 
126, 96, 122, 123, 124, 125, 108,\n 109, 126, 325, 159, 97, 97, 159, 88, 129, 130,\n\n 130, 129, 369, 369, 88, 128, 131, 132, 132, 131,\n 88, 113, 113, 128, 113, 113, 113, 113, 113, 114,\n 114, 113, 113, 113, 113, 113, 113, 113, 113, 115,\n 113, 116, 116, 113, 113, 113, 113, 113, 113, 113,\n 114, 114, 114, 113, 113, 113, 113, 113, 113, 114,\n 114, 114, 114, 114, 114, 114, 114, 114, 114, 114,\n 114, 114, 114, 114, 114, 114, 114, 114, 114, 117,\n 114, 113, 113, 113, 113, 113, 114, 114, 126, 126,\n 126, 126, 126, 126, 126, 126, 126, 233, 126, 233,\n 126, 163, 126, 155, 156, 156, 155, 164, 126, 126,\n\n 128, 128, 128, 128, 128, 128, 128, 128, 128, 169,\n 128, 138, 128, 144, 128, 97, 97, 233, 170, 233,\n 128, 128, 154, 365, 139, 135, 136, 140, 141, 145,\n 142, 148, 149, 137, 150, 143, 146, 151, 361, 147,\n 97, 97, 85, 152, 153, 85, 89, 185, 185, 89,\n 177, 90, 90, 177, 86, 93, 94, 360, 86, 95,\n 93, 94, 86, 358, 95, 98, 99, 99, 98, 186,\n 357, 158, 97, 97, 126, 87, 356, 185, 185, 87,\n 189, 189, 126, 87, 190, 190, 96, 178, 99, 99,\n 178, 96, 105, 106, 106, 105, 128, 97, 97, 188,\n\n 268, 268, 186, 88, 128, 355, 191, 88, 192, 192,\n 353, 88, 93, 94, 369, 369, 95, 193, 129, 130,\n 130, 129, 114, 126, 126, 126, 193, 193, 193, 193,\n 194, 193, 200, 130, 130, 200, 131, 132, 132, 131,\n 126, 126, 126, 96, 126, 128, 128, 128, 126, 126,\n 208, 126, 126, 208, 126, 126, 352, 126, 351, 201,\n 126, 126, 128, 128, 128, 126, 128, 185, 185, 203,\n 128, 128, 202, 128, 128, 210, 128, 128, 210, 128,\n 126, 159, 128, 128, 159, 350, 205, 128, 204, 186,\n 206, 155, 156, 156, 155, 207, 216, 156, 156, 216,\n\n 209, 214, 128, 212, 178, 215, 175, 178, 230, 230,\n 177, 189, 189, 177, 213, 349, 211, 230, 230, 230,\n 126, 233, 86, 233, 190, 190, 369, 369, 347, 233,\n 346, 233, 303, 235, 185, 185, 236, 236, 126, 233,\n 304, 233, 128, 87, 236, 236, 191, 200, 126, 345,\n 200, 344, 240, 343, 192, 192, 188, 126, 193, 126,\n 128, 126, 114, 236, 208, 126, 193, 208, 268, 268,\n 128, 88, 236, 236, 236, 
236, 237, 236, 126, 128,\n 126, 128, 241, 128, 126, 248, 242, 128, 248, 210,\n 126, 244, 210, 243, 126, 126, 216, 318, 319, 216,\n\n 128, 248, 128, 342, 248, 233, 128, 233, 341, 246,\n 369, 369, 128, 247, 318, 319, 128, 128, 339, 245,\n 175, 250, 265, 265, 175, 251, 230, 230, 334, 126,\n 249, 265, 265, 265, 126, 230, 230, 230, 253, 333,\n 252, 236, 236, 126, 126, 126, 277, 126, 126, 331,\n 236, 128, 126, 126, 126, 126, 128, 126, 330, 236,\n 236, 236, 236, 236, 236, 128, 128, 128, 329, 128,\n 128, 270, 328, 271, 128, 128, 128, 128, 126, 128,\n 126, 126, 327, 272, 325, 126, 126, 126, 273, 282,\n 283, 308, 308, 175, 279, 265, 265, 280, 126, 281,\n\n 128, 294, 128, 128, 265, 265, 265, 128, 128, 128,\n 307, 307, 126, 126, 293, 126, 300, 126, 126, 301,\n 128, 295, 126, 318, 319, 292, 126, 318, 319, 318,\n 319, 324, 335, 323, 128, 128, 322, 128, 336, 128,\n 128, 337, 321, 316, 128, 338, 326, 309, 128, 315,\n 332, 314, 340, 320, 313, 318, 319, 318, 319, 318,\n 319, 362, 362, 363, 364, 310, 318, 319, 312, 311,\n 306, 305, 302, 299, 298, 297, 296, 293, 348, 292,\n 354, 291, 359, 290, 358, 289, 359, 288, 287, 326,\n 81, 81, 81, 81, 81, 81, 81, 81, 81, 81,\n\n 81, 81, 81, 81, 81, 81, 81, 81, 81, 92,\n 92, 92, 92, 92, 92, 92, 92, 92, 92, 92,\n 92, 92, 92, 92, 92, 92, 92, 92, 127, 286,\n 285, 278, 276, 275, 127, 127, 127, 127, 127, 127,\n 274, 127, 127, 127, 127, 133, 126, 133, 133, 133,\n 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,\n 133, 133, 133, 133, 134, 269, 267, 264, 263, 262,\n 134, 134, 134, 134, 134, 134, 261, 134, 134, 134,\n 134, 160, 160, 260, 160, 160, 160, 259, 160, 160,\n 160, 160, 161, 161, 258, 161, 161, 161, 257, 161,\n\n 161, 161, 161, 173, 173, 173, 173, 256, 173, 173,\n 173, 173, 173, 173, 173, 173, 173, 173, 173, 173,\n 173, 173, 176, 255, 126, 238, 232, 174, 228, 227,\n 176, 176, 176, 176, 173, 226, 173, 173, 225, 224,\n 173, 223, 222, 221, 220, 173, 219, 173, 218, 217,\n 173, 92, 198, 92, 92, 197, 196, 92, 195, 184,\n 183, 182, 92, 181, 92, 180, 
179, 92, 187, 187,\n 175, 174, 174, 187, 187, 172, 187, 187, 187, 171,\n 187, 187, 187, 187, 199, 199, 199, 199, 199, 199,\n 199, 199, 229, 229, 229, 229, 231, 231, 231, 168,\n\n 231, 231, 231, 167, 231, 231, 231, 231, 234, 234,\n 166, 165, 162, 234, 234, 157, 234, 234, 234, 118,\n 234, 234, 234, 234, 239, 114, 110, 91, 369, 369,\n 239, 239, 369, 239, 239, 239, 239, 239, 239, 239,\n 239, 254, 254, 254, 254, 266, 266, 266, 369, 266,\n 266, 266, 369, 266, 266, 266, 266, 236, 369, 236,\n 236, 369, 236, 369, 369, 369, 236, 236, 284, 284,\n 369, 284, 284, 284, 369, 284, 284, 284, 284, 317,\n 317, 317, 317, 317, 317, 317, 317, 317, 317, 317,\n 317, 317, 317, 317, 317, 317, 317, 317, 9, 369,\n\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369\n } ;\n\nstatic yyconst flex_int16_t yy_chk[1167] =\n { 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,\n 5, 5, 5, 5, 7, 8, 11, 17, 17, 11,\n 367, 7, 8, 12, 12, 12, 12, 21, 11, 13,\n 13, 13, 13, 15, 15, 12, 26, 15, 21, 21,\n 31, 13, 26, 366, 22, 7, 8, 31, 31, 11,\n 19, 19, 19, 19, 22, 365, 12, 23, 23, 23,\n 23, 24, 13, 36, 15, 32, 32, 33, 33, 24,\n 24, 49, 363, 62, 17, 17, 62, 11, 37, 37,\n\n 37, 37, 187, 187, 12, 36, 38, 38, 38, 38,\n 13, 27, 27, 49, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 
27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 27, 27, 27, 43, 45,\n 44, 50, 46, 55, 127, 47, 52, 186, 53, 186,\n 48, 73, 51, 56, 56, 56, 56, 73, 54, 134,\n\n 43, 45, 44, 50, 46, 55, 127, 47, 52, 78,\n 53, 45, 48, 50, 51, 60, 60, 237, 78, 237,\n 54, 134, 55, 360, 45, 43, 44, 46, 47, 51,\n 48, 52, 53, 44, 53, 48, 51, 54, 357, 51,\n 97, 97, 85, 54, 54, 85, 89, 113, 113, 89,\n 90, 90, 90, 90, 85, 92, 92, 356, 89, 92,\n 94, 94, 90, 353, 94, 98, 98, 98, 98, 113,\n 352, 60, 60, 60, 135, 85, 351, 114, 114, 89,\n 115, 115, 137, 90, 116, 116, 92, 99, 99, 99,\n 99, 94, 105, 105, 105, 105, 135, 97, 97, 114,\n\n 233, 233, 115, 85, 137, 349, 116, 89, 117, 117,\n 347, 90, 126, 126, 234, 234, 126, 117, 129, 129,\n 129, 129, 116, 136, 138, 139, 117, 117, 117, 117,\n 117, 117, 130, 130, 130, 130, 131, 131, 131, 131,\n 140, 141, 142, 126, 143, 136, 138, 139, 144, 145,\n 147, 146, 148, 147, 149, 147, 346, 151, 345, 136,\n 152, 153, 140, 141, 142, 154, 143, 185, 185, 139,\n 144, 145, 138, 146, 148, 150, 149, 147, 150, 151,\n 150, 159, 152, 153, 159, 344, 142, 154, 141, 185,\n 143, 155, 155, 155, 155, 144, 156, 156, 156, 156,\n\n 148, 154, 150, 151, 178, 154, 176, 178, 176, 176,\n 177, 189, 189, 177, 153, 343, 150, 176, 176, 176,\n 201, 188, 177, 188, 190, 190, 188, 188, 339, 191,\n 338, 191, 288, 189, 191, 191, 193, 193, 203, 194,\n 288, 194, 201, 177, 194, 194, 190, 200, 202, 337,\n 200, 336, 201, 335, 192, 192, 191, 204, 193, 205,\n 203, 206, 190, 192, 208, 207, 194, 208, 268, 268,\n 202, 177, 192, 192, 192, 192, 192, 192, 211, 204,\n 212, 205, 202, 206, 213, 209, 203, 207, 209, 210,\n 209, 205, 210, 204, 214, 215, 216, 308, 308, 216,\n\n 211, 248, 212, 334, 248, 235, 213, 235, 333, 208,\n 235, 235, 209, 208, 317, 317, 214, 215, 331, 207,\n 229, 211, 229, 229, 230, 212, 230, 230, 328, 240,\n 210, 229, 229, 229, 241, 230, 230, 230, 215, 327,\n 214, 236, 236, 242, 243, 244, 248, 250, 251, 325,\n 236, 240, 
270, 252, 253, 271, 241, 272, 324, 236,\n 236, 236, 236, 236, 236, 242, 243, 244, 323, 250,\n 251, 240, 322, 241, 270, 252, 253, 271, 273, 272,\n 279, 280, 321, 242, 318, 281, 282, 283, 244, 252,\n 253, 293, 293, 265, 250, 265, 265, 251, 294, 251,\n\n 273, 272, 279, 280, 265, 265, 265, 281, 282, 283,\n 292, 292, 295, 300, 293, 301, 279, 309, 310, 280,\n 294, 273, 313, 319, 319, 292, 320, 326, 326, 332,\n 332, 316, 330, 315, 295, 300, 314, 301, 330, 309,\n 310, 330, 311, 306, 313, 330, 319, 294, 320, 304,\n 326, 303, 332, 309, 301, 340, 340, 348, 348, 354,\n 354, 358, 358, 359, 359, 295, 364, 364, 299, 296,\n 291, 290, 286, 278, 277, 276, 274, 269, 340, 267,\n 348, 263, 354, 262, 358, 261, 359, 260, 259, 364,\n 370, 370, 370, 370, 370, 370, 370, 370, 370, 370,\n\n 370, 370, 370, 370, 370, 370, 370, 370, 370, 371,\n 371, 371, 371, 371, 371, 371, 371, 371, 371, 371,\n 371, 371, 371, 371, 371, 371, 371, 371, 372, 258,\n 257, 249, 247, 246, 372, 372, 372, 372, 372, 372,\n 245, 372, 372, 372, 372, 373, 239, 373, 373, 373,\n 373, 373, 373, 373, 373, 373, 373, 373, 373, 373,\n 373, 373, 373, 373, 374, 238, 232, 228, 227, 226,\n 374, 374, 374, 374, 374, 374, 225, 374, 374, 374,\n 374, 375, 375, 224, 375, 375, 375, 222, 375, 375,\n 375, 375, 376, 376, 221, 376, 376, 376, 220, 376,\n\n 376, 376, 376, 377, 377, 377, 377, 219, 377, 377,\n 377, 377, 377, 377, 377, 377, 377, 377, 377, 377,\n 377, 377, 378, 218, 199, 196, 182, 173, 172, 171,\n 378, 378, 378, 378, 379, 170, 379, 379, 169, 168,\n 379, 167, 166, 165, 164, 379, 163, 379, 162, 158,\n 379, 380, 125, 380, 380, 123, 122, 380, 120, 111,\n 110, 107, 380, 103, 380, 102, 100, 380, 381, 381,\n 83, 82, 81, 381, 381, 80, 381, 381, 381, 79,\n 381, 381, 381, 381, 382, 382, 382, 382, 382, 382,\n 382, 382, 383, 383, 383, 383, 384, 384, 384, 77,\n\n 384, 384, 384, 76, 384, 384, 384, 384, 385, 385,\n 75, 74, 72, 385, 385, 57, 385, 385, 385, 29,\n 385, 385, 385, 385, 386, 28, 25, 14, 9, 0,\n 386, 386, 0, 386, 386, 386, 386, 386, 386, 
386,\n 386, 387, 387, 387, 387, 388, 388, 388, 0, 388,\n 388, 388, 0, 388, 388, 388, 388, 389, 0, 389,\n 389, 0, 389, 0, 0, 0, 389, 389, 390, 390,\n 0, 390, 390, 390, 0, 390, 390, 390, 390, 391,\n 391, 391, 391, 391, 391, 391, 391, 391, 391, 391,\n 391, 391, 391, 391, 391, 391, 391, 391, 369, 369,\n\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369, 369, 369, 369, 369,\n 369, 369, 369, 369, 369, 369\n } ;\n\n/* The intent behind this definition is that it'll catch\n * any uses of REJECT which flex missed.\n */\n#define REJECT reject_used_but_not_detected\n#define yymore() yymore_used_but_not_detected\n#define YY_MORE_ADJ 0\n#define YY_RESTORE_YY_MORE_OFFSET\n#line 1 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n#line 2 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n#include \"UpParsing.h\"\n\n// ************************************************************************************************\n// These functions are static in Up.yy.c, and this is the only way to pull them out so I\n// can call them in UpScanner\n\n#include \"Up/UpArena.h\"\n\nstatic void yy_push_state (int new_state, yyscan_t yyscanner);\nstatic void yy_pop_state (yyscan_t yyscanner);\n\nvoid UpPushState(int new_state, void* yyscanner) {\n yy_push_state(new_state, (yyscan_t)yyscanner);\n}\n\nvoid UpPopState(void* yyscanner) {\n yy_pop_state((yyscan_t)yyscanner);\n}\n\n// ************************************************************************************************\n\n\n\n\n#line 843 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.yy.c\"\n\n#define INITIAL 0\n#define upcode 1\n#define ccode 2\n#define text 3\n\n#ifndef YY_NO_UNISTD_H\n/* Special case for \"unistd.h\", since it is non-ANSI. 
We include it way\n * down here because we want the user's section 1 to have been scanned first.\n * The user has a chance to override it with an option.\n */\n#include <unistd.h>\n#endif\n\n#ifndef YY_EXTRA_TYPE\n#define YY_EXTRA_TYPE void *\n#endif\n\n/* Holds the entire state of the reentrant scanner. */\nstruct yyguts_t\n {\n\n /* User-defined. Not touched by flex. */\n YY_EXTRA_TYPE yyextra_r;\n\n /* The rest are the same as the globals declared in the non-reentrant scanner. */\n FILE *yyin_r, *yyout_r;\n size_t yy_buffer_stack_top; /**< index of top of stack. */\n size_t yy_buffer_stack_max; /**< capacity of stack. */\n YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. */\n char yy_hold_char;\n yy_size_t yy_n_chars;\n yy_size_t yyleng_r;\n char *yy_c_buf_p;\n int yy_init;\n int yy_start;\n int yy_did_buffer_switch_on_eof;\n int yy_start_stack_ptr;\n int yy_start_stack_depth;\n int *yy_start_stack;\n yy_state_type yy_last_accepting_state;\n char* yy_last_accepting_cpos;\n\n int yylineno_r;\n int yy_flex_debug_r;\n\n char *yytext_r;\n int yy_more_flag;\n int yy_more_len;\n\n }; /* end struct yyguts_t */\n\nstatic int yy_init_globals (yyscan_t yyscanner );\n\nint Uplex_init (yyscan_t* scanner);\n\nint Uplex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner);\n\n/* Accessor methods to globals.\n These are made visible to non-reentrant scanners for convenience. 
*/\n\nint Uplex_destroy (yyscan_t yyscanner );\n\nint Upget_debug (yyscan_t yyscanner );\n\nvoid Upset_debug (int debug_flag ,yyscan_t yyscanner );\n\nYY_EXTRA_TYPE Upget_extra (yyscan_t yyscanner );\n\nvoid Upset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner );\n\nFILE *Upget_in (yyscan_t yyscanner );\n\nvoid Upset_in (FILE * in_str ,yyscan_t yyscanner );\n\nFILE *Upget_out (yyscan_t yyscanner );\n\nvoid Upset_out (FILE * out_str ,yyscan_t yyscanner );\n\nyy_size_t Upget_leng (yyscan_t yyscanner );\n\nchar *Upget_text (yyscan_t yyscanner );\n\nint Upget_lineno (yyscan_t yyscanner );\n\nvoid Upset_lineno (int line_number ,yyscan_t yyscanner );\n\n/* Macros after this point can all be overridden by user definitions in\n * section 1.\n */\n\n#ifndef YY_SKIP_YYWRAP\n#ifdef __cplusplus\nextern \"C\" int Upwrap (yyscan_t yyscanner );\n#else\nextern int Upwrap (yyscan_t yyscanner );\n#endif\n#endif\n\n static void yyunput (int c,char *buf_ptr ,yyscan_t yyscanner);\n \n#ifndef yytext_ptr\nstatic void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner);\n#endif\n\n#ifdef YY_NEED_STRLEN\nstatic int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner);\n#endif\n\n#ifndef YY_NO_INPUT\n\n#ifdef __cplusplus\nstatic int yyinput (yyscan_t yyscanner );\n#else\nstatic int input (yyscan_t yyscanner );\n#endif\n\n#endif\n\n static void yy_push_state (int new_state ,yyscan_t yyscanner);\n \n static void yy_pop_state (yyscan_t yyscanner );\n \n static int yy_top_state (yyscan_t yyscanner );\n \n/* Amount of stuff to slurp up with each read. */\n#ifndef YY_READ_BUF_SIZE\n#define YY_READ_BUF_SIZE 8192\n#endif\n\n/* Copy whatever the last rule matched to the standard output. */\n#ifndef ECHO\n/* This used to be an fputs(), but since the string might contain NUL's,\n * we now use fwrite().\n */\n#define ECHO fwrite( yytext, yyleng, 1, yyout )\n#endif\n\n/* Gets input and stuffs it into \"buf\". 
number of characters read, or YY_NULL,\n * is returned in \"result\".\n */\n#ifndef YY_INPUT\n#define YY_INPUT(buf,result,max_size) \\\n\tif ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \\\n\t\t{ \\\n\t\tint c = '*'; \\\n\t\tyy_size_t n; \\\n\t\tfor ( n = 0; n < max_size && \\\n\t\t\t (c = getc( yyin )) != EOF && c != '\\n'; ++n ) \\\n\t\t\tbuf[n] = (char) c; \\\n\t\tif ( c == '\\n' ) \\\n\t\t\tbuf[n++] = (char) c; \\\n\t\tif ( c == EOF && ferror( yyin ) ) \\\n\t\t\tYY_FATAL_ERROR( \"input in flex scanner failed\" ); \\\n\t\tresult = n; \\\n\t\t} \\\n\telse \\\n\t\t{ \\\n\t\terrno=0; \\\n\t\twhile ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \\\n\t\t\t{ \\\n\t\t\tif( errno != EINTR) \\\n\t\t\t\t{ \\\n\t\t\t\tYY_FATAL_ERROR( \"input in flex scanner failed\" ); \\\n\t\t\t\tbreak; \\\n\t\t\t\t} \\\n\t\t\terrno=0; \\\n\t\t\tclearerr(yyin); \\\n\t\t\t} \\\n\t\t}\\\n\\\n\n#endif\n\n/* No semi-colon after return; correct usage is to write \"yyterminate();\" -\n * we don't want an extra ';' after the \"return\" because that will cause\n * some compilers to complain about unreachable statements.\n */\n#ifndef yyterminate\n#define yyterminate() return YY_NULL\n#endif\n\n/* Number of entries by which start-condition stack grows. */\n#ifndef YY_START_STACK_INCR\n#define YY_START_STACK_INCR 25\n#endif\n\n/* Report a fatal error. */\n#ifndef YY_FATAL_ERROR\n#define YY_FATAL_ERROR(msg) yy_fatal_error( msg , yyscanner)\n#endif\n\n/* end tables serialization structures and prototypes */\n\n/* Default declaration of generated scanner - a define so the user can\n * easily add parameters.\n */\n#ifndef YY_DECL\n#define YY_DECL_IS_OURS 1\n\nextern int Uplex (yyscan_t yyscanner);\n\n#define YY_DECL int Uplex (yyscan_t yyscanner)\n#endif /* !YY_DECL */\n\n/* Code executed at the beginning of each rule, after yytext and yyleng\n * have been set up.\n */\n#ifndef YY_USER_ACTION\n#define YY_USER_ACTION\n#endif\n\n/* Code executed at the end of each rule. 
*/\n#ifndef YY_BREAK\n#define YY_BREAK break;\n#endif\n\n#define YY_RULE_SETUP \\\n\tYY_USER_ACTION\n\n/** The main scanner function which does all the work.\n */\nYY_DECL\n{\n\tregister yy_state_type yy_current_state;\n\tregister char *yy_cp, *yy_bp;\n\tregister int yy_act;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n#line 55 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n\n\n UpParseState = upcode;\n CParseState = ccode;\n TextParseState = text;\n\n#line 1080 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.yy.c\"\n\n\tif ( !yyg->yy_init )\n\t\t{\n\t\tyyg->yy_init = 1;\n\n#ifdef YY_USER_INIT\n\t\tYY_USER_INIT;\n#endif\n\n\t\tif ( ! yyg->yy_start )\n\t\t\tyyg->yy_start = 1;\t/* first start state */\n\n\t\tif ( ! yyin )\n\t\t\tyyin = stdin;\n\n\t\tif ( ! yyout )\n\t\t\tyyout = stdout;\n\n\t\tif ( ! YY_CURRENT_BUFFER ) {\n\t\t\tUpensure_buffer_stack (yyscanner);\n\t\t\tYY_CURRENT_BUFFER_LVALUE =\n\t\t\t\tUp_create_buffer(yyin,YY_BUF_SIZE ,yyscanner);\n\t\t}\n\n\t\tUp_load_buffer_state(yyscanner );\n\t\t}\n\n\twhile ( 1 )\t\t/* loops until end-of-file is reached */\n\t\t{\n\t\tyy_cp = yyg->yy_c_buf_p;\n\n\t\t/* Support of yytext. 
*/\n\t\t*yy_cp = yyg->yy_hold_char;\n\n\t\t/* yy_bp points to the position in yy_ch_buf of the start of\n\t\t * the current run.\n\t\t */\n\t\tyy_bp = yy_cp;\n\n\t\tyy_current_state = yyg->yy_start;\nyy_match:\n\t\tdo\n\t\t\t{\n\t\t\tregister YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];\n\t\t\tif ( yy_accept[yy_current_state] )\n\t\t\t\t{\n\t\t\t\tyyg->yy_last_accepting_state = yy_current_state;\n\t\t\t\tyyg->yy_last_accepting_cpos = yy_cp;\n\t\t\t\t}\n\t\t\twhile ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )\n\t\t\t\t{\n\t\t\t\tyy_current_state = (int) yy_def[yy_current_state];\n\t\t\t\tif ( yy_current_state >= 370 )\n\t\t\t\t\tyy_c = yy_meta[(unsigned int) yy_c];\n\t\t\t\t}\n\t\t\tyy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];\n\t\t\t++yy_cp;\n\t\t\t}\n\t\twhile ( yy_base[yy_current_state] != 1099 );\n\nyy_find_action:\n\t\tyy_act = yy_accept[yy_current_state];\n\t\tif ( yy_act == 0 )\n\t\t\t{ /* have to back up */\n\t\t\tyy_cp = yyg->yy_last_accepting_cpos;\n\t\t\tyy_current_state = yyg->yy_last_accepting_state;\n\t\t\tyy_act = yy_accept[yy_current_state];\n\t\t\t}\n\n\t\tYY_DO_BEFORE_ACTION;\n\ndo_action:\t/* This label is used only to access EOF actions. 
*/\n\n\t\tswitch ( yy_act )\n\t{ /* beginning of action switch */\n\t\t\tcase 0: /* must back up */\n\t\t\t/* undo the effects of YY_DO_BEFORE_ACTION */\n\t\t\t*yy_cp = yyg->yy_hold_char;\n\t\t\tyy_cp = yyg->yy_last_accepting_cpos;\n\t\t\tyy_current_state = yyg->yy_last_accepting_state;\n\t\t\tgoto yy_find_action;\n\ncase 1:\nYY_RULE_SETUP\n#line 63 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(0); }\n\tYY_BREAK\ncase 2:\n/* rule 2 can match eol */\nYY_RULE_SETUP\n#line 64 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(0); }\n\tYY_BREAK\ncase 3:\nYY_RULE_SETUP\n#line 66 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(0); }\n\tYY_BREAK\ncase 4:\n/* rule 4 can match eol */\nYY_RULE_SETUP\n#line 67 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(0); }\n\tYY_BREAK\ncase 5:\n/* rule 5 can match eol */\nYY_RULE_SETUP\n#line 68 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(0); }\n\tYY_BREAK\ncase 6:\nYY_RULE_SETUP\n#line 70 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(IF); }\n\tYY_BREAK\ncase 7:\nYY_RULE_SETUP\n#line 71 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ELSE); }\n\tYY_BREAK\ncase 8:\nYY_RULE_SETUP\n#line 72 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ORELSE); }\n\tYY_BREAK\ncase 9:\nYY_RULE_SETUP\n#line 74 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(FOR); }\n\tYY_BREAK\ncase 10:\nYY_RULE_SETUP\n#line 75 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ON); }\n\tYY_BREAK\ncase 11:\nYY_RULE_SETUP\n#line 76 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(WHILE); }\n\tYY_BREAK\ncase 12:\nYY_RULE_SETUP\n#line 77 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(BREAK); }\n\tYY_BREAK\ncase 13:\nYY_RULE_SETUP\n#line 78 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(CONTINUE); }\n\tYY_BREAK\ncase 14:\nYY_RULE_SETUP\n#line 79 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(DO); 
}\n\tYY_BREAK\ncase 15:\nYY_RULE_SETUP\n#line 81 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(TRY); }\n\tYY_BREAK\ncase 16:\nYY_RULE_SETUP\n#line 82 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(CATCH); }\n\tYY_BREAK\ncase 17:\nYY_RULE_SETUP\n#line 83 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(FINALLY); }\n\tYY_BREAK\ncase 18:\nYY_RULE_SETUP\n#line 84 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(THROW); }\n\tYY_BREAK\ncase 19:\nYY_RULE_SETUP\n#line 86 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(DOT); }\n\tYY_BREAK\ncase 20:\nYY_RULE_SETUP\n#line 87 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(DOT2); }\n\tYY_BREAK\ncase 21:\nYY_RULE_SETUP\n#line 88 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(DOT3); }\n\tYY_BREAK\ncase 22:\n/* rule 22 can match eol */\nYY_RULE_SETUP\n#line 89 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(COMMA); }\n\tYY_BREAK\ncase 23:\nYY_RULE_SETUP\n#line 90 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SEMICOLON); }\n\tYY_BREAK\ncase 24:\n/* rule 24 can match eol */\nYY_RULE_SETUP\n#line 92 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LP); }\n\tYY_BREAK\ncase 25:\n/* rule 25 can match eol */\nYY_RULE_SETUP\n#line 93 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RP); }\n\tYY_BREAK\ncase 26:\n/* rule 26 can match eol */\nYY_RULE_SETUP\n#line 94 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LB); }\n\tYY_BREAK\ncase 27:\n/* rule 27 can match eol */\nYY_RULE_SETUP\n#line 95 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RB); }\n\tYY_BREAK\ncase 28:\n/* rule 28 can match eol */\nYY_RULE_SETUP\n#line 96 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LCB); }\n\tYY_BREAK\ncase 29:\n/* rule 29 can match eol */\nYY_RULE_SETUP\n#line 97 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RCB); }\n\tYY_BREAK\ncase 30:\nYY_RULE_SETUP\n#line 99 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ADD); }\n\tYY_BREAK\ncase 31:\nYY_RULE_SETUP\n#line 100 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SUBTRACT); }\n\tYY_BREAK\ncase 32:\nYY_RULE_SETUP\n#line 101 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(STAR); }\n\tYY_BREAK\ncase 33:\nYY_RULE_SETUP\n#line 102 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SLASH); }\n\tYY_BREAK\ncase 34:\nYY_RULE_SETUP\n#line 103 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SLASH2); }\n\tYY_BREAK\ncase 35:\nYY_RULE_SETUP\n#line 104 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(STAR2); }\n\tYY_BREAK\ncase 36:\nYY_RULE_SETUP\n#line 105 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(CONCAT); }\n\tYY_BREAK\ncase 37:\nYY_RULE_SETUP\n#line 107 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(AS); }\n\tYY_BREAK\ncase 38:\nYY_RULE_SETUP\n#line 108 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(IS); }\n\tYY_BREAK\ncase 39:\nYY_RULE_SETUP\n#line 109 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ISNOT); }\n\tYY_BREAK\ncase 40:\nYY_RULE_SETUP\n#line 110 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(HAS); }\n\tYY_BREAK\ncase 41:\nYY_RULE_SETUP\n#line 111 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(HASNOT); }\n\tYY_BREAK\ncase 42:\nYY_RULE_SETUP\n#line 113 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(IN); }\n\tYY_BREAK\ncase 43:\nYY_RULE_SETUP\n#line 114 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ISIN); }\n\tYY_BREAK\ncase 44:\nYY_RULE_SETUP\n#line 115 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(NOTIN); }\n\tYY_BREAK\ncase 45:\nYY_RULE_SETUP\n#line 117 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(EQ2); }\n\tYY_BREAK\ncase 46:\nYY_RULE_SETUP\n#line 118 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(NEQ); }\n\tYY_BREAK\ncase 47:\nYY_RULE_SETUP\n#line 119 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LT); }\n\tYY_BREAK\ncase 48:\nYY_RULE_SETUP\n#line 120 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(GT); }\n\tYY_BREAK\ncase 49:\nYY_RULE_SETUP\n#line 121 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LTE); }\n\tYY_BREAK\ncase 50:\nYY_RULE_SETUP\n#line 122 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(GTE); }\n\tYY_BREAK\ncase 51:\nYY_RULE_SETUP\n#line 124 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(EQ); }\n\tYY_BREAK\ncase 52:\nYY_RULE_SETUP\n#line 125 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(FATARROW); }\n\tYY_BREAK\ncase 53:\nYY_RULE_SETUP\n#line 126 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(FATARROW2); }\n\tYY_BREAK\ncase 54:\nYY_RULE_SETUP\n#line 128 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LARROW); }\n\tYY_BREAK\ncase 55:\nYY_RULE_SETUP\n#line 129 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RARROW); }\n\tYY_BREAK\ncase 56:\nYY_RULE_SETUP\n#line 130 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LARROW2); }\n\tYY_BREAK\ncase 57:\nYY_RULE_SETUP\n#line 131 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RARROW2); }\n\tYY_BREAK\ncase 58:\nYY_RULE_SETUP\n#line 132 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LARROW3); }\n\tYY_BREAK\ncase 59:\nYY_RULE_SETUP\n#line 133 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RARROW3); }\n\tYY_BREAK\ncase 60:\nYY_RULE_SETUP\n#line 134 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RARROW2MUL); }\n\tYY_BREAK\ncase 61:\nYY_RULE_SETUP\n#line 136 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(COLON); }\n\tYY_BREAK\ncase 62:\nYY_RULE_SETUP\n#line 137 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(COLON2); }\n\tYY_BREAK\ncase 63:\nYY_RULE_SETUP\n#line 139 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(ADD_EQ); }\n\tYY_BREAK\ncase 
64:\nYY_RULE_SETUP\n#line 140 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(STAR_EQ); }\n\tYY_BREAK\ncase 65:\nYY_RULE_SETUP\n#line 141 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SUBTRACT_EQ); }\n\tYY_BREAK\ncase 66:\nYY_RULE_SETUP\n#line 142 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SLASH_EQ); }\n\tYY_BREAK\ncase 67:\nYY_RULE_SETUP\n#line 143 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SLASH2_EQ); }\n\tYY_BREAK\ncase 68:\nYY_RULE_SETUP\n#line 144 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(STAR2_EQ); }\n\tYY_BREAK\ncase 69:\nYY_RULE_SETUP\n#line 145 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(CONCAT_EQ); }\n\tYY_BREAK\ncase 70:\nYY_RULE_SETUP\n#line 147 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(AT); }\n\tYY_BREAK\ncase 71:\nYY_RULE_SETUP\n#line 148 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(CARET); }\n\tYY_BREAK\ncase 72:\nYY_RULE_SETUP\n#line 149 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(UNDERSCORE); }\n\tYY_BREAK\ncase 73:\nYY_RULE_SETUP\n#line 150 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(POUND); }\n\tYY_BREAK\ncase 74:\nYY_RULE_SETUP\n#line 151 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(TILDE); }\n\tYY_BREAK\ncase 75:\nYY_RULE_SETUP\n#line 152 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(AMPERSAND); }\n\tYY_BREAK\ncase 76:\nYY_RULE_SETUP\n#line 153 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(PIPE); }\n\tYY_BREAK\ncase 77:\nYY_RULE_SETUP\n#line 154 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(PIPE2); }\n\tYY_BREAK\ncase 78:\nYY_RULE_SETUP\n#line 155 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(QUESTION); }\n\tYY_BREAK\ncase 79:\nYY_RULE_SETUP\n#line 156 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(EXCLAMATION); }\n\tYY_BREAK\ncase 80:\nYY_RULE_SETUP\n#line 157 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(BACKSLASH); }\n\tYY_BREAK\ncase 81:\nYY_RULE_SETUP\n#line 158 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(DASHDASH); }\n\tYY_BREAK\ncase 82:\nYY_RULE_SETUP\n#line 159 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(BULLET); }\n\tYY_BREAK\ncase 83:\nYY_RULE_SETUP\n#line 161 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(TO); }\n\tYY_BREAK\ncase 84:\nYY_RULE_SETUP\n#line 162 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(THROUGH); }\n\tYY_BREAK\ncase 85:\nYY_RULE_SETUP\n#line 163 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(BY); }\n\tYY_BREAK\ncase 86:\nYY_RULE_SETUP\n#line 164 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(WHERE); }\n\tYY_BREAK\ncase 87:\n/* rule 87 can match eol */\nYY_RULE_SETUP\n#line 166 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n yylval->indentLevel = (strlen(yytext)-1) / 4;\n TOKEN(NEWLINE);\n}\n\tYY_BREAK\ncase 88:\n/* rule 88 can match eol */\nYY_RULE_SETUP\n#line 171 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n yylval->indentLevel = 0;\n TOKEN(NEWLINE);\n}\n\tYY_BREAK\ncase 89:\nYY_RULE_SETUP\n#line 176 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n char* specialty;\n char* value = UpParseString(yytext, '\"', true, &specialty);\n UpSyntax* str = PARSE_STR(*yylloc, value, specialty);\n if (!str) {\n CONSUME(ERROR_TOKEN);\n } else {\n yylval->objectValue = str;\n CONSUME(FSTRING);\n }\n}\n\tYY_BREAK\ncase 90:\n/* rule 90 can match eol */\nYY_RULE_SETUP\n#line 189 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n char* specialty;\n char* value = UpParseString(yytext, '\"', false, &specialty);\n if (value) {\n value = UpRemoveTrailingWhitespace(value);\n }\n \n UpSyntax* str = PARSE_STR(*yylloc, value, specialty);\n if (!str) {\n CONSUME(ERROR_TOKEN);\n } else {\n yylval->objectValue = str;\n CONSUME(FSTRING_LINE);\n }\n}\n\tYY_BREAK\ncase 
91:\nYY_RULE_SETUP\n#line 206 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n if (strlen(yytext) > 10) {\n long long value = yytext[0] == '-'\n ? -UpHexToLong(yytext+3)\n : UpHexToLong(yytext+2);\n\n yylval->objectValue = PARSE_LONG(*yylloc, value, 0);\n CONSUME(LONG);\n } else {\n int value = yytext[0] == '-'\n ? -UpHexToInt(yytext+3)\n : UpHexToInt(yytext+2);\n\n yylval->objectValue = PARSE_INT(*yylloc, value, 0);\n CONSUME(INTEGER);\n }\n}\n\tYY_BREAK\ncase 92:\nYY_RULE_SETUP\n#line 225 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n long long value = UpToLongLong(yytext);\n if (value > 2147483647 || value < -2147483647) {\n yylval->objectValue = PARSE_LONG(*yylloc, value, 0);\n } else {\n int value = UpToInt(yytext);\n yylval->objectValue = PARSE_INT(*yylloc, value, 0);\n }\n CONSUME(INTEGER);\n}\n\tYY_BREAK\ncase 93:\nYY_RULE_SETUP\n#line 237 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n const char* unit = UpParseUnit(yytext);\n unsigned long long value = UpToLongLong(yytext);\n if (value >= 0xFFFFFFFF) {\n yylval->objectValue = PARSE_LONG(*yylloc, value, unit);\n } else {\n yylval->objectValue = PARSE_INT(*yylloc, value, unit);\n }\n CONSUME(INTEGER);\n}\n\tYY_BREAK\ncase 94:\nYY_RULE_SETUP\n#line 249 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n double value = UpToDouble(yytext);\n yylval->objectValue = PARSE_FLOAT(*yylloc, value, 0);\n CONSUME(FLOAT);\n}\n\tYY_BREAK\ncase 95:\nYY_RULE_SETUP\n#line 256 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n double value = UpToDouble(yytext);\n yylval->objectValue = PARSE_FLOAT(*yylloc, value, 0);\n CONSUME(FLOAT);\n}\n\tYY_BREAK\ncase 96:\nYY_RULE_SETUP\n#line 263 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n double value = UpToDouble(yytext);\n const char* unit = UpParseUnit(yytext);\n yylval->objectValue = PARSE_FLOAT(*yylloc, value, unit);\n CONSUME(FLOAT);\n}\n\tYY_BREAK\ncase 
97:\nYY_RULE_SETUP\n#line 271 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n ID_TOKEN(IDENTIFIER);\n}\n\tYY_BREAK\ncase 98:\nYY_RULE_SETUP\n#line 275 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n ID_TOKEN(IDENTIFIER);\n}\n\tYY_BREAK\ncase 99:\nYY_RULE_SETUP\n#line 279 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n ID_TOKEN(UIDENTIFIER);\n}\n\tYY_BREAK\ncase 100:\nYY_RULE_SETUP\n#line 283 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n BID_TOKEN(BIDENTIFIER);\n}\n\tYY_BREAK\ncase 101:\nYY_RULE_SETUP\n#line 287 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ SYNTAX_ERROR }\n\tYY_BREAK\n// ***********************************************************************************************\n\ncase 102:\nYY_RULE_SETUP\n#line 293 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ }\n\tYY_BREAK\ncase 103:\n/* rule 103 can match eol */\nYY_RULE_SETUP\n#line 294 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ }\n\tYY_BREAK\ncase 104:\nYY_RULE_SETUP\n#line 296 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(LP); }\n\tYY_BREAK\ncase 105:\nYY_RULE_SETUP\n#line 297 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(RP); }\n\tYY_BREAK\ncase 106:\nYY_RULE_SETUP\n#line 298 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(STAR); }\n\tYY_BREAK\ncase 107:\nYY_RULE_SETUP\n#line 299 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(COMMA); }\n\tYY_BREAK\ncase 108:\nYY_RULE_SETUP\n#line 300 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(SEMICOLON); }\n\tYY_BREAK\ncase 109:\nYY_RULE_SETUP\n#line 301 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(CONST); }\n\tYY_BREAK\ncase 110:\nYY_RULE_SETUP\n#line 302 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ TOKEN(STRUCT); }\n\tYY_BREAK\ncase 111:\nYY_RULE_SETUP\n#line 304 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 112:\nYY_RULE_SETUP\n#line 305 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 113:\nYY_RULE_SETUP\n#line 306 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 114:\nYY_RULE_SETUP\n#line 307 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 115:\nYY_RULE_SETUP\n#line 308 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 116:\nYY_RULE_SETUP\n#line 309 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 117:\nYY_RULE_SETUP\n#line 310 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 118:\nYY_RULE_SETUP\n#line 311 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 119:\nYY_RULE_SETUP\n#line 312 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 120:\nYY_RULE_SETUP\n#line 313 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 121:\nYY_RULE_SETUP\n#line 314 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 122:\nYY_RULE_SETUP\n#line 315 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 123:\nYY_RULE_SETUP\n#line 316 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 124:\nYY_RULE_SETUP\n#line 317 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 125:\nYY_RULE_SETUP\n#line 318 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(CPRIMITIVE); }\n\tYY_BREAK\ncase 126:\nYY_RULE_SETUP\n#line 319 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(IDENTIFIER); }\n\tYY_BREAK\ncase 127:\nYY_RULE_SETUP\n#line 320 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ ID_TOKEN(IDENTIFIER); 
}\n\tYY_BREAK\ncase 128:\nYY_RULE_SETUP\n#line 322 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ SYNTAX_ERROR }\n\tYY_BREAK\n// ***********************************************************************************************\n\ncase 129:\n/* rule 129 can match eol */\nYY_RULE_SETUP\n#line 328 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n if (!UpUnescapeString(UPSCANNER, yytext)) {\n CONSUME(ERROR_TOKEN);\n }\n yylval->objectValue = PARSE_STR(*yylloc, yytext, NULL);\n CONSUME(STRING);\n}\n\tYY_BREAK\ncase 130:\nYY_RULE_SETUP\n#line 337 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n yylval->objectValue = PARSE_STR(*yylloc, yytext, NULL);\n CONSUME(STRING);\n}\n\tYY_BREAK\ncase 131:\nYY_RULE_SETUP\n#line 343 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{\n LOCATE();\n yylval->stringValue = UpArenaCopyString(UPSCANNERHEAP, yytext+1);\n CONSUME(INLINE_EXPR);\n}\n\tYY_BREAK\ncase 132:\nYY_RULE_SETUP\n#line 349 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n{ SYNTAX_ERROR }\n\tYY_BREAK\n// ***********************************************************************************************\ncase 133:\nYY_RULE_SETUP\n#line 353 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\nECHO;\n\tYY_BREAK\n#line 1956 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.yy.c\"\ncase YY_STATE_EOF(INITIAL):\ncase YY_STATE_EOF(upcode):\ncase YY_STATE_EOF(ccode):\ncase YY_STATE_EOF(text):\n\tyyterminate();\n\n\tcase YY_END_OF_BUFFER:\n\t\t{\n\t\t/* Amount of text matched not including the EOB char. */\n\t\tint yy_amount_of_matched_text = (int) (yy_cp - yyg->yytext_ptr) - 1;\n\n\t\t/* Undo the effects of YY_DO_BEFORE_ACTION. */\n\t\t*yy_cp = yyg->yy_hold_char;\n\t\tYY_RESTORE_YY_MORE_OFFSET\n\n\t\tif ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )\n\t\t\t{\n\t\t\t/* We're scanning a new file or input source. 
It's\n\t\t\t * possible that this happened because the user\n\t\t\t * just pointed yyin at a new source and called\n\t\t\t * Uplex(). If so, then we have to assure\n\t\t\t * consistency between YY_CURRENT_BUFFER and our\n\t\t\t * globals. Here is the right place to do so, because\n\t\t\t * this is the first action (other than possibly a\n\t\t\t * back-up) that will match for the new input source.\n\t\t\t */\n\t\t\tyyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;\n\t\t\t}\n\n\t\t/* Note that here we test for yy_c_buf_p \"<=\" to the position\n\t\t * of the first EOB in the buffer, since yy_c_buf_p will\n\t\t * already have been incremented past the NUL character\n\t\t * (since all states make transitions on EOB to the\n\t\t * end-of-buffer state). Contrast this with the test\n\t\t * in input().\n\t\t */\n\t\tif ( yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )\n\t\t\t{ /* This was really a NUL. */\n\t\t\tyy_state_type yy_next_state;\n\n\t\t\tyyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text;\n\n\t\t\tyy_current_state = yy_get_previous_state( yyscanner );\n\n\t\t\t/* Okay, we're now positioned to make the NUL\n\t\t\t * transition. We couldn't have\n\t\t\t * yy_get_previous_state() go ahead and do it\n\t\t\t * for us because it doesn't know how to deal\n\t\t\t * with the possibility of jamming (and we don't\n\t\t\t * want to build jamming into it because then it\n\t\t\t * will run more slowly).\n\t\t\t */\n\n\t\t\tyy_next_state = yy_try_NUL_trans( yy_current_state , yyscanner);\n\n\t\t\tyy_bp = yyg->yytext_ptr + YY_MORE_ADJ;\n\n\t\t\tif ( yy_next_state )\n\t\t\t\t{\n\t\t\t\t/* Consume the NUL. 
*/\n\t\t\t\tyy_cp = ++yyg->yy_c_buf_p;\n\t\t\t\tyy_current_state = yy_next_state;\n\t\t\t\tgoto yy_match;\n\t\t\t\t}\n\n\t\t\telse\n\t\t\t\t{\n\t\t\t\tyy_cp = yyg->yy_c_buf_p;\n\t\t\t\tgoto yy_find_action;\n\t\t\t\t}\n\t\t\t}\n\n\t\telse switch ( yy_get_next_buffer( yyscanner ) )\n\t\t\t{\n\t\t\tcase EOB_ACT_END_OF_FILE:\n\t\t\t\t{\n\t\t\t\tyyg->yy_did_buffer_switch_on_eof = 0;\n\n\t\t\t\tif ( Upwrap(yyscanner ) )\n\t\t\t\t\t{\n\t\t\t\t\t/* Note: because we've taken care in\n\t\t\t\t\t * yy_get_next_buffer() to have set up\n\t\t\t\t\t * yytext, we can now set up\n\t\t\t\t\t * yy_c_buf_p so that if some total\n\t\t\t\t\t * hoser (like flex itself) wants to\n\t\t\t\t\t * call the scanner after we return the\n\t\t\t\t\t * YY_NULL, it'll still work - another\n\t\t\t\t\t * YY_NULL will get returned.\n\t\t\t\t\t */\n\t\t\t\t\tyyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ;\n\n\t\t\t\t\tyy_act = YY_STATE_EOF(YY_START);\n\t\t\t\t\tgoto do_action;\n\t\t\t\t\t}\n\n\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\tif ( ! 
yyg->yy_did_buffer_switch_on_eof )\n\t\t\t\t\t\tYY_NEW_FILE;\n\t\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\tcase EOB_ACT_CONTINUE_SCAN:\n\t\t\t\tyyg->yy_c_buf_p =\n\t\t\t\t\tyyg->yytext_ptr + yy_amount_of_matched_text;\n\n\t\t\t\tyy_current_state = yy_get_previous_state( yyscanner );\n\n\t\t\t\tyy_cp = yyg->yy_c_buf_p;\n\t\t\t\tyy_bp = yyg->yytext_ptr + YY_MORE_ADJ;\n\t\t\t\tgoto yy_match;\n\n\t\t\tcase EOB_ACT_LAST_MATCH:\n\t\t\t\tyyg->yy_c_buf_p =\n\t\t\t\t&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars];\n\n\t\t\t\tyy_current_state = yy_get_previous_state( yyscanner );\n\n\t\t\t\tyy_cp = yyg->yy_c_buf_p;\n\t\t\t\tyy_bp = yyg->yytext_ptr + YY_MORE_ADJ;\n\t\t\t\tgoto yy_find_action;\n\t\t\t}\n\t\tbreak;\n\t\t}\n\n\tdefault:\n\t\tYY_FATAL_ERROR(\n\t\t\t\"fatal flex scanner internal error--no action found\" );\n\t} /* end of action switch */\n\t\t} /* end of scanning one token */\n} /* end of Uplex */\n\n/* yy_get_next_buffer - try to read in a new buffer\n *\n * Returns a code representing an action:\n *\tEOB_ACT_LAST_MATCH -\n *\tEOB_ACT_CONTINUE_SCAN - continue scanning from current position\n *\tEOB_ACT_END_OF_FILE - end of file\n */\nstatic int yy_get_next_buffer (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tregister char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;\n\tregister char *source = yyg->yytext_ptr;\n\tregister int number_to_move, i;\n\tint ret_val;\n\n\tif ( yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] )\n\t\tYY_FATAL_ERROR(\n\t\t\"fatal flex scanner internal error--end of buffer missed\" );\n\n\tif ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )\n\t\t{ /* Don't try to fill the buffer, so this is an EOF. 
*/\n\t\tif ( yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1 )\n\t\t\t{\n\t\t\t/* We matched a single character, the EOB, so\n\t\t\t * treat this as a final EOF.\n\t\t\t */\n\t\t\treturn EOB_ACT_END_OF_FILE;\n\t\t\t}\n\n\t\telse\n\t\t\t{\n\t\t\t/* We matched some text prior to the EOB, first\n\t\t\t * process it.\n\t\t\t */\n\t\t\treturn EOB_ACT_LAST_MATCH;\n\t\t\t}\n\t\t}\n\n\t/* Try to read more data. */\n\n\t/* First move last chars to start of buffer. */\n\tnumber_to_move = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr) - 1;\n\n\tfor ( i = 0; i < number_to_move; ++i )\n\t\t*(dest++) = *(source++);\n\n\tif ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )\n\t\t/* don't do the read, it's not guaranteed to return an EOF,\n\t\t * just force an EOF\n\t\t */\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0;\n\n\telse\n\t\t{\n\t\t\tyy_size_t num_to_read =\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;\n\n\t\twhile ( num_to_read <= 0 )\n\t\t\t{ /* Not enough room in the buffer - grow it. */\n\n\t\t\t/* just a shorter name for the current buffer */\n\t\t\tYY_BUFFER_STATE b = YY_CURRENT_BUFFER;\n\n\t\t\tint yy_c_buf_p_offset =\n\t\t\t\t(int) (yyg->yy_c_buf_p - b->yy_ch_buf);\n\n\t\t\tif ( b->yy_is_our_buffer )\n\t\t\t\t{\n\t\t\t\tyy_size_t new_size = b->yy_buf_size * 2;\n\n\t\t\t\tif ( new_size <= 0 )\n\t\t\t\t\tb->yy_buf_size += b->yy_buf_size / 8;\n\t\t\t\telse\n\t\t\t\t\tb->yy_buf_size *= 2;\n\n\t\t\t\tb->yy_ch_buf = (char *)\n\t\t\t\t\t/* Include room in for 2 EOB chars. */\n\t\t\t\t\tUprealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner );\n\t\t\t\t}\n\t\t\telse\n\t\t\t\t/* Can't grow it, we don't own it. */\n\t\t\t\tb->yy_ch_buf = 0;\n\n\t\t\tif ( ! 
b->yy_ch_buf )\n\t\t\t\tYY_FATAL_ERROR(\n\t\t\t\t\"fatal error - scanner input buffer overflow\" );\n\n\t\t\tyyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset];\n\n\t\t\tnum_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -\n\t\t\t\t\t\tnumber_to_move - 1;\n\n\t\t\t}\n\n\t\tif ( num_to_read > YY_READ_BUF_SIZE )\n\t\t\tnum_to_read = YY_READ_BUF_SIZE;\n\n\t\t/* Read in more data. */\n\t\tYY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),\n\t\t\tyyg->yy_n_chars, num_to_read );\n\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;\n\t\t}\n\n\tif ( yyg->yy_n_chars == 0 )\n\t\t{\n\t\tif ( number_to_move == YY_MORE_ADJ )\n\t\t\t{\n\t\t\tret_val = EOB_ACT_END_OF_FILE;\n\t\t\tUprestart(yyin ,yyscanner);\n\t\t\t}\n\n\t\telse\n\t\t\t{\n\t\t\tret_val = EOB_ACT_LAST_MATCH;\n\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buffer_status =\n\t\t\t\tYY_BUFFER_EOF_PENDING;\n\t\t\t}\n\t\t}\n\n\telse\n\t\tret_val = EOB_ACT_CONTINUE_SCAN;\n\n\tif ((yy_size_t) (yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {\n\t\t/* Extend the array by 50%, plus the number we really need. */\n\t\tyy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) Uprealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner );\n\t\tif ( ! 
YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )\n\t\t\tYY_FATAL_ERROR( \"out of dynamic memory in yy_get_next_buffer()\" );\n\t}\n\n\tyyg->yy_n_chars += number_to_move;\n\tYY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR;\n\tYY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR;\n\n\tyyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];\n\n\treturn ret_val;\n}\n\n/* yy_get_previous_state - get the state just before the EOB char was reached */\n\n static yy_state_type yy_get_previous_state (yyscan_t yyscanner)\n{\n\tregister yy_state_type yy_current_state;\n\tregister char *yy_cp;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\tyy_current_state = yyg->yy_start;\n\n\tfor ( yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp )\n\t\t{\n\t\tregister YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);\n\t\tif ( yy_accept[yy_current_state] )\n\t\t\t{\n\t\t\tyyg->yy_last_accepting_state = yy_current_state;\n\t\t\tyyg->yy_last_accepting_cpos = yy_cp;\n\t\t\t}\n\t\twhile ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )\n\t\t\t{\n\t\t\tyy_current_state = (int) yy_def[yy_current_state];\n\t\t\tif ( yy_current_state >= 370 )\n\t\t\t\tyy_c = yy_meta[(unsigned int) yy_c];\n\t\t\t}\n\t\tyy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];\n\t\t}\n\n\treturn yy_current_state;\n}\n\n/* yy_try_NUL_trans - try to make a transition on the NUL character\n *\n * synopsis\n *\tnext_state = yy_try_NUL_trans( current_state );\n */\n static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)\n{\n\tregister int yy_is_jam;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. 
*/\n\tregister char *yy_cp = yyg->yy_c_buf_p;\n\n\tregister YY_CHAR yy_c = 1;\n\tif ( yy_accept[yy_current_state] )\n\t\t{\n\t\tyyg->yy_last_accepting_state = yy_current_state;\n\t\tyyg->yy_last_accepting_cpos = yy_cp;\n\t\t}\n\twhile ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )\n\t\t{\n\t\tyy_current_state = (int) yy_def[yy_current_state];\n\t\tif ( yy_current_state >= 370 )\n\t\t\tyy_c = yy_meta[(unsigned int) yy_c];\n\t\t}\n\tyy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];\n\tyy_is_jam = (yy_current_state == 369);\n\n\treturn yy_is_jam ? 0 : yy_current_state;\n}\n\n static void yyunput (int c, register char * yy_bp , yyscan_t yyscanner)\n{\n\tregister char *yy_cp;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n yy_cp = yyg->yy_c_buf_p;\n\n\t/* undo effects of setting up yytext */\n\t*yy_cp = yyg->yy_hold_char;\n\n\tif ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )\n\t\t{ /* need to shift things up to make room */\n\t\t/* +2 for EOB chars. 
*/\n\t\tregister yy_size_t number_to_move = yyg->yy_n_chars + 2;\n\t\tregister char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[\n\t\t\t\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];\n\t\tregister char *source =\n\t\t\t\t&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];\n\n\t\twhile ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )\n\t\t\t*--dest = *--source;\n\n\t\tyy_cp += (int) (dest - source);\n\t\tyy_bp += (int) (dest - source);\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars =\n\t\t\tyyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;\n\n\t\tif ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )\n\t\t\tYY_FATAL_ERROR( \"flex scanner push-back overflow\" );\n\t\t}\n\n\t*--yy_cp = (char) c;\n\n\tyyg->yytext_ptr = yy_bp;\n\tyyg->yy_hold_char = *yy_cp;\n\tyyg->yy_c_buf_p = yy_cp;\n}\n\n#ifndef YY_NO_INPUT\n#ifdef __cplusplus\n static int yyinput (yyscan_t yyscanner)\n#else\n static int input (yyscan_t yyscanner)\n#endif\n\n{\n\tint c;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\t*yyg->yy_c_buf_p = yyg->yy_hold_char;\n\n\tif ( *yyg->yy_c_buf_p == YY_END_OF_BUFFER_CHAR )\n\t\t{\n\t\t/* yy_c_buf_p now points to the character we want to return.\n\t\t * If this occurs *before* the EOB characters, then it's a\n\t\t * valid NUL; if not, then we've hit the end of the buffer.\n\t\t */\n\t\tif ( yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )\n\t\t\t/* This was really a NUL. */\n\t\t\t*yyg->yy_c_buf_p = '\\0';\n\n\t\telse\n\t\t\t{ /* need more input */\n\t\t\tyy_size_t offset = yyg->yy_c_buf_p - yyg->yytext_ptr;\n\t\t\t++yyg->yy_c_buf_p;\n\n\t\t\tswitch ( yy_get_next_buffer( yyscanner ) )\n\t\t\t\t{\n\t\t\t\tcase EOB_ACT_LAST_MATCH:\n\t\t\t\t\t/* This happens because yy_g_n_b()\n\t\t\t\t\t * sees that we've accumulated a\n\t\t\t\t\t * token and flags that we need to\n\t\t\t\t\t * try matching the token before\n\t\t\t\t\t * proceeding. 
But for input(),\n\t\t\t\t\t * there's no matching to consider.\n\t\t\t\t\t * So convert the EOB_ACT_LAST_MATCH\n\t\t\t\t\t * to EOB_ACT_END_OF_FILE.\n\t\t\t\t\t */\n\n\t\t\t\t\t/* Reset buffer status. */\n\t\t\t\t\tUprestart(yyin ,yyscanner);\n\n\t\t\t\t\t/*FALLTHROUGH*/\n\n\t\t\t\tcase EOB_ACT_END_OF_FILE:\n\t\t\t\t\t{\n\t\t\t\t\tif ( Upwrap(yyscanner ) )\n\t\t\t\t\t\treturn 0;\n\n\t\t\t\t\tif ( ! yyg->yy_did_buffer_switch_on_eof )\n\t\t\t\t\t\tYY_NEW_FILE;\n#ifdef __cplusplus\n\t\t\t\t\treturn yyinput(yyscanner);\n#else\n\t\t\t\t\treturn input(yyscanner);\n#endif\n\t\t\t\t\t}\n\n\t\t\t\tcase EOB_ACT_CONTINUE_SCAN:\n\t\t\t\t\tyyg->yy_c_buf_p = yyg->yytext_ptr + offset;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tc = *(unsigned char *) yyg->yy_c_buf_p;\t/* cast for 8-bit char's */\n\t*yyg->yy_c_buf_p = '\\0';\t/* preserve yytext */\n\tyyg->yy_hold_char = *++yyg->yy_c_buf_p;\n\n\treturn c;\n}\n#endif\t/* ifndef YY_NO_INPUT */\n\n/** Immediately switch to a different input stream.\n * @param input_file A readable stream.\n * @param yyscanner The scanner object.\n * @note This function does not reset the start condition to @c INITIAL .\n */\n void Uprestart (FILE * input_file , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\tif ( ! YY_CURRENT_BUFFER ){\n Upensure_buffer_stack (yyscanner);\n\t\tYY_CURRENT_BUFFER_LVALUE =\n Up_create_buffer(yyin,YY_BUF_SIZE ,yyscanner);\n\t}\n\n\tUp_init_buffer(YY_CURRENT_BUFFER,input_file ,yyscanner);\n\tUp_load_buffer_state(yyscanner );\n}\n\n/** Switch to a different input buffer.\n * @param new_buffer The new input buffer.\n * @param yyscanner The scanner object.\n */\n void Up_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\t/* TODO. 
We should be able to replace this entire function body\n\t * with\n\t *\t\tUppop_buffer_state();\n\t *\t\tUppush_buffer_state(new_buffer);\n */\n\tUpensure_buffer_stack (yyscanner);\n\tif ( YY_CURRENT_BUFFER == new_buffer )\n\t\treturn;\n\n\tif ( YY_CURRENT_BUFFER )\n\t\t{\n\t\t/* Flush out information for old buffer. */\n\t\t*yyg->yy_c_buf_p = yyg->yy_hold_char;\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;\n\t\t}\n\n\tYY_CURRENT_BUFFER_LVALUE = new_buffer;\n\tUp_load_buffer_state(yyscanner );\n\n\t/* We don't actually know whether we did this switch during\n\t * EOF (Upwrap()) processing, but the only time this flag\n\t * is looked at is after Upwrap() is called, so it's safe\n\t * to go ahead and always set it.\n\t */\n\tyyg->yy_did_buffer_switch_on_eof = 1;\n}\n\nstatic void Up_load_buffer_state (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tyyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;\n\tyyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;\n\tyyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;\n\tyyg->yy_hold_char = *yyg->yy_c_buf_p;\n}\n\n/** Allocate and initialize an input buffer state.\n * @param file A readable stream.\n * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.\n * @param yyscanner The scanner object.\n * @return the allocated buffer state.\n */\n YY_BUFFER_STATE Up_create_buffer (FILE * file, int size , yyscan_t yyscanner)\n{\n\tYY_BUFFER_STATE b;\n \n\tb = (YY_BUFFER_STATE) Upalloc(sizeof( struct yy_buffer_state ) ,yyscanner );\n\tif ( ! b )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in Up_create_buffer()\" );\n\n\tb->yy_buf_size = size;\n\n\t/* yy_ch_buf has to be 2 characters longer than the size given because\n\t * we need to put in 2 end-of-buffer characters.\n\t */\n\tb->yy_ch_buf = (char *) Upalloc(b->yy_buf_size + 2 ,yyscanner );\n\tif ( ! 
b->yy_ch_buf )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in Up_create_buffer()\" );\n\n\tb->yy_is_our_buffer = 1;\n\n\tUp_init_buffer(b,file ,yyscanner);\n\n\treturn b;\n}\n\n/** Destroy the buffer.\n * @param b a buffer created with Up_create_buffer()\n * @param yyscanner The scanner object.\n */\n void Up_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\tif ( ! b )\n\t\treturn;\n\n\tif ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */\n\t\tYY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;\n\n\tif ( b->yy_is_our_buffer )\n\t\tUpfree((void *) b->yy_ch_buf ,yyscanner );\n\n\tUpfree((void *) b ,yyscanner );\n}\n\n#ifndef __cplusplus\nextern int isatty (int );\n#endif /* __cplusplus */\n \n/* Initializes or reinitializes a buffer.\n * This function is sometimes called more than once on the same buffer,\n * such as during a Uprestart() or at EOF.\n */\n static void Up_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner)\n\n{\n\tint oerrno = errno;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\tUp_flush_buffer(b ,yyscanner);\n\n\tb->yy_input_file = file;\n\tb->yy_fill_buffer = 1;\n\n /* If b is the current buffer, then Up_init_buffer was _probably_\n * called from Uprestart() or through yy_get_next_buffer.\n * In that case, we don't want to reset the lineno or column.\n */\n if (b != YY_CURRENT_BUFFER){\n b->yy_bs_lineno = 1;\n b->yy_bs_column = 0;\n }\n\n b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;\n \n\terrno = oerrno;\n}\n\n/** Discard all buffered characters. On the next scan, YY_INPUT will be called.\n * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.\n * @param yyscanner The scanner object.\n */\n void Up_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tif ( ! 
b )\n\t\treturn;\n\n\tb->yy_n_chars = 0;\n\n\t/* We always need two end-of-buffer characters. The first causes\n\t * a transition to the end-of-buffer state. The second causes\n\t * a jam in that state.\n\t */\n\tb->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;\n\tb->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;\n\n\tb->yy_buf_pos = &b->yy_ch_buf[0];\n\n\tb->yy_at_bol = 1;\n\tb->yy_buffer_status = YY_BUFFER_NEW;\n\n\tif ( b == YY_CURRENT_BUFFER )\n\t\tUp_load_buffer_state(yyscanner );\n}\n\n/** Pushes the new state onto the stack. The new state becomes\n * the current state. This function will allocate the stack\n * if necessary.\n * @param new_buffer The new state.\n * @param yyscanner The scanner object.\n */\nvoid Uppush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tif (new_buffer == NULL)\n\t\treturn;\n\n\tUpensure_buffer_stack(yyscanner);\n\n\t/* This block is copied from Up_switch_to_buffer. */\n\tif ( YY_CURRENT_BUFFER )\n\t\t{\n\t\t/* Flush out information for old buffer. */\n\t\t*yyg->yy_c_buf_p = yyg->yy_hold_char;\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;\n\t\tYY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;\n\t\t}\n\n\t/* Only push if top exists. Otherwise, replace top. */\n\tif (YY_CURRENT_BUFFER)\n\t\tyyg->yy_buffer_stack_top++;\n\tYY_CURRENT_BUFFER_LVALUE = new_buffer;\n\n\t/* copied from Up_switch_to_buffer. 
*/\n\tUp_load_buffer_state(yyscanner );\n\tyyg->yy_did_buffer_switch_on_eof = 1;\n}\n\n/** Removes and deletes the top of the stack, if present.\n * The next element becomes the new top.\n * @param yyscanner The scanner object.\n */\nvoid Uppop_buffer_state (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tif (!YY_CURRENT_BUFFER)\n\t\treturn;\n\n\tUp_delete_buffer(YY_CURRENT_BUFFER ,yyscanner);\n\tYY_CURRENT_BUFFER_LVALUE = NULL;\n\tif (yyg->yy_buffer_stack_top > 0)\n\t\t--yyg->yy_buffer_stack_top;\n\n\tif (YY_CURRENT_BUFFER) {\n\t\tUp_load_buffer_state(yyscanner );\n\t\tyyg->yy_did_buffer_switch_on_eof = 1;\n\t}\n}\n\n/* Allocates the stack if it does not exist.\n * Guarantees space for at least one push.\n */\nstatic void Upensure_buffer_stack (yyscan_t yyscanner)\n{\n\tyy_size_t num_to_alloc;\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n\tif (!yyg->yy_buffer_stack) {\n\n\t\t/* First allocation is just for 2 elements, since we don't know if this\n\t\t * scanner will even need a stack. We use 2 instead of 1 to avoid an\n\t\t * immediate realloc on the next call.\n */\n\t\tnum_to_alloc = 1;\n\t\tyyg->yy_buffer_stack = (struct yy_buffer_state**)Upalloc\n\t\t\t\t\t\t\t\t(num_to_alloc * sizeof(struct yy_buffer_state*)\n\t\t\t\t\t\t\t\t, yyscanner);\n\t\tif ( ! yyg->yy_buffer_stack )\n\t\t\tYY_FATAL_ERROR( \"out of dynamic memory in Upensure_buffer_stack()\" );\n\t\t\t\t\t\t\t\t \n\t\tmemset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*));\n\t\t\t\t\n\t\tyyg->yy_buffer_stack_max = num_to_alloc;\n\t\tyyg->yy_buffer_stack_top = 0;\n\t\treturn;\n\t}\n\n\tif (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){\n\n\t\t/* Increase the buffer to prepare for a possible push. 
*/\n\t\tint grow_size = 8 /* arbitrary grow size */;\n\n\t\tnum_to_alloc = yyg->yy_buffer_stack_max + grow_size;\n\t\tyyg->yy_buffer_stack = (struct yy_buffer_state**)Uprealloc\n\t\t\t\t\t\t\t\t(yyg->yy_buffer_stack,\n\t\t\t\t\t\t\t\tnum_to_alloc * sizeof(struct yy_buffer_state*)\n\t\t\t\t\t\t\t\t, yyscanner);\n\t\tif ( ! yyg->yy_buffer_stack )\n\t\t\tYY_FATAL_ERROR( \"out of dynamic memory in Upensure_buffer_stack()\" );\n\n\t\t/* zero only the new slots.*/\n\t\tmemset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*));\n\t\tyyg->yy_buffer_stack_max = num_to_alloc;\n\t}\n}\n\n/** Setup the input buffer state to scan directly from a user-specified character buffer.\n * @param base the character buffer\n * @param size the size in bytes of the character buffer\n * @param yyscanner The scanner object.\n * @return the newly allocated buffer state object. \n */\nYY_BUFFER_STATE Up_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner)\n{\n\tYY_BUFFER_STATE b;\n \n\tif ( size < 2 ||\n\t base[size-2] != YY_END_OF_BUFFER_CHAR ||\n\t base[size-1] != YY_END_OF_BUFFER_CHAR )\n\t\t/* They forgot to leave room for the EOB's. */\n\t\treturn 0;\n\n\tb = (YY_BUFFER_STATE) Upalloc(sizeof( struct yy_buffer_state ) ,yyscanner );\n\tif ( ! b )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in Up_scan_buffer()\" );\n\n\tb->yy_buf_size = size - 2;\t/* \"- 2\" to take care of EOB's */\n\tb->yy_buf_pos = b->yy_ch_buf = base;\n\tb->yy_is_our_buffer = 0;\n\tb->yy_input_file = 0;\n\tb->yy_n_chars = b->yy_buf_size;\n\tb->yy_is_interactive = 0;\n\tb->yy_at_bol = 1;\n\tb->yy_fill_buffer = 0;\n\tb->yy_buffer_status = YY_BUFFER_NEW;\n\n\tUp_switch_to_buffer(b ,yyscanner );\n\n\treturn b;\n}\n\n/** Setup the input buffer state to scan a string. 
The next call to Uplex() will\n * scan from a @e copy of @a str.\n * @param yystr a NUL-terminated string to scan\n * @param yyscanner The scanner object.\n * @return the newly allocated buffer state object.\n * @note If you want to scan bytes that may contain NUL values, then use\n * Up_scan_bytes() instead.\n */\nYY_BUFFER_STATE Up_scan_string (yyconst char * yystr , yyscan_t yyscanner)\n{\n \n\treturn Up_scan_bytes(yystr,strlen(yystr) ,yyscanner);\n}\n\n/** Setup the input buffer state to scan the given bytes. The next call to Uplex() will\n * scan from a @e copy of @a bytes.\n * @param bytes the byte buffer to scan\n * @param len the number of bytes in the buffer pointed to by @a bytes.\n * @param yyscanner The scanner object.\n * @return the newly allocated buffer state object.\n */\nYY_BUFFER_STATE Up_scan_bytes (yyconst char * yybytes, yy_size_t _yybytes_len , yyscan_t yyscanner)\n{\n\tYY_BUFFER_STATE b;\n\tchar *buf;\n\tyy_size_t n, i;\n \n\t/* Get memory for full buffer, including space for trailing EOB's. */\n\tn = _yybytes_len + 2;\n\tbuf = (char *) Upalloc(n ,yyscanner );\n\tif ( ! buf )\n\t\tYY_FATAL_ERROR( \"out of dynamic memory in Up_scan_bytes()\" );\n\n\tfor ( i = 0; i < _yybytes_len; ++i )\n\t\tbuf[i] = yybytes[i];\n\n\tbuf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;\n\n\tb = Up_scan_buffer(buf,n ,yyscanner);\n\tif ( ! b )\n\t\tYY_FATAL_ERROR( \"bad buffer in Up_scan_bytes()\" );\n\n\t/* It's okay to grow etc. this buffer, and we should throw it\n\t * away when we're done.\n\t */\n\tb->yy_is_our_buffer = 1;\n\n\treturn b;\n}\n\n static void yy_push_state (int new_state , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tif ( yyg->yy_start_stack_ptr >= yyg->yy_start_stack_depth )\n\t\t{\n\t\tyy_size_t new_size;\n\n\t\tyyg->yy_start_stack_depth += YY_START_STACK_INCR;\n\t\tnew_size = yyg->yy_start_stack_depth * sizeof( int );\n\n\t\tif ( ! 
yyg->yy_start_stack )\n\t\t\tyyg->yy_start_stack = (int *) Upalloc(new_size ,yyscanner );\n\n\t\telse\n\t\t\tyyg->yy_start_stack = (int *) Uprealloc((void *) yyg->yy_start_stack,new_size ,yyscanner );\n\n\t\tif ( ! yyg->yy_start_stack )\n\t\t\tYY_FATAL_ERROR( \"out of memory expanding start-condition stack\" );\n\t\t}\n\n\tyyg->yy_start_stack[yyg->yy_start_stack_ptr++] = YY_START;\n\n\tBEGIN(new_state);\n}\n\n static void yy_pop_state (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\tif ( --yyg->yy_start_stack_ptr < 0 )\n\t\tYY_FATAL_ERROR( \"start-condition stack underflow\" );\n\n\tBEGIN(yyg->yy_start_stack[yyg->yy_start_stack_ptr]);\n}\n\n static int yy_top_state (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\treturn yyg->yy_start_stack[yyg->yy_start_stack_ptr - 1];\n}\n\n#ifndef YY_EXIT_FAILURE\n#define YY_EXIT_FAILURE 2\n#endif\n\nstatic void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner)\n{\n \t(void) fprintf( stderr, \"%s\\n\", msg );\n\texit( YY_EXIT_FAILURE );\n}\n\n/* Redefine yyless() so it works in section 3 code. */\n\n#undef yyless\n#define yyless(n) \\\n\tdo \\\n\t\t{ \\\n\t\t/* Undo effects of setting up yytext. */ \\\n int yyless_macro_arg = (n); \\\n YY_LESS_LINENO(yyless_macro_arg);\\\n\t\tyytext[yyleng] = yyg->yy_hold_char; \\\n\t\tyyg->yy_c_buf_p = yytext + yyless_macro_arg; \\\n\t\tyyg->yy_hold_char = *yyg->yy_c_buf_p; \\\n\t\t*yyg->yy_c_buf_p = '\\0'; \\\n\t\tyyleng = yyless_macro_arg; \\\n\t\t} \\\n\twhile ( 0 )\n\n/* Accessor methods (get/set functions) to struct members. 
*/\n\n/** Get the user-defined data for this scanner.\n * @param yyscanner The scanner object.\n */\nYY_EXTRA_TYPE Upget_extra (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n return yyextra;\n}\n\n/** Get the current line number.\n * @param yyscanner The scanner object.\n */\nint Upget_lineno (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n \n if (! YY_CURRENT_BUFFER)\n return 0;\n \n return yylineno;\n}\n\n/** Get the current column number.\n * @param yyscanner The scanner object.\n */\nint Upget_column (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n \n if (! YY_CURRENT_BUFFER)\n return 0;\n \n return yycolumn;\n}\n\n/** Get the input stream.\n * @param yyscanner The scanner object.\n */\nFILE *Upget_in (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n return yyin;\n}\n\n/** Get the output stream.\n * @param yyscanner The scanner object.\n */\nFILE *Upget_out (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n return yyout;\n}\n\n/** Get the length of the current token.\n * @param yyscanner The scanner object.\n */\nyy_size_t Upget_leng (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n return yyleng;\n}\n\n/** Get the current token.\n * @param yyscanner The scanner object.\n */\n\nchar *Upget_text (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n return yytext;\n}\n\n/** Set the user-defined data. 
This data is never touched by the scanner.\n * @param user_defined The data to be associated with this scanner.\n * @param yyscanner The scanner object.\n */\nvoid Upset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n yyextra = user_defined ;\n}\n\n/** Set the current line number.\n * @param line_number\n * @param yyscanner The scanner object.\n */\nvoid Upset_lineno (int line_number , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n /* lineno is only valid if an input buffer exists. */\n if (! YY_CURRENT_BUFFER )\n yy_fatal_error( \"Upset_lineno called with no buffer\" , yyscanner); \n \n yylineno = line_number;\n}\n\n/** Set the current column.\n * @param line_number\n * @param yyscanner The scanner object.\n */\nvoid Upset_column (int column_no , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n /* column is only valid if an input buffer exists. */\n if (! YY_CURRENT_BUFFER )\n yy_fatal_error( \"Upset_column called with no buffer\" , yyscanner); \n \n yycolumn = column_no;\n}\n\n/** Set the input stream. 
This does not discard the current\n * input buffer.\n * @param in_str A readable stream.\n * @param yyscanner The scanner object.\n * @see Up_switch_to_buffer\n */\nvoid Upset_in (FILE * in_str , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n yyin = in_str ;\n}\n\nvoid Upset_out (FILE * out_str , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n yyout = out_str ;\n}\n\nint Upget_debug (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n return yy_flex_debug;\n}\n\nvoid Upset_debug (int bdebug , yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n yy_flex_debug = bdebug ;\n}\n\n/* Accessor methods for yylval and yylloc */\n\n/* User-visible API */\n\n/* Uplex_init is special because it creates the scanner itself, so it is\n * the ONLY reentrant function that doesn't take the scanner as the last argument.\n * That's why we explicitly handle the declaration, instead of using our macros.\n */\n\nint Uplex_init(yyscan_t* ptr_yy_globals)\n\n{\n if (ptr_yy_globals == NULL){\n errno = EINVAL;\n return 1;\n }\n\n *ptr_yy_globals = (yyscan_t) Upalloc ( sizeof( struct yyguts_t ), NULL );\n\n if (*ptr_yy_globals == NULL){\n errno = ENOMEM;\n return 1;\n }\n\n /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */\n memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));\n\n return yy_init_globals ( *ptr_yy_globals );\n}\n\n/* Uplex_init_extra has the same functionality as Uplex_init, but follows the\n * convention of taking the scanner as the last argument. 
Note however, that\n * this is a *pointer* to a scanner, as it will be allocated by this call (and\n * is the reason, too, why this function also must handle its own declaration).\n * The user defined value in the first argument will be available to Upalloc in\n * the yyextra field.\n */\n\nint Uplex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals )\n\n{\n struct yyguts_t dummy_yyguts;\n\n Upset_extra (yy_user_defined, &dummy_yyguts);\n\n if (ptr_yy_globals == NULL){\n errno = EINVAL;\n return 1;\n }\n\t\n *ptr_yy_globals = (yyscan_t) Upalloc ( sizeof( struct yyguts_t ), &dummy_yyguts );\n\t\n if (*ptr_yy_globals == NULL){\n errno = ENOMEM;\n return 1;\n }\n \n /* By setting to 0xAA, we expose bugs in\n yy_init_globals. Leave at 0x00 for releases. */\n memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));\n \n Upset_extra (yy_user_defined, *ptr_yy_globals);\n \n return yy_init_globals ( *ptr_yy_globals );\n}\n\nstatic int yy_init_globals (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n /* Initialization is the same as for the non-reentrant scanner.\n * This function is called from Uplex_destroy(), so don't allocate here.\n */\n\n yyg->yy_buffer_stack = 0;\n yyg->yy_buffer_stack_top = 0;\n yyg->yy_buffer_stack_max = 0;\n yyg->yy_c_buf_p = (char *) 0;\n yyg->yy_init = 0;\n yyg->yy_start = 0;\n\n yyg->yy_start_stack_ptr = 0;\n yyg->yy_start_stack_depth = 0;\n yyg->yy_start_stack = NULL;\n\n/* Defined in main.c */\n#ifdef YY_STDINIT\n yyin = stdin;\n yyout = stdout;\n#else\n yyin = (FILE *) 0;\n yyout = (FILE *) 0;\n#endif\n\n /* For future reference: Set errno on error, since we are called by\n * Uplex_init()\n */\n return 0;\n}\n\n/* Uplex_destroy is for both reentrant and non-reentrant scanners. */\nint Uplex_destroy (yyscan_t yyscanner)\n{\n struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;\n\n /* Pop the buffer stack, destroying each element. 
*/\n\twhile(YY_CURRENT_BUFFER){\n\t\tUp_delete_buffer(YY_CURRENT_BUFFER ,yyscanner );\n\t\tYY_CURRENT_BUFFER_LVALUE = NULL;\n\t\tUppop_buffer_state(yyscanner);\n\t}\n\n\t/* Destroy the stack itself. */\n\tUpfree(yyg->yy_buffer_stack ,yyscanner);\n\tyyg->yy_buffer_stack = NULL;\n\n /* Destroy the start condition stack. */\n Upfree(yyg->yy_start_stack ,yyscanner );\n yyg->yy_start_stack = NULL;\n\n /* Reset the globals. This is important in a non-reentrant scanner so the next time\n * Uplex() is called, initialization will occur. */\n yy_init_globals( yyscanner);\n\n /* Destroy the main struct (reentrant only). */\n Upfree ( yyscanner , yyscanner );\n yyscanner = NULL;\n return 0;\n}\n\n/*\n * Internal utility routines.\n */\n\n#ifndef yytext_ptr\nstatic void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner)\n{\n\tregister int i;\n\tfor ( i = 0; i < n; ++i )\n\t\ts1[i] = s2[i];\n}\n#endif\n\n#ifdef YY_NEED_STRLEN\nstatic int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner)\n{\n\tregister int n;\n\tfor ( n = 0; s[n]; ++n )\n\t\t;\n\n\treturn n;\n}\n#endif\n\nvoid *Upalloc (yy_size_t size , yyscan_t yyscanner)\n{\n\treturn (void *) malloc( size );\n}\n\nvoid *Uprealloc (void * ptr, yy_size_t size , yyscan_t yyscanner)\n{\n\t/* The cast to (char *) in the following accommodates both\n\t * implementations that use char* generic pointers, and those\n\t * that use void* generic pointers. 
It works with the latter\n\t * because both ANSI C and C++ allow castless assignment from\n\t * any pointer type to void*, and deal with argument conversions\n\t * as though doing an assignment.\n\t */\n\treturn (void *) realloc( (char *) ptr, size );\n}\n\nvoid Upfree (void * ptr , yyscan_t yyscanner)\n{\n\tfree( (char *) ptr );\t/* see Uprealloc() for (char *) cast */\n}\n\n#define YYTABLES_NAME \"yytables\"\n\n#line 353 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.lex\"\n\n\n\n" }, { "alpha_fraction": 0.6825705766677856, "alphanum_fraction": 0.6825705766677856, "avg_line_length": 27.5, "blob_id": "111fc3ed35f0ed35803650bcd59ed85ff2c6e45c", "content_id": "f9e7a8602d06e549bc0b955ae8bc8dedf55be135", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1027, "license_type": "permissive", "max_line_length": 100, "num_lines": 36, "path": "/src/vm/include/UpList.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPLIST_H\n#define UP_UPLIST_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpList {\n UpObject __base;\n UpObject** items;\n unsigned int itemCount;\n unsigned int itemsAllocated;\n};\n\n// *************************************************************************************************\n\nUpList* UpListCreate();\nUpList* UpListCreateWithList(UpList* self);\n\nunsigned int UpListCount(UpList* self);\nUpObject** UpListBuffer(UpList* self);\n\nUpObject* UpListGet(UpList* self, UpIndex index, UpObject* defaultValue);\nUpObject* UpListGetFront(UpList* self);\nUpObject* UpListGetBack(UpList* self);\n\nUpIndex UpListFind(UpList* self, UpObject* value);\n\nvoid UpListSet(UpList* self, UpIndex index, UpObject* value);\nUpIndex UpListAppend(UpList* self, UpObject* value);\nvoid UpListInsert(UpList* self, UpIndex index, UpObject* value);\n\nint UpListRemove(UpList* self, UpObject* value);\nint UpListRemoveAtIndex(UpList* self, UpIndex index);\nvoid UpListRemoveAll(UpList* 
self);\nUpObject* UpListPop(UpList* self);\n\n#endif // UP_UPLIST_H\n" }, { "alpha_fraction": 0.5133007764816284, "alphanum_fraction": 0.51927250623703, "avg_line_length": 32.48181915283203, "blob_id": "5591005d33314aedbc98028ef0036640d93fe7b0", "content_id": "f4fb852f2b580593f59089f0edda03dc6532f467", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3684, "license_type": "permissive", "max_line_length": 103, "num_lines": 110, "path": "/src/regex/UpRegex.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"UpRegex/UpRegex.h\"\n#include \"Up/Up.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n\n#include <stdio.h>\n#include <string.h>\n#include \"pcre/pcre.h\"\n\n// ************************************************************************************************\n\nstruct UpRegex {\n UpObject __base;\n pcre* re;\n};\n\n// ************************************************************************************************\n\nUpObject* UpRegexCreate(const char* pattern, bool isAnchored, bool isCaseSensitive,\n bool isMultiLine) {\n UpRegex* self = (UpRegex*)UpObjectCreateWithSize(sizeof(UpRegex));\n\n if (!pattern) {\n pattern = \"\";\n }\n\n int options = PCRE_JAVASCRIPT_COMPAT | PCRE_UTF8 | PCRE_NEWLINE_LF;\n if (isAnchored) {\n options |= PCRE_ANCHORED;\n }\n if (isCaseSensitive) {\n options |= PCRE_CASELESS;\n }\n if (isMultiLine) {\n options |= PCRE_MULTILINE;\n }\n\n const char* err;\n int erroffset;\n self->re = pcre_compile(pattern, options, &err, &erroffset, NULL);\n if (err) {\n UpSetError(\"Regular expression syntax error at offset %d\", erroffset);\n return NULL;\n }\n\n return (UpObject*)self;\n}\n\ntypedef struct {\n uint16_t size;\n char* name;\n} UpRegexNameEntry;\n\nUpObject* UpRegexMatch(UpRegex* self, const char* subject, int start, bool capture) {\n int ovector[999];\n int rc = pcre_exec(self->re, NULL, subject, 
strlen(subject), start, 0, ovector, 999);\n if (rc < 0) {\n // switch(rc) {\n // case PCRE_ERROR_NOMATCH : printf(\"String did not match the pattern\\n\"); break;\n // case PCRE_ERROR_NULL : printf(\"Something was null\\n\"); break;\n // case PCRE_ERROR_BADOPTION : printf(\"A bad option was passed\\n\"); break;\n // case PCRE_ERROR_BADMAGIC : printf(\"Magic number bad (compiled re corrupt?)\\n\"); break;\n // case PCRE_ERROR_UNKNOWN_NODE : printf(\"Something kooky in the compiled re\\n\"); break;\n // case PCRE_ERROR_NOMEMORY : printf(\"Ran out of memory\\n\"); break;\n // default : printf(\"Unknown error\\n\"); break;\n // }\n\n return UpNull();\n } else {\n UpList* results = UpListCreate();\n\n if (capture) {\n UpListAppend(results, (UpObject*)UpIntegerCreate(rc));\n\n for (int i = 0; i < rc*2; ++i) {\n UpInteger* value = UpIntegerCreate(ovector[i]);\n UpListAppend(results, (UpObject*)value);\n }\n\n int nameCount;\n int err = pcre_fullinfo(self->re, NULL, PCRE_INFO_NAMECOUNT, &nameCount);\n\n int entrySize;\n err = pcre_fullinfo(self->re, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrySize);\n\n char* entry;\n err = pcre_fullinfo(self->re, NULL, PCRE_INFO_NAMETABLE, &entry);\n\n UpListAppend(results, (UpObject*)UpIntegerCreate(nameCount));\n\n for (int i = 0; i < nameCount; ++i) {\n uint16_t captureIndex = (entry[0] << 8) | entry[1];\n const char* captureName = (const char*)(entry + 2);\n entry += entrySize;\n\n UpListAppend(results, (UpObject*)UpIntegerCreate(captureIndex));\n UpListAppend(results, (UpObject*)UpStringCreate(captureName));\n }\n } else {\n for (int i = 0; i < 2; ++i) {\n UpInteger* value = UpIntegerCreate(ovector[i]);\n UpListAppend(results, (UpObject*)value);\n }\n\n }\n\n return (UpObject*)results;\n }\n}\n" }, { "alpha_fraction": 0.7613168954849243, "alphanum_fraction": 0.7613168954849243, "avg_line_length": 25.88888931274414, "blob_id": "9eb7649db569afce7daaf883222ac12e14f3e494", "content_id": "7a09af4f91501bec2704fff01202a0be7749b978", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 243, "license_type": "permissive", "max_line_length": 76, "num_lines": 9, "path": "/src/vm/UpTerminal.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPTERMINAL_H\n#define UP_UPTERMINAL_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct termios* UpTerminalEnter(UpObject* self, FILE* stream);\nvoid UpTerminalRestore(UpObject* self, FILE* stream, struct termios* state);\n\n#endif // UP_UPTERMINAL_H\n" }, { "alpha_fraction": 0.6256157755851746, "alphanum_fraction": 0.6256157755851746, "avg_line_length": 21.55555534362793, "blob_id": "eeb31d6700dc503840278c4c8ae10449f2efd7e8", "content_id": "e6b80a476220b6f301f892f2a5ef546c102bef11", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1015, "license_type": "permissive", "max_line_length": 99, "num_lines": 45, "path": "/src/vm/include/UpFunction.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPFUNCTION_H\n#define UP_UPFUNCTION_H\n\n#include \"Up/UpObject.h\"\n#include \"Up/UpType.h\"\n\nstruct UpArgumentDef {\n UpSymbol name;\n UpType* type;\n};\n\nstruct UpFunctionDef {\n UP_PROBE_ID;\n char* name;\n char* sourcePath;\n char* moduleName;\n\tUpSyntax* ast;\n UpArray* lines;\n UpBuffer* ops;\n UpScope* scope;\n UpArray* arguments;\n UpType* returnType;\n int argsCount;\n int thisIndex;\n};\n\nstruct UpFunction {\n UpObject __base;\n UpFunctionDef* def;\n UpVariables* closure;\n UpObject* boundThis;\n};\n\n// ************************************************************************************************\n\nUpFunction* UpFunctionCreate(UpFunctionDef* def, UpVariables* closure, UpObject* boundThis);\n\nvoid UpFunctionInit(UpFunction* self, const char* source, const char* moduleName,\n const char* sourcePath, UpObject* boundThis);\n\nUpVariables* UpFunctionGetClosure(UpFunction* self);\n\nchar* 
UpFunctionGetName(UpFunction* self);\n\n#endif // UP_UPFUNCTION_H\n" }, { "alpha_fraction": 0.7658473253250122, "alphanum_fraction": 0.7658473253250122, "avg_line_length": 25.620689392089844, "blob_id": "b0eba244934ab55806bc09d491ff878ddb4bdc42", "content_id": "bda3d688566cae4c9731621d76a4339f9ca219c6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 773, "license_type": "permissive", "max_line_length": 66, "num_lines": 29, "path": "/src/vm/UpLibrary.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPLIBRARY_H\n#define UP_UPLIBRARY_H\n\n#include \"Up/UpGlobal.h\"\n\nUpObject* UpGetInternalObject(UpObject* object, const char* name);\n\nvoid UpTerminate(const char* reason);\n\nvoid UpPrint(UpObject* object);\n\nvoid UpSleep(unsigned int seconds);\n\nFILE* UpGetStream(char* name);\nUpObject* UpRead(UpObject* self, FILE* stream, int numberOfChars);\nUpObject* UpReadLine(UpObject* self, FILE* stream);\nint UpWrite(UpObject* object, FILE* stream);\n\ndouble UpGetRandomNumber();\n\nint UpShiftRight(int bits, int amount);\nint UpShiftRightZeroFill(int bits, int amount);\nint UpShiftLeft(int bits, int amount);\nint UpBitwiseAnd(int left, int right);\nint UpBitwiseOr(int left, int right);\nint UpBitwiseXor(int left, int right);\nint UpBitwiseNot(int bits);\n\n#endif // UP_UPLIBRARY_H\n" }, { "alpha_fraction": 0.7400809526443481, "alphanum_fraction": 0.7425101399421692, "avg_line_length": 25.276596069335938, "blob_id": "e67e0b648f9b1b1e4f10419b6472b61510626a70", "content_id": "24d44472dff9575e21aada5ffab89e8889fea8ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1235, "license_type": "permissive", "max_line_length": 89, "num_lines": 47, "path": "/src/vm/UpScanner.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPSCANNER_H\n#define UP_UPSCANNER_H\n\n#include 
\"Up/UpGlobal.h\"\n#include \"Up.tab.h\"\n\nstruct UpScanner {\n int err;\n UpSyntax* rootSyntax;\n void* yyscanner;\n\n char* source;\n char* cursor;\n char* cursorMax;\n UpArray* tokenQueue;\n int line;\n int column;\n int indentLevel;\n int lastToken;\n int lastIndent;\n UpException* error;\n\n bool containsGenerator:1;\n bool addTrailingLine:1;\n bool firstToken:1;\n};\n\nUpScanner* UpScannerCreate();\n\nvoid UpScannerSetInput(UpScanner* self, char* input);\n\nint UpScannerGetLineNumber(UpScanner* self);\nint UpScannerGetColumnNumber(UpScanner* self);\nvoid UpScannerLocate(UpScanner* self, const char* text, yyltype* yylloc);\n\nint UpScannerRead(UpScanner* self, char* buf, long int max_size);\n\nint UpScannerNext(UpScanner* self, yystype* yylval, yyltype* yylloc);\n\nvoid UpScannerSetException(UpScanner* self, UpException* exc);\nvoid UpScannerSetError(UpScanner* self, const char* description);\nvoid UpScannerSetErrorAt(UpScanner* self, const char* description, int line, int column);\nvoid UpScannerSetErrorAtNode(UpScanner* self, const char* description, UpSyntax* column);\n\nvoid UpScannerPushToken(UpScanner* self, int token);\n\n#endif // UP_UPSCANNER_H\n" }, { "alpha_fraction": 0.6189780831336975, "alphanum_fraction": 0.6211678981781006, "avg_line_length": 30.837209701538086, "blob_id": "44ced0510ff0833c8b7c86eeb6cf2aadce084a54", "content_id": "67a44ad5c675a38b8aa2c0015c3b2023ad928508", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1370, "license_type": "permissive", "max_line_length": 99, "num_lines": 43, "path": "/src/vm/UpFunction.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpFunction.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpArena.h\"\n\n// 
************************************************************************************************\n\nUpFunction* UpFunctionCreate(UpFunctionDef* def, UpVariables* closure,\n UpObject* boundThis) {\n UpFunction* self = (UpFunction*)UpObjectCreateWithClass(UP_BUILTIN(function));\n self->def = def;\n self->boundThis = boundThis;\n self->closure = closure;\n return self;\n}\n\nvoid UpFunctionInit(UpFunction* self, const char* source, const char* sourcePath,\n const char* moduleName, UpObject* boundThis) {\n UpCompileSource(source, sourcePath, moduleName, &(self->def));\n if (boundThis && !UpIsTypeOf(boundThis, UP_BUILTIN(null))) {\n self->boundThis = boundThis;\n }\n\n UpObject** variables = UpArenaNewArray(UpGetHeap(), UpObject, 1);\n variables[0] = UpGetBuiltinModule();\n\n UpVariables* locals = UpArenaNew(UpGetHeap(), UpVariables);\n locals->variables = variables;\n locals->count = 1;\n locals->previous = NULL;\n self->closure = locals;\n}\n\nUpVariables* UpFunctionGetClosure(UpFunction* self) {\n return self->closure;\n}\n\nchar* UpFunctionGetName(UpFunction* self) {\n return self->def ? 
self->def->name : \"\";\n}\n" }, { "alpha_fraction": 0.37442922592163086, "alphanum_fraction": 0.37442922592163086, "avg_line_length": 20.799999237060547, "blob_id": "841e832b3043f95d891e577408dd0d35148e0289", "content_id": "3bb7b71449566bc7100845547e55abbe9fc29701", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 219, "license_type": "permissive", "max_line_length": 100, "num_lines": 10, "path": "/src/vm/include/UpBool.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPBOOL_H\n#define UP_UPBOOL_H\n\n#include \"Up/UpObject.h\"\n\n// *************************************************************************************************\n\nUpObject* UpBoolCreate();\n\n#endif // UP_UPBOOL_H\n" }, { "alpha_fraction": 0.588807225227356, "alphanum_fraction": 0.5910599827766418, "avg_line_length": 33.004032135009766, "blob_id": "2dbffb4bb6abf3fd44077e19364561f314105ddb", "content_id": "5af016035c6d7a695d84aef9f6e7e34437bc78d8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8434, "license_type": "permissive", "max_line_length": 99, "num_lines": 248, "path": "/src/vm/UpFloat.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nUpFloat* UpFloatCreate(double value) {\n return UpFloatCreateWithUnit(value, UpNullSymbol);\n}\n\nUpFloat* UpFloatCreateWithUnit(double value, UpSymbol unit) {\n if (!unit && value >= UpSharedFloatMin && value <= UpSharedFloatMax) {\n int v = value;\n if (v == value) {\n UpFloat** shared = UpGetSharedFloats();\n 
if (shared) {\n return shared[v-UpSharedFloatMin];\n }\n }\n }\n\n UpClass* cls = UpGetBuiltinClasses()->floatClass;\n UpFloat* self = (UpFloat*)UpClassInstantiate(cls);\n self->value = value;\n self->unit = unit;\n COUNT_FLOAT(self->value);\n return self;\n}\n\nconst char* UpFloatToCString(UpFloat* self) {\n static char buf[1024];\n if (self->unit) {\n sprintf(buf, \"%.14g%s\", self->value, UpGetSymbolName(self->unit));\n } else {\n sprintf(buf, \"%.14g\", self->value);\n }\n buf[1023] = 0;\n return buf;\n}\n\nUpString* UpFloatToString(UpFloat* self) {\n const char* buf = UpFloatToCString(self);\n return UpStringCreate(buf);\n}\n\nbool UpFloatEquals(UpFloat* self, UpObject* other) {\n double result;\n if (UpAsFloat(other, &result)) {\n return self->value == result;\n } else {\n return false;\n }\n}\n\nbool UpFloatGreaterThan(UpFloat* self, UpObject* other) {\n double result;\n if (UpAsFloat(other, &result)) {\n return self->value > result;\n } else {\n return false;\n }\n}\n\nbool UpFloatGreaterThanEquals(UpFloat* self, UpObject* other) {\n double result;\n if (UpAsFloat(other, &result)) {\n return self->value >= result;\n } else {\n return false;\n }\n}\n\nbool UpFloatLessThan(UpFloat* self, UpObject* other) {\n double result;\n if (UpAsFloat(other, &result)) {\n return self->value < result;\n } else {\n return false;\n }\n}\n\nbool UpFloatLessThanEquals(UpFloat* self, UpObject* other) {\n double result;\n if (UpAsFloat(other, &result)) {\n return self->value <= result;\n } else {\n return false;\n }\n}\n\nUpObject* UpFloatAdd(UpFloat* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value + n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value + n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value + n->value, unit);\n } else {\n UpSetError(\"Invalid operand for +\");\n return NULL;\n }\n}\n\nUpObject* UpFloatSubtract(UpFloat* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value - n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value - n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value - n->value, unit);\n } else {\n UpSetError(\"Invalid operand for -\");\n return NULL;\n }\n}\n\nUpObject* UpFloatMultiply(UpFloat* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value * n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value * n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value * n->value, unit);\n } else {\n UpSetError(\"Invalid operand for *\");\n return NULL;\n }\n}\n\nUpObject* UpFloatDivide(UpFloat* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value / n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value / n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value / n->value, unit);\n } else {\n UpSetError(\"Invalid operand for /\");\n return NULL;\n }\n}\n\nUpObject* UpFloatMod(UpFloat* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(fmod(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(fmod(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(fmod(self->value, n->value), unit);\n } else {\n UpSetError(\"%s\", \"Invalid operand for //\");\n return NULL;\n }\n}\n\nUpObject* UpFloatPow(UpFloat* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(pow(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(pow(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(pow(self->value, n->value), unit);\n } else {\n UpSetError(\"Invalid operand for **\");\n return NULL;\n }\n}\n\nUpObject* UpFloatNegate(UpFloat* self) {\n return (UpObject*)UpFloatCreateWithUnit(-self->value, self->unit);\n}\n\nUpObject* UpFloatModf(UpFloat* self) {\n double left;\n double right = modf(self->value, &left);\n\n UpList* list = UpListCreate();\n UpListAppend(list, (UpObject*)UpFloatCreate(left));\n UpListAppend(list, (UpObject*)UpFloatCreate(right));\n return (UpObject*)list;\n}\n" }, { "alpha_fraction": 0.4972760081291199, "alphanum_fraction": 0.5004266500473022, "avg_line_length": 29.90060806274414, "blob_id": "821b0799aa937ba7b28a1c79908b15d91554fb2d", "content_id": "d4a5a4df7b30c38be173398222d9a2c43cd04492", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15235, "license_type": "permissive", "max_line_length": 100, "num_lines": 493, "path": "/make/metric/Analyst.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport types, os.path, struct, json, fnmatch, pandas\nfrom ..util import formatTable, importLocalModule\n\n# 
**************************************************************************************************\n\nkProbeIterate = 1\nkProbeBegin = 2\nkProbeBegin2 = 3\nkProbeEnd = 4\nkProbeEnd2 = 5\nkProbeCount = 6\nkProbeCount2 = 7\nkProbeLog = 8\nkProbeSymbol = 9\n\nsortAscending = 1\nsortDescending = 2\n\nmetricsMetadata = {\n 'name': 'Metrics',\n 'index': ['METRIC'],\n 'columns': [{'name': 'METRIC'}, {'name': 'VALUE', 'sort': sortDescending}],\n 'rows': {}\n}\n\ndefaultCatalog = {\n 'analysts': [],\n 'scripts': [],\n 'scriptRoot': ''\n}\n\n# **************************************************************************************************\n\ndef probe(probeName, summarize=False, format=None):\n def handler(cls):\n if not cls.probeNames:\n cls.probeNames = [probeName]\n else:\n cls.probeNames.append(probeName)\n \n if summarize:\n @metric(format=format)\n def measurer(self, **tables):\n total = 0\n counts = self.counts.get(probeName, {})\n for probeId,count in counts.iteritems():\n total += len(count)\n return total\n\n setattr(cls, probeName, measurer)\n return cls\n return handler\n\ndef table(fn):\n fn.isTable = True\n fn.columns = []\n fn.formats = {}\n fn.index = []\n fn.sort = {}\n return fn\n\ndef column(name, format=None, sort=None, index=False):\n def handler(fn):\n fn.columns.insert(0, name)\n fn.formats[name] = format if format else ''\n if sort:\n fn.sort[name] = sort\n if index:\n fn.index.insert(0, name)\n return fn\n return handler\n\ndef metric(fn=None, format='', persist=True):\n \"\"\" Expected to return a dictionary of numbers. 
\"\"\"\n\n if hasattr(fn, \"__call__\"):\n fn.isSummary = True\n fn.format = ''\n fn.isPersisted = True\n return fn\n else:\n def handler(fn):\n fn.isSummary = True\n fn.isPersisted = persist\n fn.format = format\n return fn\n return handler\n\n# **************************************************************************************************\n\nclass Analyst(object):\n probeNames = None\n repeat = False\n inputPath = None\n breaker = None\n\n @classmethod\n def scanAnalysts(self, modules):\n for module in modules:\n for analystClass in vars(module).itervalues():\n if isinstance(analystClass, types.TypeType) and issubclass(analystClass, Analyst) \\\n and analystClass not in [Analyst, ProbeAnalyst] and analystClass.probeNames:\n yield analystClass\n\n @classmethod\n def scanForAnalyst(self, modules, analystName):\n for analystClass in self.scanAnalysts(modules):\n if analystClass.__name__ == analystName:\n return analystClass\n\n @classmethod\n def scanCatalog(self, modules):\n catalog = dict(defaultCatalog)\n analysts = catalog['analysts']\n\n for analystClass in self.scanAnalysts(modules):\n analyst = analystClass()\n\n tables = [name for name,tabulator in analyst.tabulators]\n metrics = [name for name,measurer in analyst.measurers]\n if tables or metrics:\n analysts.append({\n 'name': analyst.analystName,\n 'repeat': analyst.repeat,\n 'probes': analyst.probeNames,\n 'tables': tables,\n 'metrics': metrics,\n })\n\n scriptsPath = os.path.normpath(os.path.join(__file__, '..', '..', '..', 'metrics'))\n catalog['scriptRoot'] = scriptsPath\n\n scripts = catalog['scripts']\n for root, dirs, files in os.walk(scriptsPath):\n for fileName in files:\n sourcePath = os.path.join(root, fileName)\n if fnmatch.fnmatch(sourcePath, \"*.up\"):\n scripts.append(sourcePath)\n\n return catalog\n\n @property\n def analystName(self):\n return self.__class__.__name__\n\n @property\n def tabulators(self):\n for name in dir(self):\n tabulator = getattr(self, name)\n if getattr(tabulator, 
\"isTable\", None):\n yield name,tabulator\n\n @property\n def measurers(self):\n for name in dir(self):\n measurer = getattr(self, name)\n if getattr(measurer, \"isSummary\", None):\n yield name,measurer\n\n def analyze(self, inputPath=None):\n pass\n\n def tabulate(self):\n pass\n\n def summarize(self, **tables):\n pass\n\n def finalize(self, **metrics):\n pass\n\n def saveTable(self, tableName, tabulator, table):\n analystsDirPath = os.path.dirname(self.inputPath)\n\n tablePath = os.path.join(analystsDirPath, \"%s.csv\" % tableName)\n f = open(tablePath, 'w')\n table.to_csv(f)\n f.close()\n\n metadataPath = os.path.join(analystsDirPath, \"%s.json\" % tableName)\n entry = self._entryForTable(tabulator)\n\n metadata = json.dumps(entry)\n f = open(metadataPath, 'w')\n f.write(metadata)\n f.close()\n\n def saveMetrics(self, metrics):\n analystsPath = os.path.dirname(self.inputPath)\n\n tablePath = os.path.join(analystsPath, \"Metrics.csv\")\n metricMap = openTableAsMap(tablePath)\n\n # XXXjoe Will DataFrame.combine_first() do all this work for me?\n rows = []\n for metricName,(measurer, metric) in metrics.iteritems():\n if measurer.isPersisted:\n metricMap[metricName] = metric\n\n for s,v in metricMap.iteritems():\n rows.append((s,v))\n\n columns = ['METRIC', 'VALUE']\n table = pandas.DataFrame.from_records(rows, columns=columns, index='METRIC')\n f = open(tablePath, 'w')\n table.to_csv(f)\n f.close() \n\n metadataPath = os.path.join(analystsPath, \"Metrics.json\")\n if os.path.isfile(metadataPath):\n entry = self._loadTable(metadataPath)\n else:\n entry = metricsMetadata\n\n metricRows = entry['rows']\n for metricName,(measurer, metric) in metrics.iteritems():\n if measurer.isPersisted:\n metricRows[metricName] = measurer.format\n\n metadata = json.dumps(entry)\n f = open(metadataPath, 'w')\n f.write(metadata)\n f.close()\n\n def _loadTable(self, tablePath):\n f = open(tablePath, 'r')\n text = f.read()\n f.close()\n\n return json.loads(text)\n\n def 
_entryForTable(self, tabulator):\n columns = []\n for name in tabulator.columns:\n column = {'name': name}\n if name in tabulator.sort:\n column['sort'] = tabulator.sort[name]\n if name in tabulator.formats:\n column['format'] = tabulator.formats[name]\n columns.append(column)\n\n return {\n 'name': tabulator.__name__,\n 'columns': columns,\n 'index': tabulator.index\n }\n\n# **************************************************************************************************\n\nclass ProbeAnalyst(Analyst):\n def stackTrace(self, stack):\n pass\n\n def analyze(self, inputPath=None):\n self.inputPath = inputPath\n\n try:\n probeFile = open(inputPath, 'rb')\n except Exception,exc:\n raise Exception(\"Probes file not found at %s\" % inputPath)\n\n tableReps = []\n metricReps = []\n\n probeBuffer = probeFile.read()\n probeOffset = 0\n\n while True:\n probeOffset = self._readProbes(probeBuffer, probeOffset)\n tables, metrics = self._analyzeProbes()\n tableReps.append(tables)\n metricReps.append(metrics)\n\n if probeOffset == -1:\n break\n\n probeFile.close()\n\n repeatCount = len(tableReps)\n if len(tableReps) > 1:\n tables, metrics = self._combineRepetitions(tableReps, metricReps)\n \n for name,(tabulator,table) in tables.iteritems():\n self.saveTable(name, tabulator, table)\n\n self.saveMetrics(metrics)\n\n def _readProbes(self, buf, offset):\n logSize,done, = struct.unpack_from('IB', buf, offset)\n offset += 5\n\n symbolMap = {}\n startMaps = {}\n stackMaps = {}\n timeMaps = {}\n countMaps = {}\n\n self.times = timeMaps\n self.counts = countMaps\n\n breaker = self.breaker\n\n bufferSize = len(buf)\n while offset < bufferSize:\n b, = struct.unpack_from('B', buf, offset)\n if b == 0:\n return -1\n elif b == kProbeIterate:\n offset += 1\n return offset\n elif b == kProbeBegin:\n entry = struct.unpack_from('BIId', buf, offset)\n _,n,d,time = entry\n offset += 24\n\n probeName = symbolMap.get(n, \"\")\n probeId = symbolMap.get(d, \"\")\n time = int(time)*1e-6\n\n if 
probeName not in stackMaps:\n stackMaps[probeName] = [probeId]\n else:\n stackMaps[probeName].append(probeId)\n\n if probeName not in startMaps:\n starts = startMaps[probeName] = {}\n else:\n starts = startMaps[probeName]\n\n if probeId not in starts:\n starts[probeId] = [time]\n else:\n starts[probeId].append(time)\n elif b == kProbeEnd:\n entry = struct.unpack_from('BIId', buf, offset)\n _,n,d,time = entry\n offset += 24\n\n probeName = symbolMap.get(n, \"\")\n probeId = symbolMap.get(d, \"\")\n time = int(time)*1e-6\n\n start = startMaps[probeName][probeId].pop()\n elapsed = time - start\n\n stack = stackMaps[probeName]\n stack.pop()\n\n # For recursion, only count the outermost time\n if probeId in stack:\n elapsed = 0\n\n if probeName not in timeMaps:\n times = timeMaps[probeName] = {}\n else:\n times = timeMaps[probeName]\n\n if probeId not in times:\n times[probeId] = [elapsed]\n else:\n times[probeId].append(elapsed)\n elif b == kProbeCount:\n _,n,d,data = struct.unpack_from('BIId', buf, offset)\n offset += 24\n\n probeName = symbolMap.get(n, \"\")\n probeId = symbolMap.get(d, \"\")\n\n # if breaker:\n # stackTraceProbe = breaker(b, (probeName, probeId, data))\n # if stackTraceProbe:\n # stack = stackMaps.get(stackTraceProbe, [])\n # self.stackTrace(stack)\n # # break\n\n if probeName not in countMaps:\n counts = countMaps[probeName] = {}\n else:\n counts = countMaps[probeName]\n\n if probeId not in counts:\n counts[probeId] = [data]\n else:\n counts[probeId].append(data)\n elif b == kProbeCount2:\n _,n,d,d2,data = struct.unpack_from('BIIId', buf, offset)\n offset += 24\n\n probeName = symbolMap.get(n, \"\")\n probeId = symbolMap.get(d, \"\")\n probeId2 = symbolMap.get(d2, \"\")\n\n # if breaker:\n # stackTraceProbe = breaker(b, (probeName, probeId, probeId2, data))\n # if stackTraceProbe:\n # stack = stackMaps.get(stackTraceProbe, [])\n # self.stackTrace(stack)\n # # break\n\n if probeName not in countMaps:\n counts = countMaps[probeName] = {}\n else:\n 
counts = countMaps[probeName]\n\n key = (probeId,probeId2)\n if key not in counts:\n counts[key] = [data]\n else:\n counts[key].append(data)\n elif b == kProbeLog:\n _,n,length = struct.unpack_from('BII', buf, offset)\n offset += 12\n\n content = buf[offset:offset+length]\n offset += length\n print content\n elif b == kProbeSymbol:\n _,symbol,length = struct.unpack_from('BII', buf, offset)\n offset += 12\n\n content = buf[offset:offset+length]\n offset += length\n \n symbolMap[symbol] = content\n\n # print \"%s:: '%s'\" % (symbol, content)\n else:\n pass\n # sys.stderr.write(\"Unrecognized probe %s\" % b)\n\n return -1\n\n def _analyzeProbes(self):\n self.tabulate()\n\n tables = {}\n tabulatorTables = {}\n for name,tabulator in self.tabulators:\n table = tabulator()\n tables[name] = (tabulator, table)\n tabulatorTables[name] = table\n\n self.summarize(**tabulatorTables)\n\n metrics = {}\n for name,measurer in self.measurers:\n metric = measurer(**tabulatorTables)\n metrics[name] = (measurer, metric)\n\n self.finalize(**metrics)\n\n return tables, metrics\n\n def _combineRepetitions(self, tableReps, metricReps):\n tableAvgs = {}\n metricAvgs = {}\n\n repeatCount = len(tableReps)\n\n for tables in tableReps:\n for tableName in tables:\n if tableName in tableAvgs:\n tabler,next = tables[tableName]\n tabler,avg = tableAvgs[tableName]\n\n for name in avg.columns:\n col = pandas.Series([min(a,b) for a,b in zip(avg[name], next[name])])\n col.index = avg.index\n avg[name] = col\n else:\n tableAvgs[tableName] = tables[tableName]\n\n for metrics in metricReps:\n for metricName in metrics:\n if metricName in metricAvgs:\n measurer,next = metrics[metricName]\n measurer,avg = metricAvgs[metricName]\n metricAvgs[metricName] = (measurer,min(avg, next))\n else:\n metricAvgs[metricName] = metrics[metricName]\n\n return tableAvgs, metricAvgs\n\n# **************************************************************************************************\n\ndef openTableAsMap(tablePath):\n 
tableMap = {}\n if os.path.isfile(tablePath):\n table = pandas.DataFrame.from_csv(tablePath)\n for columnName,series in table.iterrows():\n for value in series:\n tableMap[columnName] = value\n return tableMap\n" }, { "alpha_fraction": 0.567394495010376, "alphanum_fraction": 0.5722233653068542, "avg_line_length": 32.65724563598633, "blob_id": "0b345518f721f74687505bfe7f498494b41b42e4", "content_id": "f7aafd6fc9f40a8e21e279db23f1f7baf994fd49", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9526, "license_type": "permissive", "max_line_length": 99, "num_lines": 283, "path": "/src/vm/UpLong.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic char* _EncodeInteger(uint64_t n, uint8_t base, bool pad, char* buf, int bufsize) {\n static char* bases = \"0123456789abcdefghijklmnopqrstuvwxyz\";\n\n char* p = buf + bufsize -1;\n *p-- = '\\0';\n\n while (n) {\n int x = n % base;\n *p-- = bases[x];\n n /= base;\n }\n\n if (pad) {\n return p;\n } else {\n return p+1;\n }\n}\n\nstatic char* _ToString(UpLong* self, int base, char* buf, int bufsize) {\n if (base == 10 || base == 8 || base == 16) {\n if (self->unit) {\n sprintf(buf, base == 10 ? \"%lliL%s\" : (base == 8 ? \"%lloL%s\" : \"%llxL%s\"), self->value,\n UpGetSymbolName(self->unit));\n } else {\n sprintf(buf, base == 10 ? \"%lldL\" : (base == 8 ? 
\"%lloL\" : \"%llxL\"), self->value);\n }\n return buf;\n } else {\n static char buf2[sizeof(self->value)*8+1];\n char* encoded = _EncodeInteger(self->value, base, false, buf2, sizeof(buf2));\n if (self->unit) {\n sprintf(buf, \"%s%s\", encoded, UpGetSymbolName(self->unit));\n return buf;\n } else {\n return encoded;\n }\n }\n}\n\n// ************************************************************************************************\n\nUpLong* UpLongCreate(long long value) {\n return UpLongCreateWithUnit(value, UpNullSymbol);\n}\n\nUpLong* UpLongCreateWithUnit(long long value, UpSymbol unit) {\n if (!unit && value >= UpSharedLongMin && value <= UpSharedLongMax) {\n UpLong** shared = UpGetSharedLongs();\n if (shared) {\n return shared[value-UpSharedLongMin];\n }\n }\n\n UpClass* cls = UpGetBuiltinClasses()->longClass;\n UpLong* self = (UpLong*)UpClassInstantiate(cls);\n self->value = value;\n self->unit = unit;\n COUNT_LONG(self->value);\n return self;\n}\n\nvoid UpLongInit(UpLong* self, const char* value, int base, const char* unit) {\n self->value = strtoll(value, NULL, base);\n if (unit) {\n self->unit = UpGetSymbol(unit);\n }\n}\n\nconst char* UpLongToCString(UpLong* self, int base) {\n static char buf[sizeof(self->value)*8+1];\n return _ToString(self, base, buf, sizeof(buf));\n}\n\nUpString* UpLongToString(UpLong* self, int base) {\n if (base > 36) {\n UpSetError(\"Base exceeds maximum of 36 for string conversion\");\n return NULL;\n }\n\n char buf[sizeof(self->value)*8+1];\n char* encoded = _ToString(self, base, buf, sizeof(buf));\n return UpStringCreate(encoded);\n}\n\nbool UpLongEquals(UpLong* self, UpObject* other) {\n long long result;\n if (UpAsLong(other, &result)) {\n return self->value == result;\n } else {\n return false;\n }\n}\n\nbool UpLongGreaterThan(UpLong* self, UpObject* other) {\n long long result;\n if (UpAsLong(other, &result)) {\n return self->value > result;\n } else {\n return false;\n }\n}\n\nbool UpLongGreaterThanEquals(UpLong* self, 
UpObject* other) {\n long long result;\n if (UpAsLong(other, &result)) {\n return self->value >= result;\n } else {\n return false;\n }\n}\n\nbool UpLongLessThan(UpLong* self, UpObject* other) {\n long long result;\n if (UpAsLong(other, &result)) {\n return self->value < result;\n } else {\n return false;\n }\n}\n\nbool UpLongLessThanEquals(UpLong* self, UpObject* other) {\n long long result;\n if (UpAsLong(other, &result)) {\n return self->value <= result;\n } else {\n return false;\n }\n}\n\nUpObject* UpLongAdd(UpLong* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value + n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value + n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value + n->value, unit);\n } else {\n UpSetError(\"Invalid operand for +\");\n return NULL;\n }\n}\n\nUpObject* UpLongSubtract(UpLong* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value - n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value - n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value - n->value, unit);\n } else {\n UpSetError(\"Invalid operand for -\");\n return NULL;\n }\n}\n\nUpObject* UpLongMultiply(UpLong* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value * n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value * n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value * n->value, unit);\n } else {\n UpSetError(\"Invalid operand for *\");\n return NULL;\n }\n}\n\nUpObject* UpLongDivide(UpLong* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value / n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value / n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(self->value / n->value, unit);\n } else {\n UpSetError(\"Invalid operand for /\");\n return NULL;\n }\n}\n\nUpObject* UpLongMod(UpLong* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value % n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(self->value % n->value, unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n if (n->value == 0) {\n UpSetError(\"Divide by zero\");\n return NULL;\n }\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(fmod(self->value, n->value), unit);\n } else {\n UpSetError(\"%s\", \"Invalid operand for //\");\n return NULL;\n }\n}\n\nUpObject* UpLongPow(UpLong* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(long))) {\n UpLong* n = (UpLong*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(pow(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(integer))) {\n UpInteger* n = (UpInteger*)other;\n UpSymbol unit = self->unit ? self->unit : n->unit;\n return (UpObject*)UpLongCreateWithUnit(pow(self->value, n->value), unit);\n } else if (UpIsTypeOf(other, UP_BUILTIN(float))) {\n UpFloat* n = (UpFloat*)other;\n UpSymbol unit = self->unit ? 
self->unit : n->unit;\n return (UpObject*)UpFloatCreateWithUnit(pow(self->value, n->value), unit);\n } else {\n UpSetError(\"Invalid operand for **\");\n return NULL;\n }\n}\n\nUpObject* UpLongNegate(UpLong* self) {\n return (UpObject*)UpLongCreateWithUnit(-self->value, self->unit);\n}\n" }, { "alpha_fraction": 0.5302056670188904, "alphanum_fraction": 0.5308483242988586, "avg_line_length": 26.76785659790039, "blob_id": "f4689f43e1b56e26f9a8cad61d2836a45825e525", "content_id": "40c463e6f26ca5dfb54f4b65233774f4df47d01b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1556, "license_type": "permissive", "max_line_length": 99, "num_lines": 56, "path": "/src/vm/UpCLibrary.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpCLibrary.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpArena.h\"\n#include \"UpArray.h\"\n\n#include \"dlfcn.h\"\n\n// ************************************************************************************************\n\nstatic void _LoadLibrary(UpCLibrary* self, const char* name) {\n UpArray* searchPaths = UpGetSearchPaths();\n UpArena* heap = UpGetHeap();\n\n int searchCount = UpArrayCount(searchPaths);\n for (int i = 0; i < searchCount; ++i) {\n const char* searchPath;\n UpArrayGet(searchPaths, i, &searchPath);\n char* modulePath = UpArenaFormatString(heap, \"%s/%s.so\", searchPath, name);\n \n void* library = dlopen(modulePath, RTLD_LAZY);\n if (library) {\n self->path = modulePath;\n self->library = library;\n break;\n }\n }\n}\n\n// ************************************************************************************************\n\nUpCLibrary* UpCLibraryCreate(const char* name) {\n UpCLibrary* self = UpArenaNew(UpGetHeap(), UpCLibrary);\n self->name = UpArenaCopyString(UpGetHeap(), name);\n return self;\n}\n\nvoid UpCLibraryClose(UpCLibrary* self) {\n if (self->library) {\n dlclose(self->library);\n 
self->library = NULL;\n }\n}\n\nvoid* UpCLibraryLoad(UpCLibrary* self, const char* name) {\n if (!self->library && self->name) {\n _LoadLibrary(self, self->name);\n }\n\n if (self->library) {\n return dlsym(self->library, name);\n } else {\n return dlsym(RTLD_SELF, name);\n }\n}\n" }, { "alpha_fraction": 0.5290685892105103, "alphanum_fraction": 0.5322872400283813, "avg_line_length": 24.487178802490234, "blob_id": "0a2aa907e785449dae83e2258cc23313995dd08b", "content_id": "dfb57a56b0da0e5c62bdaad2c94cad7f8b590707", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4971, "license_type": "permissive", "max_line_length": 100, "num_lines": 195, "path": "/src/vm/UpLibrary.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpGlobal.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpFunction.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpCPointer.h\"\n#include \"Up/UpException.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpBool.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n#include \"UpIntTable.h\"\n\n#include <sys/time.h>\n\n// ************************************************************************************************\n\nconst int kReadBufferSize = 8096;\n\n// ************************************************************************************************\n\nUpObject* UpGetInternalObject(UpObject* object, const char* name) {\n if (!strcmp(name, \"undefined\")) {\n return UpUndefined();\n } else if (!strcmp(name, \"null\")) {\n return UpNull();\n } else if (!strcmp(name, \"true\")) {\n return UpTrue();\n } else if (!strcmp(name, \"false\")) {\n return UpFalse();\n } else if (!strcmp(name, \"closed\")) {\n return UpClosed();\n } else if (!strcmp(name, 
\"eval\")) {\n return UpEval();\n } else {\n return UpUndefined();\n }\n}\n\nvoid UpTerminate(const char* reason) {\n fprintf(stderr, \"%s\\n\", reason);\n exit(1);\n}\n\n// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\nvoid UpPrint(UpObject* object) {\n if (object) {\n if (!UpIsTypeOf(object, UP_BUILTIN(string))) {\n UpSetError(\"Illegal print with non-string\");\n return;\n }\n\n UpString* str = (UpString*)object;\n\n if (UpProbeLogEnabled) {\n DO_LOG(str->value);\n } else {\n FILE* stream = UpGetLogStream();\n fputs(str->value, stream);\n fputs(\"\\n\", stream);\n fflush(stdout);\n }\n } else {\n UpSetError(\"Illegal print\");\n }\n}\n\nvoid UpSleep(unsigned int seconds) {\n sleep(seconds);\n}\n\n// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\nFILE* UpGetStream(char* name) {\n if (!strcmp(name, \"stdin\")) {\n return stdin;\n } else if (!strcmp(name, \"stdout\")) {\n return stdout;\n } else if (!strcmp(name, \"stderr\")) {\n return stderr;\n } else {\n return NULL;\n }\n}\n\nUpObject* UpRead(UpObject* self, FILE* stream, int numberOfChars) {\n if (!stream) {\n UpSetError(\"Can't read from stream\");\n return NULL;\n }\n\n char buf[kReadBufferSize];\n // XXXjoe Reallocate buffer if smaller than numberOfChars\n int len = fread(buf, sizeof(char), numberOfChars > 0 ? 
numberOfChars : kReadBufferSize, stream);\n\n if (len > 0) {\n return (UpObject*)UpStringCreateWithLength(buf, len);\n } else {\n return UpNull();\n }\n}\n\nUpObject* UpReadLine(UpObject* self, FILE* stream) {\n char buf[kReadBufferSize];\n char* result = fgets(buf, kReadBufferSize, stream);\n // XXXjoe If result doesn't end in newline, allocate another buffer and keep reading\n if (result) {\n return (UpObject*)UpStringCreate(buf);\n } else {\n UpSetError(\"Unable to read line\");\n return NULL;\n }\n}\n\nchar UpReadCharacter(UpObject* self, FILE* stream) {\n char c;\n int len = fread(&c, sizeof(char), 1, stream);\n if (len) {\n return c;\n } else {\n UpSetError(\"Unable to read character\");\n return -1;\n }\n}\n\nint UpWrite(UpObject* object, FILE* stream) {\n if (object) {\n if (!UpIsTypeOf(object, UP_BUILTIN(string))) {\n UpSetError(\"Illegal print with non-string\");\n return 0;\n }\n\n UpString* str = (UpString*)object;\n int len = UpStringGetLength(str);\n return fwrite(str->value, sizeof(char), len / sizeof(char), stream);\n } else {\n return 0;\n }\n}\n\nint UpWriteCharacter(UpObject* self, FILE* stream, char code) {\n return fwrite(&code, sizeof(char), 1, stream);\n}\n\n// ************************************************************************************************\n\ndouble UpGetRandomNumber() {\n struct timeval t1;\n gettimeofday(&t1, NULL);\n srand(t1.tv_usec * t1.tv_sec);\n return (double)rand() / (double)RAND_MAX;\n}\n\nint UpShiftRight(int bits, int amount) {\n return bits >> amount;\n}\n\nint UpShiftRightZeroFill(int bits, int amount) {\n return (unsigned int)bits >> amount;\n}\n\nint UpShiftLeft(int bits, int amount) {\n return bits << amount;\n}\n\nint UpBitwiseAnd(int left, int right) {\n return left & right;\n}\n\nint UpBitwiseOr(int left, int right) {\n return left | right;\n}\n\nint UpBitwiseXor(int left, int right) {\n return left ^ right;\n}\n\nint UpBitwiseNot(int bits) {\n return ~bits;\n}\n\n// 
************************************************************************************************\n\n#include \"UpTerminal.h\"\n\nUP_EXPORT_SYMBOL(UpTerminalEnter)\nUP_EXPORT_SYMBOL(UpTerminalRestore)\n" }, { "alpha_fraction": 0.4935064911842346, "alphanum_fraction": 0.4958375096321106, "avg_line_length": 31.1122989654541, "blob_id": "27db5c437cb7410e1bfc28b19304d1395f9bd414", "content_id": "537ab383293c1a4a776f20d6cd93e2effde86754", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6006, "license_type": "permissive", "max_line_length": 99, "num_lines": 187, "path": "/src/up/Up.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"Up/Up.h\"\n#include <string.h>\n#include <stdio.h>\n\n#include <sys/ioctl.h>\n#include <termios.h>\n\n// ************************************************************************************************\n\nconst char* kInputPath = \"<no file>\";\nconst char* kSourceName = \"<source>\";\n\ntypedef enum {\n RunModeFile,\n RunModeModule,\n RunModeSource\n} RunMode;\n\ntypedef enum {\n DebugModeNone,\n DebugModeAST,\n DebugModeCompile,\n DebugModeBytecode\n} DebugMode;\n\nconst char* helpDocs = \\\n\"usage: up [option] ... 
[-c cmd | -m mod | file] [arg] ...\\n\" \\\n\" --probe the name of a probe to enable\\n\" \\\n\" --dump a path to write probes to\\n\" \\\n\" --version print the Up version\";\n\n// ************************************************************************************************\n\nstatic UpStatus _Main(const char* target, RunMode runMode, DebugMode debugMode) {\n if (!target) {\n runMode = RunModeModule;\n target = \"repl\";\n }\n\n UpStatus success = UpFailure;\n\n const char* targetName = \"\";\n if (!debugMode) {\n UpObject* result;\n if (runMode == RunModeFile) {\n targetName = UpGetFileConstructorName(target);\n success = UpExecuteFile(target, targetName, true, &result);\n } else if (runMode == RunModeModule) {\n targetName = UpGetFileConstructorName(target);\n success = UpExecuteModule(target, &result);\n } else if (runMode == RunModeSource) {\n targetName = kSourceName;\n success = UpExecuteSource(target, kInputPath, kSourceName, &result);\n }\n } else {\n if (debugMode == DebugModeAST) {\n UpPushCompilation(kInputPath, targetName);\n\n UpSyntax* ast;\n if (runMode == RunModeFile) {\n success = UpParseFile(target, &ast);\n } else if (runMode == RunModeModule) {\n success = UpParseModule(target, &ast);\n } else if (runMode == RunModeSource) {\n success = UpParseSource(target, &ast);\n }\n\n UpPopCompilation(UPCTX);\n\n if (success) {\n if (debugMode == DebugModeAST) {\n printf(\"%s\\n\", UpSyntaxToXML(ast));\n }\n }\n } else {\n UpFunctionDef* def = NULL;\n UpClassDef* classDef = NULL;\n if (runMode == RunModeFile) {\n success = UpCompileFile(target, targetName, false, &def, &classDef);\n } else if (runMode == RunModeModule) {\n success = UpCompileModule(target, &def, &classDef);\n } else if (runMode == RunModeSource) {\n success = UpCompileSource(target, kInputPath, targetName, &def);\n }\n\n if (success) {\n if (debugMode == DebugModeCompile) {\n if (classDef) {\n printf(\"%s\\n\", UpClassDefToString(classDef));\n } else if (def) {\n printf(\"%s\\n\", 
UpFunctionDefToString(def));\n }\n } else if (debugMode == DebugModeBytecode) {\n printf(\"%s\\n\", UpBytecodeToString(def));\n }\n }\n }\n }\n\n return success;\n}\n\n// ************************************************************************************************\n\nint main(int argc, char *argv[]) {\n RunMode runMode = RunModeFile;\n DebugMode debugMode = DebugModeNone;\n char* target = NULL;\n\n bool expectSource = false;\n bool expectModule = false;\n bool expectDebug = false;\n bool expectProbe = false;\n bool expectDump = false;\n bool disableEventLoop = false;\n bool putVersion = false;\n bool putHelp = false;\n\n UpContext* context = UpContextCreate();\n UpSwitchContext(context);\n UpSetLogStream(stdout);\n\n for (int i = 1; i < argc; ++i) {\n if (expectSource) {\n runMode = RunModeSource;\n target = argv[i];\n expectSource = false;\n } else if (expectModule) {\n runMode = RunModeModule;\n target = argv[i];\n expectModule = false;\n } else if (expectDebug) {\n char* modeName = argv[i];\n if (!strcmp(modeName, \"ast\")) {\n debugMode = DebugModeAST;\n } else if (!strcmp(modeName, \"compile\")) {\n debugMode = DebugModeCompile;\n } else if (!strcmp(modeName, \"bytecode\")) {\n debugMode = DebugModeBytecode;\n }\n expectDebug = false;\n } else if (expectProbe) {\n char* probe = argv[i];\n UpEnableProbe(probe, true);\n expectProbe = false;\n } else if (expectDump) {\n char* probesPath = argv[i];\n UpSetProbeDumpPath(probesPath);\n expectDump = false;\n } else if (strcmp(argv[i], \"-c\") == 0) {\n expectSource = true;\n } else if (strcmp(argv[i], \"-m\") == 0) {\n expectModule = true;\n } else if (strcmp(argv[i], \"--version\") == 0) {\n putVersion = true;\n } else if (strcmp(argv[i], \"--help\") == 0) {\n putHelp = true;\n } else if (strcmp(argv[i], \"--debug\") == 0) {\n expectDebug = true;\n } else if (strcmp(argv[i], \"--probe\") == 0) {\n expectProbe = true;\n } else if (strcmp(argv[i], \"--dump\") == 0) {\n expectDump = true;\n } else if 
(strcmp(argv[i], \"--disableEventLoop\") == 0) {\n disableEventLoop = true;\n } else if (i == argc-1) {\n runMode = RunModeFile;\n target = argv[i];\n }\n }\n\n if (disableEventLoop) {\n UpDisableEventLoop(true);\n }\n\n if (putVersion) {\n printf(\"Up %s\\n\", UpGetVersion());\n return 0;\n } else if (putHelp) {\n puts(helpDocs);\n return 0;\n }\n\n UpStatus status = _Main(target, runMode, debugMode);\n UpContextShutdown(context, status);\n return status ? 0 : 1;\n}\n" }, { "alpha_fraction": 0.7068443298339844, "alphanum_fraction": 0.7068443298339844, "avg_line_length": 34.10638427734375, "blob_id": "4df277da0cad59a36c5f19498681a94d9093d259", "content_id": "b2b0a489f83c586daa801a50ccebb0ce35ecb7c1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1651, "license_type": "permissive", "max_line_length": 100, "num_lines": 47, "path": "/src/vm/include/UpObject.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPOBJECT_H\n#define UP_UPOBJECT_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpObject {\n UpClass* cls;\n UpIntTable* properties;\n};\n\n// *************************************************************************************************\n\nUpObject* UpObjectCreate();\nUpObject* UpObjectCreateWithClass(UpClass* cls);\nUpObject* UpObjectCreateWithSize(size_t size);\n\nbool UpAsTruth(UpObject* self);\nbool UpAsInt(UpObject* self, int* out);\nbool UpAsLong(UpObject* self, long long* out);\nbool UpAsFloat(UpObject* self, double* out);\n\nUpStatus UpIs(UpObject* self, UpObject* other, bool* out);\nbool UpIsTypeOf(UpObject* self, UpClass* cls);\nbool UpIsClass(UpObject* self);\n\nUpClassDef* UpObjectGetClassDef(UpObject* self);\nchar* UpObjectGetClassName(UpObject* self);\nUpClass* UpObjectGetClass(UpObject* self);\n\nUpObject* UpObjectGet(UpObject* self, UpSymbol name);\nbool UpObjectHas(UpObject* self, UpSymbol name);\nvoid UpObjectSet(UpObject* self, UpSymbol name, UpObject* 
value);\nvoid UpObjectValidate(UpObject* self, UpSymbol name, UpObject* value);\nUpObject* UpObjectUnset(UpObject* self, UpSymbol name);\n\nUpList* UpObjectGetProperties(UpObject*);\n\nUpStatus UpObjectGetGetter(UpObject* self, UpSymbol name, bool create, UpProperty** outProperty,\n UpGetterDef** outGetterDef, UpClass** outClass);\nUpStatus UpObjectGetSetter(UpObject* self, UpSymbol name, bool isSet, UpProperty** outProperty,\n UpClass** outClass, UpFunctionDef** outDef);\n\nbool UpObjectEquality(UpObject* self, UpObject* other);\nUpHash UpObjectHash(UpObject* self);\nUpHash UpGetBuiltinHash(UpObject* object);\n\n#endif // UP_UPOBJECT_H\n" }, { "alpha_fraction": 0.6292017102241516, "alphanum_fraction": 0.6292017102241516, "avg_line_length": 30.700000762939453, "blob_id": "77ad1fb4a0e7801ca03361491fa082eaff70b8c9", "content_id": "cca824ae58ade969498bd493a2d2a016f3b5b681", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 952, "license_type": "permissive", "max_line_length": 99, "num_lines": 30, "path": "/src/vm/UpEvents.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpEvents.h\"\n#include \"UpTask.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nUpEvent* UpCreateNullEvent(UpArena* heap) {\n UpEvent* self = UpArenaNew(heap, UpEvent);\n self->eventType = UpEventNone;\n return self;\n}\n\nUpEvent* UpCreateResumeEvent(UpArena* heap, UpTask* task, char* cursor, bool isImmediate) {\n UpEvent* self = UpArenaNew(heap, UpEvent);\n self->task = task;\n self->eventType = isImmediate ? 
UpEventResumeTaskImmediate : UpEventResumeTask;\n if (cursor) {\n task->callFrame->cursor = cursor;\n }\n return self;\n}\n\nUpEvent* UpCreateStartEvent(UpArena* heap, UpFunction* function, bool isImmediate) {\n UpEvent* self = UpArenaNew(heap, UpEvent);\n self->function = function;\n self->eventType = isImmediate ? UpEventStartTaskImmediate : UpEventStartTask;\n return self;\n}\n" }, { "alpha_fraction": 0.5375859141349792, "alphanum_fraction": 0.5375859141349792, "avg_line_length": 31.10344886779785, "blob_id": "32571bd774805c7de2d2e6da1feedccc53684ce3", "content_id": "8e7c23439cd7a2c9158e8f6693cedd1b343637d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4656, "license_type": "permissive", "max_line_length": 103, "num_lines": 145, "path": "/src/vm/UpScheduler.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpScheduler.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpArena.h\"\n#include \"UpTask.h\"\n#include \"UpEvents.h\"\n\n// ************************************************************************************************\n\nstruct UpScheduleItem {\n UpEvent* event;\n UpObject* input;\n UpScheduleItem* next;\n};\n\n// ************************************************************************************************\n\nstatic UpStatus UpSchedulerDo(UpScheduler* self, UpEvent* event, UpObject* input) {\n if (event->eventType == UpEventResumeTaskImmediate || event->eventType == UpEventResumeTask) {\n if (input) {\n UpTaskPushArgument(event->task, input);\n }\n if (event->threadId == self->threadId) {\n return UpTaskResume(event->task, self);\n } else {\n UpSetError(\"Threads NYI.\");\n return UpFailure;\n }\n } else if (event->eventType == UpEventStartTaskImmediate || event->eventType == UpEventStartTask) {\n UpTask* task = UpTaskCreateWithFunction(UpGetHeap(), event->function);\n if (input) {\n 
UpTaskPushArgument(task, input);\n }\n if (event->threadId == self->threadId) {\n return UpTaskResume(task, self);\n } else {\n UpSetError(\"Threads NYI.\");\n return UpFailure;\n }\n } else {\n UpSetError(\"Unknown event.\");\n return UpFailure;\n }\n}\n\n// ************************************************************************************************\n\nUpScheduler* UpSchedulerCreate(UpArena* heap) {\n UpScheduler* self = UpArenaNew(heap, UpScheduler);\n return self;\n}\n\n// ************************************************************************************************\n\nvoid UpSchedulerResumeTask(UpScheduler* self, UpTask* task) {\n if (task->threadId == self->threadId) {\n UpTaskResume(task, self);\n } else {\n UpSchedulerScheduleTask(self, task);\n }\n}\n\nvoid UpSchedulerScheduleTask(UpScheduler* self, UpTask* task) {\n if (task->threadId == self->threadId) {\n UpEvent* event = UpCreateResumeEvent(UpGetHeap(), task, NULL, true);\n UpSchedulerScheduleEvent(self, event, NULL);\n } else {\n // UpSetError(\"Threads NYI.\");\n // return UpFailure;\n }\n}\n\nvoid UpSchedulerScheduleEvent(UpScheduler* self, UpEvent* event, UpObject* input) {\n UpScheduleItem* item = UpArenaNew(UpGetHeap(), UpScheduleItem);\n item->event = event;\n item->input = input;\n\n if (!self->nextEvent) {\n self->nextEvent = self->lastEvent = item;\n } else {\n self->lastEvent->next = item;\n self->lastEvent = item;\n }\n}\n\nUpStatus UpSchedulerDoEvent(UpScheduler* self, UpEvent* event, UpObject* input) {\n if (event->eventType == UpEventResumeTaskImmediate) {\n if (input) {\n UpTaskPushArgument(event->task, input);\n }\n if (event->threadId == self->threadId) {\n return UpTaskResume(event->task, self);\n } else {\n UpSetError(\"Threads NYI.\");\n return UpFailure;\n }\n } else if (event->eventType == UpEventResumeTask) {\n if (event->threadId == self->threadId) {\n UpSchedulerScheduleEvent(self, event, input);\n return UpSuccess;\n } else {\n UpSetError(\"Threads NYI.\");\n return 
UpFailure;\n }\n } else if (event->eventType == UpEventStartTaskImmediate) {\n UpTask* task = UpTaskCreateWithFunction(UpGetHeap(), event->function);\n if (input) {\n UpTaskPushArgument(task, input);\n }\n if (event->threadId == self->threadId) {\n return UpTaskResume(task, self);\n } else {\n UpSetError(\"Threads NYI.\");\n return UpFailure;\n }\n } else if (event->eventType == UpEventStartTask) {\n if (event->threadId == self->threadId) {\n UpSchedulerScheduleEvent(self, event, input);\n return UpSuccess;\n } else {\n UpSetError(\"Threads NYI.\");\n return UpFailure;\n }\n } else {\n return UpFailure;\n }\n}\n\nUpStatus UpSchedulerRun(UpScheduler* self) {\n UpScheduleItem* item = self->nextEvent;\n while (item) {\n self->nextEvent = item->next;\n if (item == self->lastEvent) {\n self->lastEvent = NULL;\n }\n UpStatus ok = UpSchedulerDo(self, item->event, item->input);\n if (!ok) {\n // XXXjoe Prevent uncaught exceptions in one task from crashing the others\n return UpFailure;\n }\n item = self->nextEvent;\n }\n return UpSuccess;\n}\n" }, { "alpha_fraction": 0.6119951009750366, "alphanum_fraction": 0.6119951009750366, "avg_line_length": 25.770492553710938, "blob_id": "1810018c90ddb0b8a38d22658bae891fc0442d37", "content_id": "21802785828928cab93bddbb8f30c27456c48d0f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1634, "license_type": "permissive", "max_line_length": 99, "num_lines": 61, "path": "/src/vm/UpClass.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpArena.h\"\n#include \"UpIntTable.h\"\n\n// ************************************************************************************************\n\nUpClass* UpClassCreate(UpClassDef* def, UpVariables* closure,\n UpClass* baseClass, size_t size) {\n UpClass* 
self = (UpClass*)UpObjectCreateWithSize(sizeof(UpClass));\n\n if (!baseClass) {\n self->baseClass = UpGetBuiltinClasses()->objectClass;\n } else {\n self->baseClass = baseClass;\n }\n\n self->__base.cls = self;\n self->def = def;\n self->closure = closure;\n self->size = size ? size : self->baseClass->size;\n return self;\n}\n\nUpObject* UpClassInstantiate(UpClass* self) {\n return UpObjectCreateWithClass(self);\n}\n\nbool UpClassIsInstance(UpClass* self, UpObject* object) {\n for (UpClass* cls = object->cls; cls; cls = cls->baseClass) {\n if (cls == self) {\n return true;\n }\n }\n return false;\n}\n\nchar* UpClassGetName(UpClass* self) {\n return self->def->name;\n}\n\nUpClass* UpClassGetBaseClass(UpClass* self) {\n return self->baseClass;\n}\n\nvoid _IterateGetters(UpStrTable* self, UpSymbol key, void* value, UpList* list) {\n UpString* name = UpStringCreate(UpGetSymbolName(key));\n UpListAppend(list, (UpObject*)name);\n}\n\nUpList* UpClassGetGetters(UpClass* self) {\n UpList* list = UpListCreate();\n if (self->def->getters) {\n UpIntTableIterate(self->def->getters, (UpIntTableIterator)_IterateGetters, list);\n }\n return list;\n}\n" }, { "alpha_fraction": 0.6477135419845581, "alphanum_fraction": 0.6489232778549194, "avg_line_length": 25.487178802490234, "blob_id": "681958468aec6710458de6287ed63d73761f7df7", "content_id": "a45ae37ede023e090efe3db23d08443d5aeeebb4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4133, "license_type": "permissive", "max_line_length": 99, "num_lines": 156, "path": "/src/vm/UpChannel.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpChannel.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpFunction.h\"\n#include \"UpArray.h\"\n#include \"UpTask.h\"\n#include \"UpScheduler.h\"\n#include \"UpEvents.h\"\n\n// 
************************************************************************************************\n\nUpObject* UpChannelCreate() {\n UpClass* cls = UpGetBuiltinClasses()->channelClass;\n UpChannel* self = (UpChannel*)(cls\n ? UpClassInstantiate(cls)\n : UpObjectCreateWithClass(UP_BUILTIN(channel)));\n self->isClosed = false;\n return (UpObject*)self;\n}\n\nbool UpChannelIsClosed(UpChannel* self) {\n return self->isClosed;\n}\n\nbool UpChannelClose(UpChannel* self) {\n if (!self->isClosed) {\n self->isClosed = true;\n return true;\n } else {\n return false;\n }\n}\n\nbool UpChannelHasGenerator(UpChannel* self) {\n return !!self->generator;\n}\n\nUpTask* UpChannelGetGenerator(UpChannel* self) {\n return self->generator;\n}\n\nchar* UpChannelGetGeneratorName(UpChannel* self) {\n if (self->generator) {\n UpCallFrame* call = self->generator->callFrame;\n if (call) {\n return call->functionDef->name;\n } else {\n return NULL;\n }\n } else {\n return NULL;\n }\n}\n\nbool UpChannelHasMessage(UpChannel* self) {\n return self->messages && UpArrayCount(self->messages) > 0;\n}\n\nvoid UpChannelAddMessage(UpChannel* self, UpObject* message) {\n if (!self->messages) {\n self->messages = UpArrayCreate(UpGetHeap(), sizeof(UpObject*));\n }\n UpArrayAppend(self->messages, &message);\n}\n\nUpObject* UpChannelPopMessage(UpChannel* self) {\n UpObject* message = NULL;\n UpArrayShift(self->messages, &message);\n return message;\n}\n\nbool UpChannelHasSubscribers(UpChannel* self) {\n return self->subscribers && UpArrayCount(self->subscribers) > 0;\n}\n\nvoid UpChannelAddSubscriber(UpChannel* self, UpEvent* subscriber) {\n if (!self->subscribers) {\n self->subscribers = UpArrayCreate(UpGetHeap(), sizeof(UpEvent*));\n }\n UpArrayAppend(self->subscribers, &subscriber);\n}\n\nUpEvent** UpChannelCloneSubscribers(UpChannel* self, unsigned int* outCount) {\n if (self->subscribers) {\n UpEvent** buffer;\n UpArrayCopyBuffer(self->subscribers, (void*)&buffer, outCount);\n return buffer;\n } else {\n 
return NULL;\n }\n}\n\nbool UpChannelHasReader(UpChannel* self) {\n return self->readers && UpArrayCount(self->readers) > 0;\n}\n\nvoid UpChannelAddReader(UpChannel* self, UpEvent* reader) {\n if (!self->readers) {\n self->readers = UpArrayCreate(UpGetHeap(), sizeof(UpObject*));\n }\n UpArrayAppend(self->readers, &reader);\n}\n\nUpEvent** UpChannelPopAllReaders(UpChannel* self, unsigned int* outCount) {\n if (self->readers) {\n UpEvent** buffer;\n UpArrayCopyBuffer(self->readers, (void*)&buffer, outCount);\n UpArrayRemoveAll(self->readers);\n return buffer;\n } else {\n return NULL;\n }\n}\n\nUpEvent* UpChannelPopReader(UpChannel* self) {\n UpEvent* reader = NULL;\n UpArrayShift(self->readers, &reader);\n return reader;\n}\n\nbool UpChannelHasWriter(UpChannel* self) {\n return self->writers && UpArrayCount(self->writers) > 0;\n}\n\nvoid UpChannelAddWriter(UpChannel* self, UpEvent* writer) {\n if (!self->writers) {\n self->writers = UpArrayCreate(UpGetHeap(), sizeof(UpObject*));\n }\n UpArrayAppend(self->writers, &writer);\n}\n\nUpEvent* UpChannelPopWriter(UpChannel* self) {\n UpEvent* writer = NULL;\n UpArrayShift(self->writers, &writer);\n return writer;\n}\n\nbool UpChannelHasRespondee(UpChannel* self) {\n return self->respondees && UpArrayCount(self->respondees) > 0;\n}\n\nvoid UpChannelAddRespondee(UpChannel* self, UpEvent* respondee) {\n if (!self->respondees) {\n self->respondees = UpArrayCreate(UpGetHeap(), sizeof(UpObject*));\n }\n UpArrayAppend(self->respondees, &respondee);\n}\n\nUpEvent* UpChannelPopRespondee(UpChannel* self) {\n UpEvent* respondee = NULL;\n UpArrayShift(self->respondees, &respondee);\n return respondee;\n}\n" }, { "alpha_fraction": 0.33457016944885254, "alphanum_fraction": 0.46963581442832947, "avg_line_length": 41.350738525390625, "blob_id": "7333b866683e48b89f2daeee3bd669072f26c3cb", "content_id": "5a517012660cc27b405be95151b3f5d58658b0f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"C", "length_bytes": 171946, "license_type": "permissive", "max_line_length": 290, "num_lines": 4060, "path": "/src/vm/Up.tab.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "/* A Bison parser, made by GNU Bison 2.3. */\n\n/* Skeleton implementation for Bison's Yacc-like parsers in C\n\n Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006\n Free Software Foundation, Inc.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2, or (at your option)\n any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program; if not, write to the Free Software\n Foundation, Inc., 51 Franklin Street, Fifth Floor,\n Boston, MA 02110-1301, USA. */\n\n/* As a special exception, you may create a larger work that contains\n part or all of the Bison parser skeleton and distribute that work\n under terms of your choice, so long as that work isn't itself a\n parser generator using the skeleton or a modified version thereof\n as a parser skeleton. Alternatively, if you modify or redistribute\n the parser skeleton itself, you may (at your option) remove this\n special exception, which will cause the skeleton and the resulting\n Bison output files to be licensed under the GNU General Public\n License without this special exception.\n\n This special exception was added by the Free Software Foundation in\n version 2.2 of Bison. */\n\n/* C LALR(1) parser skeleton written by Richard Stallman, by\n simplifying the original so-called \"semantic\" parser. 
*/\n\n/* All symbols defined below should begin with yy or YY, to avoid\n infringing on user name space. This should be done even for local\n variables, as they might otherwise be expanded by user macros.\n There are some unavoidable exceptions within include files to\n define necessary library symbols; they are noted \"INFRINGES ON\n USER NAME SPACE\" below. */\n\n/* Identify Bison output. */\n#define YYBISON 1\n\n/* Bison version. */\n#define YYBISON_VERSION \"2.3\"\n\n/* Skeleton name. */\n#define YYSKELETON_NAME \"yacc.c\"\n\n/* Pure parsers. */\n#define YYPURE 1\n\n/* Using locations. */\n#define YYLSP_NEEDED 1\n\n/* Substitute the variable and function names. */\n#define yyparse Upparse\n#define yylex Uplex\n#define yyerror Uperror\n#define yylval Uplval\n#define yychar Upchar\n#define yydebug Updebug\n#define yynerrs Upnerrs\n#define yylloc Uplloc\n\n/* Tokens. */\n#ifndef YYTOKENTYPE\n# define YYTOKENTYPE\n /* Put the tokens into the symbol table, so that GDB and other debuggers\n know about them. 
*/\n enum yytokentype {\n IDENTIFIER = 258,\n UIDENTIFIER = 259,\n BIDENTIFIER = 260,\n STRING = 261,\n FSTRING = 262,\n STRING_LINE = 263,\n FSTRING_LINE = 264,\n INTEGER = 265,\n LONG = 266,\n FLOAT = 267,\n CPRIMITIVE = 268,\n CFUNCTION = 269,\n CLOSE_BLOCK = 270,\n OPEN_BLOCK = 271,\n NEWLINE = 272,\n INLINE_EXPR = 273,\n PRIVATE = 274,\n PUBLIC = 275,\n THROW = 276,\n FINALLY = 277,\n CATCH = 278,\n TRY = 279,\n DO = 280,\n CONTINUE = 281,\n BREAK = 282,\n WHILE = 283,\n ON = 284,\n FOR = 285,\n ORELSE = 286,\n ELSE = 287,\n IF = 288,\n UNDERSCORE = 289,\n AT = 290,\n CARET = 291,\n POUND = 292,\n RARROW2MUL = 293,\n LARROW3 = 294,\n RARROW3 = 295,\n RARROW2 = 296,\n LARROW2 = 297,\n RARROW = 298,\n LARROW = 299,\n FATARROW2 = 300,\n FATARROW = 301,\n SEMICOLON = 302,\n COLON2 = 303,\n COLON = 304,\n CONCAT_EQ = 305,\n STAR2_EQ = 306,\n SLASH2_EQ = 307,\n SLASH_EQ = 308,\n STAR_EQ = 309,\n SUBTRACT_EQ = 310,\n ADD_EQ = 311,\n EQ = 312,\n NOTIN = 313,\n ISIN = 314,\n IN = 315,\n HASNOT = 316,\n HAS = 317,\n ISNOT = 318,\n IS = 319,\n AS = 320,\n CONCATSTR = 321,\n CONCAT = 322,\n GTE = 323,\n GT = 324,\n LTE = 325,\n LT = 326,\n NEQ = 327,\n EQ2 = 328,\n STAR2 = 329,\n SLASH2 = 330,\n SLASH = 331,\n STAR = 332,\n SUBTRACT = 333,\n ADD = 334,\n PIPE2 = 335,\n PIPE = 336,\n AMPERSAND = 337,\n TILDE = 338,\n DASHDASH = 339,\n COMMA = 340,\n QUESTION = 341,\n EXCLAMATION = 342,\n DOT3 = 343,\n DOT2 = 344,\n DOT = 345,\n OPERATORQ = 346,\n OPERATOR = 347,\n CLOSE_OPERATOR = 348,\n OPEN_OPERATORQ = 349,\n OPEN_OPERATOR = 350,\n WHERE = 351,\n BY = 352,\n THROUGH = 353,\n TO = 354,\n BULLET = 355,\n BACKSLASH = 356,\n RCB = 357,\n LCB = 358,\n RB = 359,\n LB = 360,\n RP = 361,\n LP = 362,\n STRUCT = 363,\n CONST = 364,\n OPEN_C = 365\n };\n#endif\n/* Tokens. 
*/\n#define IDENTIFIER 258\n#define UIDENTIFIER 259\n#define BIDENTIFIER 260\n#define STRING 261\n#define FSTRING 262\n#define STRING_LINE 263\n#define FSTRING_LINE 264\n#define INTEGER 265\n#define LONG 266\n#define FLOAT 267\n#define CPRIMITIVE 268\n#define CFUNCTION 269\n#define CLOSE_BLOCK 270\n#define OPEN_BLOCK 271\n#define NEWLINE 272\n#define INLINE_EXPR 273\n#define PRIVATE 274\n#define PUBLIC 275\n#define THROW 276\n#define FINALLY 277\n#define CATCH 278\n#define TRY 279\n#define DO 280\n#define CONTINUE 281\n#define BREAK 282\n#define WHILE 283\n#define ON 284\n#define FOR 285\n#define ORELSE 286\n#define ELSE 287\n#define IF 288\n#define UNDERSCORE 289\n#define AT 290\n#define CARET 291\n#define POUND 292\n#define RARROW2MUL 293\n#define LARROW3 294\n#define RARROW3 295\n#define RARROW2 296\n#define LARROW2 297\n#define RARROW 298\n#define LARROW 299\n#define FATARROW2 300\n#define FATARROW 301\n#define SEMICOLON 302\n#define COLON2 303\n#define COLON 304\n#define CONCAT_EQ 305\n#define STAR2_EQ 306\n#define SLASH2_EQ 307\n#define SLASH_EQ 308\n#define STAR_EQ 309\n#define SUBTRACT_EQ 310\n#define ADD_EQ 311\n#define EQ 312\n#define NOTIN 313\n#define ISIN 314\n#define IN 315\n#define HASNOT 316\n#define HAS 317\n#define ISNOT 318\n#define IS 319\n#define AS 320\n#define CONCATSTR 321\n#define CONCAT 322\n#define GTE 323\n#define GT 324\n#define LTE 325\n#define LT 326\n#define NEQ 327\n#define EQ2 328\n#define STAR2 329\n#define SLASH2 330\n#define SLASH 331\n#define STAR 332\n#define SUBTRACT 333\n#define ADD 334\n#define PIPE2 335\n#define PIPE 336\n#define AMPERSAND 337\n#define TILDE 338\n#define DASHDASH 339\n#define COMMA 340\n#define QUESTION 341\n#define EXCLAMATION 342\n#define DOT3 343\n#define DOT2 344\n#define DOT 345\n#define OPERATORQ 346\n#define OPERATOR 347\n#define CLOSE_OPERATOR 348\n#define OPEN_OPERATORQ 349\n#define OPEN_OPERATOR 350\n#define WHERE 351\n#define BY 352\n#define THROUGH 353\n#define TO 354\n#define BULLET 
355\n#define BACKSLASH 356\n#define RCB 357\n#define LCB 358\n#define RB 359\n#define LB 360\n#define RP 361\n#define LP 362\n#define STRUCT 363\n#define CONST 364\n#define OPEN_C 365\n\n\n\n\n/* Copy the first part of user declarations. */\n#line 1 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n\n#include \"UpParsing.h\"\n#include \"UpSyntax.h\"\n\n// ************************************************************************************************\n\n#define YYDEBUG 1\n#define __attribute__(NOOP) // workaround for bug in bison 1.875\n\n// ************************************************************************************************\n\n\n\n/* Enabling traces. */\n#ifndef YYDEBUG\n# define YYDEBUG 0\n#endif\n\n/* Enabling verbose error messages. */\n#ifdef YYERROR_VERBOSE\n# undef YYERROR_VERBOSE\n# define YYERROR_VERBOSE 1\n#else\n# define YYERROR_VERBOSE 1\n#endif\n\n/* Enabling the token table. */\n#ifndef YYTOKEN_TABLE\n# define YYTOKEN_TABLE 0\n#endif\n\n#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED\ntypedef union YYSTYPE\n#line 21 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n{\n int indentLevel;\n long long tagValue;\n char* stringValue;\n UpSyntax* objectValue;\n int operatorValue;\n}\n/* Line 193 of yacc.c. */\n#line 345 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.tab.c\"\n\tYYSTYPE;\n# define yystype YYSTYPE /* obsolescent; will be withdrawn */\n# define YYSTYPE_IS_DECLARED 1\n# define YYSTYPE_IS_TRIVIAL 1\n#endif\n\n#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED\ntypedef struct YYLTYPE\n{\n int first_line;\n int first_column;\n int last_line;\n int last_column;\n} YYLTYPE;\n# define yyltype YYLTYPE /* obsolescent; will be withdrawn */\n# define YYLTYPE_IS_DECLARED 1\n# define YYLTYPE_IS_TRIVIAL 1\n#endif\n\n\n/* Copy the second part of user declarations. */\n\n\n/* Line 216 of yacc.c. 
*/\n#line 370 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.tab.c\"\n\n#ifdef short\n# undef short\n#endif\n\n#ifdef YYTYPE_UINT8\ntypedef YYTYPE_UINT8 yytype_uint8;\n#else\ntypedef unsigned char yytype_uint8;\n#endif\n\n#ifdef YYTYPE_INT8\ntypedef YYTYPE_INT8 yytype_int8;\n#elif (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\ntypedef signed char yytype_int8;\n#else\ntypedef short int yytype_int8;\n#endif\n\n#ifdef YYTYPE_UINT16\ntypedef YYTYPE_UINT16 yytype_uint16;\n#else\ntypedef unsigned short int yytype_uint16;\n#endif\n\n#ifdef YYTYPE_INT16\ntypedef YYTYPE_INT16 yytype_int16;\n#else\ntypedef short int yytype_int16;\n#endif\n\n#ifndef YYSIZE_T\n# ifdef __SIZE_TYPE__\n# define YYSIZE_T __SIZE_TYPE__\n# elif defined size_t\n# define YYSIZE_T size_t\n# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\n# include <stddef.h> /* INFRINGES ON USER NAME SPACE */\n# define YYSIZE_T size_t\n# else\n# define YYSIZE_T unsigned int\n# endif\n#endif\n\n#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)\n\n#ifndef YY_\n# if defined YYENABLE_NLS && YYENABLE_NLS\n# if ENABLE_NLS\n# include <libintl.h> /* INFRINGES ON USER NAME SPACE */\n# define YY_(msgid) dgettext (\"bison-runtime\", msgid)\n# endif\n# endif\n# ifndef YY_\n# define YY_(msgid) msgid\n# endif\n#endif\n\n/* Suppress unused-variable warnings by \"using\" E. */\n#if ! defined lint || defined __GNUC__\n# define YYUSE(e) ((void) (e))\n#else\n# define YYUSE(e) /* empty */\n#endif\n\n/* Identity function, used to suppress warnings about constant conditions. */\n#ifndef lint\n# define YYID(n) (n)\n#else\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic int\nYYID (int i)\n#else\nstatic int\nYYID (i)\n int i;\n#endif\n{\n return i;\n}\n#endif\n\n#if ! defined yyoverflow || YYERROR_VERBOSE\n\n/* The parser invokes alloca or malloc; define the necessary symbols. 
*/\n\n# ifdef YYSTACK_USE_ALLOCA\n# if YYSTACK_USE_ALLOCA\n# ifdef __GNUC__\n# define YYSTACK_ALLOC __builtin_alloca\n# elif defined __BUILTIN_VA_ARG_INCR\n# include <alloca.h> /* INFRINGES ON USER NAME SPACE */\n# elif defined _AIX\n# define YYSTACK_ALLOC __alloca\n# elif defined _MSC_VER\n# include <malloc.h> /* INFRINGES ON USER NAME SPACE */\n# define alloca _alloca\n# else\n# define YYSTACK_ALLOC alloca\n# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\n# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */\n# ifndef _STDLIB_H\n# define _STDLIB_H 1\n# endif\n# endif\n# endif\n# endif\n# endif\n\n# ifdef YYSTACK_ALLOC\n /* Pacify GCC's `empty if-body' warning. */\n# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))\n# ifndef YYSTACK_ALLOC_MAXIMUM\n /* The OS might guarantee only one guard page at the bottom of the stack,\n and a page size can be as small as 4096 bytes. So we cannot safely\n invoke alloca (N) if N exceeds 4096. Use a slightly smaller number\n to allow for a few compiler-allocated temporary stack slots. */\n# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */\n# endif\n# else\n# define YYSTACK_ALLOC YYMALLOC\n# define YYSTACK_FREE YYFREE\n# ifndef YYSTACK_ALLOC_MAXIMUM\n# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM\n# endif\n# if (defined __cplusplus && ! defined _STDLIB_H \\\n && ! ((defined YYMALLOC || defined malloc) \\\n\t && (defined YYFREE || defined free)))\n# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */\n# ifndef _STDLIB_H\n# define _STDLIB_H 1\n# endif\n# endif\n# ifndef YYMALLOC\n# define YYMALLOC malloc\n# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nvoid *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */\n# endif\n# endif\n# ifndef YYFREE\n# define YYFREE free\n# if ! defined free && ! 
defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nvoid free (void *); /* INFRINGES ON USER NAME SPACE */\n# endif\n# endif\n# endif\n#endif /* ! defined yyoverflow || YYERROR_VERBOSE */\n\n\n#if (! defined yyoverflow \\\n && (! defined __cplusplus \\\n\t || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \\\n\t && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))\n\n/* A type that is properly aligned for any stack member. */\nunion yyalloc\n{\n yytype_int16 yyss;\n YYSTYPE yyvs;\n YYLTYPE yyls;\n};\n\n/* The size of the maximum gap between one aligned stack and the next. */\n# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)\n\n/* The size of an array large to enough to hold all stacks, each with\n N elements. */\n# define YYSTACK_BYTES(N) \\\n ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \\\n + 2 * YYSTACK_GAP_MAXIMUM)\n\n/* Copy COUNT objects from FROM to TO. The source and destination do\n not overlap. */\n# ifndef YYCOPY\n# if defined __GNUC__ && 1 < __GNUC__\n# define YYCOPY(To, From, Count) \\\n __builtin_memcpy (To, From, (Count) * sizeof (*(From)))\n# else\n# define YYCOPY(To, From, Count)\t\t\\\n do\t\t\t\t\t\\\n\t{\t\t\t\t\t\\\n\t YYSIZE_T yyi;\t\t\t\t\\\n\t for (yyi = 0; yyi < (Count); yyi++)\t\\\n\t (To)[yyi] = (From)[yyi];\t\t\\\n\t}\t\t\t\t\t\\\n while (YYID (0))\n# endif\n# endif\n\n/* Relocate STACK from its old location to the new one. The\n local variables YYSIZE and YYSTACKSIZE give the old and new number of\n elements in the stack, and YYPTR gives the new location of the\n stack. Advance YYPTR to a properly aligned location for the next\n stack. 
*/\n# define YYSTACK_RELOCATE(Stack)\t\t\t\t\t\\\n do\t\t\t\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\t\t\\\n\tYYSIZE_T yynewbytes;\t\t\t\t\t\t\\\n\tYYCOPY (&yyptr->Stack, Stack, yysize);\t\t\t\t\\\n\tStack = &yyptr->Stack;\t\t\t\t\t\t\\\n\tyynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \\\n\tyyptr += yynewbytes / sizeof (*yyptr);\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\t\\\n while (YYID (0))\n\n#endif\n\n/* YYFINAL -- State number of the termination state. */\n#define YYFINAL 141\n/* YYLAST -- Last index in YYTABLE. */\n#define YYLAST 2554\n\n/* YYNTOKENS -- Number of terminals. */\n#define YYNTOKENS 111\n/* YYNNTS -- Number of nonterminals. */\n#define YYNNTS 78\n/* YYNRULES -- Number of rules. */\n#define YYNRULES 304\n/* YYNRULES -- Number of states. */\n#define YYNSTATES 548\n\n/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */\n#define YYUNDEFTOK 2\n#define YYMAXUTOK 365\n\n#define YYTRANSLATE(YYX)\t\t\t\t\t\t\\\n ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)\n\n/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. 
*/\nstatic const yytype_uint8 yytranslate[] =\n{\n 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,\n 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,\n 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,\n 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,\n 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,\n 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,\n 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,\n 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,\n 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,\n 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,\n 105, 106, 107, 108, 109, 110\n};\n\n#if YYDEBUG\n/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in\n YYRHS. 
*/\nstatic const yytype_uint16 yyprhs[] =\n{\n 0, 0, 3, 5, 7, 9, 12, 14, 16, 18,\n 20, 22, 24, 26, 28, 31, 34, 38, 40, 44,\n 47, 50, 53, 56, 59, 62, 67, 72, 78, 83,\n 87, 91, 96, 98, 101, 105, 108, 114, 118, 125,\n 130, 138, 140, 142, 144, 146, 148, 150, 154, 157,\n 159, 163, 168, 170, 173, 176, 180, 184, 189, 192,\n 196, 200, 205, 208, 212, 217, 221, 226, 230, 236,\n 241, 245, 249, 252, 257, 261, 265, 268, 271, 276,\n 280, 284, 287, 292, 298, 302, 307, 310, 318, 325,\n 332, 337, 347, 356, 365, 372, 378, 383, 391, 398,\n 401, 405, 409, 412, 416, 419, 422, 426, 429, 434,\n 440, 444, 449, 452, 459, 465, 469, 478, 486, 492,\n 497, 504, 506, 510, 514, 517, 521, 524, 528, 531,\n 538, 547, 552, 559, 561, 565, 569, 572, 576, 579,\n 581, 585, 588, 592, 595, 602, 611, 616, 623, 625,\n 629, 633, 640, 643, 650, 652, 656, 660, 664, 666,\n 670, 672, 676, 678, 682, 684, 688, 692, 694, 698,\n 701, 703, 705, 707, 712, 714, 718, 720, 724, 728,\n 734, 738, 744, 746, 750, 752, 756, 760, 762, 766,\n 770, 774, 778, 782, 786, 790, 792, 796, 800, 802,\n 806, 810, 814, 818, 822, 824, 827, 830, 833, 836,\n 840, 842, 845, 848, 850, 854, 856, 860, 864, 868,\n 871, 876, 880, 886, 890, 896, 904, 909, 916, 918,\n 922, 924, 926, 928, 930, 932, 934, 938, 941, 945,\n 948, 952, 955, 958, 962, 964, 966, 968, 970, 972,\n 974, 976, 978, 980, 982, 984, 986, 988, 990, 992,\n 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1011, 1013,\n 1015, 1017, 1019, 1023, 1026, 1030, 1032, 1035, 1039, 1041,\n 1044, 1046, 1050, 1053, 1055, 1059, 1062, 1066, 1069, 1072,\n 1074, 1077, 1079, 1085, 1090, 1092, 1095, 1098, 1102, 1104,\n 1107, 1110, 1112, 1116, 1118\n};\n\n/* YYRHS -- A `-1'-separated list of the rules' RHS. 
*/\nstatic const yytype_int16 yyrhs[] =\n{\n 112, 0, -1, 113, -1, 182, -1, 115, -1, 113,\n 115, -1, 17, -1, 128, -1, 125, -1, 116, -1,\n 119, -1, 120, -1, 121, -1, 114, -1, 69, 118,\n -1, 76, 168, -1, 117, 76, 168, -1, 117, -1,\n 118, 85, 117, -1, 57, 128, -1, 57, 114, -1,\n 26, 114, -1, 27, 114, -1, 21, 128, -1, 21,\n 114, -1, 28, 127, 176, 114, -1, 24, 176, 114,\n 123, -1, 24, 176, 114, 123, 124, -1, 24, 176,\n 114, 124, -1, 23, 176, 114, -1, 23, 161, 114,\n -1, 23, 161, 176, 114, -1, 122, -1, 123, 122,\n -1, 22, 176, 114, -1, 126, 134, -1, 126, 134,\n 96, 176, 114, -1, 126, 135, 114, -1, 126, 135,\n 114, 96, 176, 114, -1, 126, 135, 176, 114, -1,\n 126, 135, 176, 114, 96, 176, 114, -1, 20, -1,\n 19, -1, 74, -1, 136, -1, 130, -1, 127, -1,\n 129, 85, 127, -1, 129, 85, -1, 131, -1, 131,\n 96, 133, -1, 131, 96, 176, 114, -1, 133, -1,\n 133, 132, -1, 5, 134, -1, 5, 134, 132, -1,\n 5, 135, 114, -1, 5, 135, 114, 132, -1, 82,\n 134, -1, 82, 134, 132, -1, 82, 135, 114, -1,\n 82, 135, 114, 132, -1, 147, 114, -1, 147, 176,\n 114, -1, 147, 100, 135, 114, -1, 147, 100, 134,\n -1, 147, 5, 135, 114, -1, 147, 5, 134, -1,\n 147, 170, 135, 176, 114, -1, 147, 170, 135, 114,\n -1, 147, 170, 134, -1, 84, 135, 114, -1, 84,\n 134, -1, 147, 172, 135, 114, -1, 147, 172, 134,\n -1, 171, 135, 114, -1, 171, 114, -1, 171, 134,\n -1, 147, 173, 135, 114, -1, 147, 173, 134, -1,\n 173, 135, 114, -1, 173, 134, -1, 147, 64, 142,\n 114, -1, 147, 64, 16, 139, 15, -1, 33, 142,\n 114, -1, 33, 16, 139, 15, -1, 33, 137, -1,\n 77, 147, 175, 147, 43, 135, 114, -1, 77, 147,\n 175, 147, 43, 134, -1, 77, 147, 175, 147, 176,\n 114, -1, 77, 147, 176, 114, -1, 77, 147, 175,\n 147, 174, 147, 43, 135, 114, -1, 77, 147, 175,\n 147, 174, 147, 43, 134, -1, 77, 147, 175, 147,\n 174, 147, 176, 114, -1, 77, 147, 174, 147, 176,\n 114, -1, 77, 147, 43, 135, 114, -1, 77, 147,\n 43, 134, -1, 77, 147, 174, 147, 43, 135, 114,\n -1, 77, 147, 174, 147, 43, 134, -1, 176, 114,\n -1, 147, 170, 134, -1, 147, 100, 134, -1, 84,\n 
134, -1, 147, 172, 134, -1, 171, 134, -1, 171,\n 114, -1, 147, 173, 134, -1, 173, 134, -1, 147,\n 64, 142, 114, -1, 147, 64, 16, 139, 15, -1,\n 33, 142, 114, -1, 33, 16, 139, 15, -1, 33,\n 137, -1, 77, 147, 175, 147, 43, 134, -1, 77,\n 147, 175, 147, 176, -1, 77, 147, 176, -1, 77,\n 147, 175, 147, 174, 147, 43, 134, -1, 77, 147,\n 175, 147, 174, 147, 176, -1, 77, 147, 174, 147,\n 176, -1, 77, 147, 43, 134, -1, 77, 147, 174,\n 147, 43, 134, -1, 147, -1, 147, 170, 135, -1,\n 147, 100, 135, -1, 84, 135, -1, 147, 172, 135,\n -1, 171, 135, -1, 147, 173, 135, -1, 173, 135,\n -1, 77, 147, 175, 147, 43, 135, -1, 77, 147,\n 175, 147, 174, 147, 43, 135, -1, 77, 147, 43,\n 135, -1, 77, 147, 174, 147, 43, 135, -1, 148,\n -1, 148, 170, 127, -1, 148, 100, 127, -1, 84,\n 127, -1, 148, 172, 127, -1, 171, 127, -1, 171,\n -1, 148, 173, 127, -1, 173, 127, -1, 148, 64,\n 144, -1, 33, 144, -1, 77, 148, 175, 148, 43,\n 127, -1, 77, 148, 175, 148, 174, 148, 43, 127,\n -1, 77, 148, 43, 127, -1, 77, 148, 174, 148,\n 43, 127, -1, 138, -1, 138, 32, 176, -1, 147,\n 176, 114, -1, 138, 32, 33, 147, 176, 114, -1,\n 140, 114, -1, 140, 114, 32, 43, 147, 114, -1,\n 141, -1, 147, 43, 176, -1, 140, 114, 141, -1,\n 147, 43, 135, -1, 143, -1, 143, 80, 147, -1,\n 141, -1, 143, 80, 141, -1, 145, -1, 145, 80,\n 148, -1, 146, -1, 145, 80, 146, -1, 148, 43,\n 148, -1, 148, -1, 148, 85, 147, -1, 148, 85,\n -1, 149, -1, 150, -1, 151, -1, 151, 34, 163,\n 150, -1, 152, -1, 151, 66, 152, -1, 153, -1,\n 152, 81, 153, -1, 152, 99, 152, -1, 152, 99,\n 153, 97, 152, -1, 152, 98, 152, -1, 152, 98,\n 153, 97, 152, -1, 154, -1, 153, 82, 154, -1,\n 155, -1, 154, 73, 155, -1, 154, 72, 155, -1,\n 156, -1, 155, 71, 156, -1, 155, 69, 156, -1,\n 155, 70, 156, -1, 155, 68, 156, -1, 155, 63,\n 156, -1, 155, 59, 156, -1, 155, 58, 156, -1,\n 157, -1, 156, 79, 157, -1, 156, 78, 157, -1,\n 158, -1, 157, 77, 158, -1, 157, 76, 158, -1,\n 157, 75, 158, -1, 157, 74, 158, -1, 157, 67,\n 158, -1, 159, -1, 55, 158, -1, 78, 158, -1,\n 87, 
158, -1, 60, 158, -1, 34, 163, 158, -1,\n 161, -1, 47, 160, -1, 47, 176, -1, 161, -1,\n 160, 47, 161, -1, 162, -1, 161, 49, 162, -1,\n 161, 48, 162, -1, 161, 65, 162, -1, 161, 177,\n -1, 161, 177, 35, 168, -1, 161, 177, 25, -1,\n 161, 177, 35, 168, 25, -1, 161, 90, 3, -1,\n 161, 90, 105, 127, 104, -1, 161, 90, 105, 127,\n 32, 127, 104, -1, 161, 105, 127, 104, -1, 161,\n 105, 127, 32, 127, 104, -1, 163, -1, 162, 101,\n 163, -1, 164, -1, 165, -1, 166, -1, 167, -1,\n 168, -1, 169, -1, 107, 129, 106, -1, 107, 106,\n -1, 105, 129, 104, -1, 105, 104, -1, 103, 180,\n 102, -1, 103, 102, -1, 71, 69, -1, 71, 161,\n 69, -1, 3, -1, 4, -1, 10, -1, 11, -1,\n 12, -1, 6, -1, 34, -1, 14, -1, 57, -1,\n 42, -1, 56, -1, 55, -1, 54, -1, 53, -1,\n 52, -1, 51, -1, 50, -1, 42, -1, 41, -1,\n 38, -1, 41, -1, 38, -1, 46, -1, 46, 25,\n -1, 33, -1, 28, -1, 60, -1, 29, -1, 16,\n 113, 15, -1, 107, 106, -1, 107, 178, 106, -1,\n 179, -1, 179, 85, -1, 179, 85, 178, -1, 127,\n -1, 5, 127, -1, 5, -1, 5, 57, 127, -1,\n 88, 127, -1, 181, -1, 180, 85, 181, -1, 180,\n 85, -1, 149, 57, 149, -1, 110, 183, -1, 182,\n 183, -1, 184, -1, 184, 47, -1, 185, -1, 186,\n 3, 107, 187, 106, -1, 186, 3, 107, 106, -1,\n 3, -1, 108, 3, -1, 109, 3, -1, 109, 108,\n 3, -1, 13, -1, 109, 13, -1, 186, 77, -1,\n 188, -1, 187, 85, 188, -1, 186, -1, 186, 3,\n -1\n};\n\n/* YYRLINE[YYN] -- source line where rule number YYN was defined. 
*/\nstatic const yytype_uint16 yyrline[] =\n{\n 0, 151, 151, 153, 158, 160, 165, 170, 171, 172,\n 173, 174, 175, 176, 180, 185, 187, 192, 194, 199,\n 201, 203, 205, 207, 209, 214, 219, 221, 223, 228,\n 230, 232, 237, 239, 244, 249, 251, 253, 255, 257,\n 259, 264, 266, 268, 273, 277, 281, 282, 284, 289,\n 290, 292, 297, 298, 303, 305, 307, 309, 312, 314,\n 316, 318, 323, 324, 326, 328, 330, 332, 335, 337,\n 339, 342, 344, 347, 349, 352, 354, 356, 359, 361,\n 363, 365, 368, 370, 373, 375, 377, 380, 382, 384,\n 386, 389, 391, 393, 395, 398, 400, 403, 405, 410,\n 411, 414, 417, 420, 422, 424, 427, 429, 432, 434,\n 437, 439, 441, 444, 446, 448, 451, 453, 455, 458,\n 460, 465, 466, 469, 472, 475, 477, 480, 482, 485,\n 488, 491, 493, 498, 499, 502, 505, 508, 511, 513,\n 516, 518, 521, 524, 527, 529, 532, 534, 539, 541,\n 546, 548, 553, 555, 560, 562, 564, 569, 574, 576,\n 581, 583, 588, 590, 595, 597, 602, 607, 608, 610,\n 615, 619, 622, 623, 628, 629, 634, 635, 637, 639,\n 641, 643, 648, 649, 654, 655, 657, 662, 663, 665,\n 667, 669, 671, 673, 675, 680, 681, 683, 688, 689,\n 691, 693, 695, 697, 702, 703, 705, 707, 709, 711,\n 716, 717, 719, 724, 725, 730, 731, 733, 735, 738,\n 740, 742, 744, 747, 750, 752, 755, 757, 763, 764,\n 769, 770, 771, 772, 773, 774, 778, 780, 785, 787,\n 792, 794, 799, 801, 806, 808, 813, 814, 815, 816,\n 817, 819, 824, 826, 828, 830, 832, 834, 836, 838,\n 840, 845, 847, 849, 854, 856, 861, 863, 868, 870,\n 875, 877, 882, 887, 889, 894, 896, 898, 903, 905,\n 907, 909, 911, 916, 917, 919, 924, 931, 933, 938,\n 939, 943, 947, 949, 954, 956, 958, 960, 962, 964,\n 966, 971, 973, 978, 980\n};\n#endif\n\n#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE\n/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.\n First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/\nstatic const char *const yytname[] =\n{\n \"$end\", \"error\", \"$undefined\", \"IDENTIFIER\", \"UIDENTIFIER\",\n \"BIDENTIFIER\", \"STRING\", \"FSTRING\", \"STRING_LINE\", \"FSTRING_LINE\",\n \"INTEGER\", \"LONG\", \"FLOAT\", \"CPRIMITIVE\", \"CFUNCTION\", \"CLOSE_BLOCK\",\n \"OPEN_BLOCK\", \"NEWLINE\", \"INLINE_EXPR\", \"PRIVATE\", \"PUBLIC\", \"THROW\",\n \"FINALLY\", \"CATCH\", \"TRY\", \"DO\", \"CONTINUE\", \"BREAK\", \"WHILE\", \"ON\",\n \"FOR\", \"ORELSE\", \"ELSE\", \"IF\", \"UNDERSCORE\", \"AT\", \"CARET\", \"POUND\",\n \"RARROW2MUL\", \"LARROW3\", \"RARROW3\", \"RARROW2\", \"LARROW2\", \"RARROW\",\n \"LARROW\", \"FATARROW2\", \"FATARROW\", \"SEMICOLON\", \"COLON2\", \"COLON\",\n \"CONCAT_EQ\", \"STAR2_EQ\", \"SLASH2_EQ\", \"SLASH_EQ\", \"STAR_EQ\",\n \"SUBTRACT_EQ\", \"ADD_EQ\", \"EQ\", \"NOTIN\", \"ISIN\", \"IN\", \"HASNOT\", \"HAS\",\n \"ISNOT\", \"IS\", \"AS\", \"CONCATSTR\", \"CONCAT\", \"GTE\", \"GT\", \"LTE\", \"LT\",\n \"NEQ\", \"EQ2\", \"STAR2\", \"SLASH2\", \"SLASH\", \"STAR\", \"SUBTRACT\", \"ADD\",\n \"PIPE2\", \"PIPE\", \"AMPERSAND\", \"TILDE\", \"DASHDASH\", \"COMMA\", \"QUESTION\",\n \"EXCLAMATION\", \"DOT3\", \"DOT2\", \"DOT\", \"OPERATORQ\", \"OPERATOR\",\n \"CLOSE_OPERATOR\", \"OPEN_OPERATORQ\", \"OPEN_OPERATOR\", \"WHERE\", \"BY\",\n \"THROUGH\", \"TO\", \"BULLET\", \"BACKSLASH\", \"RCB\", \"LCB\", \"RB\", \"LB\", \"RP\",\n \"LP\", \"STRUCT\", \"CONST\", \"OPEN_C\", \"$accept\", \"root\", \"statementList\",\n \"lineEnding\", \"statement\", \"importDirective\", \"moduleName\",\n \"moduleNameList\", \"controlFlowStatement\", \"whileBlock\", \"tryBlock\",\n \"catchBlock\", \"catchBlockList\", \"finallyBlock\", \"declarationBlock\",\n \"accessMode\", \"right\", \"rightBlock\", \"rightList\", \"whereExpression\",\n \"blockChain\", \"blockArguments\", \"blockLeft\", \"blockRight\",\n \"assignmentExpression\", \"assignmentExpressionSimple\", \"elseBlocks\",\n \"transformBlockList\", \"elseLines\", \"transformLineList\",\n 
\"transformExpression\", \"elseLine\", \"transformList\", \"elseLineSimple\",\n \"transformListSimple\", \"transformExpressionSimple\", \"tupleExpression\",\n \"simpleExpression\", \"conditionExpression\", \"binaryExpression\",\n \"concatExpression\", \"logicalOrExpression\", \"logicalAndExpression\",\n \"equalityExpression\", \"relationalExpression\", \"addExpression\",\n \"multiplyExpression\", \"unaryExpression\", \"bindExpression\", \"bindList\",\n \"callExpression\", \"typeExpression\", \"basicExpression\", \"parenExpression\",\n \"listExpression\", \"mapExpression\", \"channelExpression\", \"id\", \"literal\",\n \"assignOp\", \"channelOp\", \"writeOp\", \"funcOp\", \"ifWhile\", \"inOn\", \"block\",\n \"callArguments\", \"argumentList\", \"argument\", \"mapTupleExpression\",\n \"mapAssignmentExpression\", \"cDeclarations\", \"cDeclaration\", \"cLine\",\n \"cFunction\", \"cType\", \"cArgs\", \"cArg\", 0\n};\n#endif\n\n# ifdef YYPRINT\n/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to\n token YYLEX-NUM. */\nstatic const yytype_uint16 yytoknum[] =\n{\n 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,\n 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,\n 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,\n 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,\n 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,\n 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,\n 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,\n 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,\n 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,\n 345, 346, 347, 348, 349, 350, 351, 352, 353, 354,\n 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,\n 365\n};\n# endif\n\n/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/\nstatic const yytype_uint8 yyr1[] =\n{\n 0, 111, 112, 112, 113, 113, 114, 115, 115, 115,\n 115, 115, 115, 115, 116, 117, 117, 118, 118, 119,\n 119, 119, 119, 119, 119, 120, 121, 121, 121, 122,\n 122, 122, 123, 123, 124, 125, 125, 125, 125, 125,\n 125, 126, 126, 126, 127, 128, 129, 129, 129, 130,\n 130, 130, 131, 131, 132, 132, 132, 132, 132, 132,\n 132, 132, 133, 133, 133, 133, 133, 133, 133, 133,\n 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,\n 133, 133, 133, 133, 133, 133, 133, 133, 133, 133,\n 133, 133, 133, 133, 133, 133, 133, 133, 133, 134,\n 134, 134, 134, 134, 134, 134, 134, 134, 134, 134,\n 134, 134, 134, 134, 134, 134, 134, 134, 134, 134,\n 134, 135, 135, 135, 135, 135, 135, 135, 135, 135,\n 135, 135, 135, 136, 136, 136, 136, 136, 136, 136,\n 136, 136, 136, 136, 136, 136, 136, 136, 137, 137,\n 138, 138, 139, 139, 140, 140, 140, 141, 142, 142,\n 143, 143, 144, 144, 145, 145, 146, 147, 147, 147,\n 148, 149, 150, 150, 151, 151, 152, 152, 152, 152,\n 152, 152, 153, 153, 154, 154, 154, 155, 155, 155,\n 155, 155, 155, 155, 155, 156, 156, 156, 157, 157,\n 157, 157, 157, 157, 158, 158, 158, 158, 158, 158,\n 159, 159, 159, 160, 160, 161, 161, 161, 161, 161,\n 161, 161, 161, 161, 161, 161, 161, 161, 162, 162,\n 163, 163, 163, 163, 163, 163, 164, 164, 165, 165,\n 166, 166, 167, 167, 168, 168, 169, 169, 169, 169,\n 169, 169, 170, 170, 170, 170, 170, 170, 170, 170,\n 170, 171, 171, 171, 172, 172, 173, 173, 174, 174,\n 175, 175, 176, 177, 177, 178, 178, 178, 179, 179,\n 179, 179, 179, 180, 180, 180, 181, 182, 182, 183,\n 183, 184, 185, 185, 186, 186, 186, 186, 186, 186,\n 186, 187, 187, 188, 188\n};\n\n/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. 
*/\nstatic const yytype_uint8 yyr2[] =\n{\n 0, 2, 1, 1, 1, 2, 1, 1, 1, 1,\n 1, 1, 1, 1, 2, 2, 3, 1, 3, 2,\n 2, 2, 2, 2, 2, 4, 4, 5, 4, 3,\n 3, 4, 1, 2, 3, 2, 5, 3, 6, 4,\n 7, 1, 1, 1, 1, 1, 1, 3, 2, 1,\n 3, 4, 1, 2, 2, 3, 3, 4, 2, 3,\n 3, 4, 2, 3, 4, 3, 4, 3, 5, 4,\n 3, 3, 2, 4, 3, 3, 2, 2, 4, 3,\n 3, 2, 4, 5, 3, 4, 2, 7, 6, 6,\n 4, 9, 8, 8, 6, 5, 4, 7, 6, 2,\n 3, 3, 2, 3, 2, 2, 3, 2, 4, 5,\n 3, 4, 2, 6, 5, 3, 8, 7, 5, 4,\n 6, 1, 3, 3, 2, 3, 2, 3, 2, 6,\n 8, 4, 6, 1, 3, 3, 2, 3, 2, 1,\n 3, 2, 3, 2, 6, 8, 4, 6, 1, 3,\n 3, 6, 2, 6, 1, 3, 3, 3, 1, 3,\n 1, 3, 1, 3, 1, 3, 3, 1, 3, 2,\n 1, 1, 1, 4, 1, 3, 1, 3, 3, 5,\n 3, 5, 1, 3, 1, 3, 3, 1, 3, 3,\n 3, 3, 3, 3, 3, 1, 3, 3, 1, 3,\n 3, 3, 3, 3, 1, 2, 2, 2, 2, 3,\n 1, 2, 2, 1, 3, 1, 3, 3, 3, 2,\n 4, 3, 5, 3, 5, 7, 4, 6, 1, 3,\n 1, 1, 1, 1, 1, 1, 3, 2, 3, 2,\n 3, 2, 2, 3, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 2, 1, 1,\n 1, 1, 3, 2, 3, 1, 2, 3, 1, 2,\n 1, 3, 2, 1, 3, 2, 3, 2, 2, 1,\n 2, 1, 5, 4, 1, 2, 2, 3, 1, 2,\n 2, 1, 3, 1, 2\n};\n\n/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state\n STATE-NUM when YYTABLE doesn't specify something else to do. Zero\n means the default is an error. 
*/\nstatic const yytype_uint16 yydefact[] =\n{\n 0, 244, 245, 249, 246, 247, 248, 251, 6, 42,\n 41, 0, 0, 0, 0, 0, 0, 250, 263, 262,\n 261, 266, 0, 0, 0, 0, 0, 0, 43, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 2, 13,\n 4, 9, 10, 11, 12, 8, 0, 7, 45, 49,\n 52, 0, 167, 170, 171, 172, 174, 176, 182, 184,\n 187, 195, 198, 204, 210, 215, 228, 230, 231, 232,\n 233, 234, 235, 0, 0, 3, 24, 23, 0, 0,\n 21, 22, 0, 0, 0, 0, 44, 133, 139, 0,\n 0, 86, 148, 160, 0, 158, 0, 250, 0, 267,\n 211, 213, 212, 205, 20, 19, 208, 0, 17, 14,\n 242, 0, 0, 206, 0, 0, 0, 72, 0, 121,\n 0, 0, 0, 207, 241, 0, 0, 283, 239, 46,\n 0, 237, 0, 294, 298, 0, 0, 287, 289, 291,\n 0, 1, 5, 35, 0, 0, 0, 0, 53, 0,\n 265, 264, 253, 260, 259, 258, 257, 256, 255, 254,\n 252, 0, 0, 62, 0, 0, 0, 0, 169, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 219, 0, 76,\n 77, 0, 81, 0, 288, 0, 0, 143, 162, 164,\n 0, 0, 136, 0, 0, 0, 0, 0, 0, 138,\n 141, 0, 0, 154, 0, 0, 84, 0, 0, 0,\n 209, 0, 15, 0, 0, 243, 269, 271, 268, 0,\n 270, 0, 0, 0, 0, 112, 0, 0, 102, 124,\n 71, 0, 0, 0, 0, 0, 105, 104, 126, 107,\n 128, 99, 0, 285, 240, 48, 238, 236, 295, 296,\n 299, 0, 290, 0, 300, 0, 37, 0, 50, 0,\n 54, 0, 58, 0, 67, 0, 0, 0, 0, 65,\n 0, 70, 0, 74, 0, 79, 0, 63, 168, 0,\n 175, 177, 180, 176, 178, 176, 183, 186, 185, 194,\n 193, 192, 191, 189, 190, 188, 197, 196, 203, 202,\n 201, 200, 199, 217, 216, 218, 223, 0, 0, 280,\n 0, 273, 278, 0, 275, 221, 0, 229, 75, 80,\n 272, 0, 0, 32, 26, 28, 0, 0, 0, 0,\n 0, 25, 142, 135, 134, 137, 140, 85, 152, 0,\n 0, 149, 161, 159, 0, 0, 157, 121, 0, 0,\n 150, 214, 16, 18, 96, 0, 0, 0, 90, 0,\n 110, 0, 0, 0, 115, 0, 0, 101, 123, 100,\n 122, 103, 125, 106, 127, 286, 284, 47, 297, 0,\n 0, 0, 39, 51, 55, 56, 59, 60, 66, 0,\n 82, 64, 69, 0, 73, 78, 173, 0, 0, 0,\n 0, 226, 0, 279, 282, 274, 276, 220, 0, 0,\n 0, 33, 27, 165, 163, 166, 146, 0, 0, 0,\n 156, 155, 0, 0, 0, 0, 0, 0, 95, 0,\n 0, 0, 0, 0, 111, 119, 131, 0, 0, 0,\n 108, 293, 303, 0, 301, 36, 0, 0, 57, 61,\n 83, 68, 
181, 179, 0, 224, 0, 281, 277, 222,\n 34, 30, 0, 29, 0, 0, 0, 0, 0, 0,\n 0, 0, 98, 0, 94, 88, 0, 0, 89, 0,\n 118, 0, 0, 114, 109, 304, 0, 292, 38, 0,\n 0, 227, 31, 147, 144, 0, 0, 151, 0, 0,\n 97, 87, 0, 0, 120, 132, 113, 129, 0, 302,\n 40, 225, 0, 153, 0, 0, 0, 92, 0, 93,\n 0, 117, 145, 0, 91, 116, 130, 0\n};\n\n/* YYDEFGOTO[NTERM-NUM]. */\nstatic const yytype_int16 yydefgoto[] =\n{\n -1, 37, 38, 39, 40, 41, 108, 109, 42, 43,\n 44, 343, 344, 345, 45, 46, 129, 47, 130, 48,\n 49, 148, 50, 117, 249, 86, 91, 92, 221, 222,\n 93, 94, 95, 207, 208, 209, 119, 52, 53, 54,\n 55, 56, 57, 58, 59, 60, 61, 62, 63, 100,\n 64, 65, 66, 67, 68, 69, 70, 71, 72, 164,\n 120, 165, 121, 241, 242, 122, 197, 333, 334, 126,\n 127, 75, 137, 138, 139, 140, 463, 464\n};\n\n/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing\n STATE-NUM. */\n#define YYPACT_NINF -382\nstatic const yytype_int16 yypact[] =\n{\n 1209, -382, -382, -382, -382, -382, -382, -382, -382, -382,\n -382, 1760, 43, 52, 52, 2138, 954, 350, -382, -382,\n -382, 23, 87, 2408, 1760, 2408, 3, 2447, -382, 2408,\n 2408, 1839, 2408, 332, 1894, 1949, 20, 102, 1432, -382,\n -382, -382, -382, -382, -382, -382, 1839, -382, -382, -14,\n 32, 1528, 22, -382, -382, -9, 82, 64, 106, 454,\n 26, 296, -382, -382, 159, 40, -382, -382, -382, -382,\n -382, -382, -382, 1596, 1839, 20, -382, -382, 1432, 52,\n -382, -382, 2408, 2408, 2138, 43, -382, 765, 2138, 2138,\n 2408, -382, 116, -382, 52, 70, 34, -382, 2408, -382,\n 105, 159, -382, -382, -382, -382, -382, 108, 79, 72,\n -382, 124, 226, -382, 2272, 2408, 1839, -382, 52, 1607,\n 1596, 1839, 52, -382, -382, 149, -38, -382, -382, -382,\n -29, -382, -63, -382, -382, 208, 39, -382, 166, -382,\n 46, -382, -382, 126, 169, 2028, 1839, 1839, -382, 1839,\n -382, -382, -382, -382, -382, -382, -382, -382, -382, -382,\n -382, 2291, 1839, -382, 1839, 1839, 1839, 52, 62, 306,\n 2408, 2408, 2408, 2408, 2408, 2408, 2408, 2408, 2408, 2408,\n 2408, 2408, 2408, 2408, 2408, 2408, 2408, 2408, 2408, 
2408,\n 2408, 306, 306, 306, 27, 2138, 1517, 35, 306, -382,\n -382, 52, -382, 52, -382, 1327, 180, -382, 140, -382,\n 187, 421, -382, 52, 2408, 2138, 2138, 2138, 2138, -382,\n -382, 230, 52, -382, 205, 150, -382, 2408, 2193, 52,\n -382, 306, -382, 108, 3, -382, -382, -382, -382, 1839,\n -382, 2408, 2408, 52, 2408, -382, 52, 265, -382, -382,\n -382, 2310, 1839, 1839, 1839, 1839, -382, -382, -382, -382,\n -382, -382, 2408, 2408, -382, 2138, -382, -382, -382, -382,\n -382, 248, -382, 146, -382, 43, 160, 52, -382, 52,\n 32, 52, 32, 52, -382, 52, 2408, 52, 220, -382,\n 52, -382, 169, -382, 52, -382, 52, -382, -382, 2408,\n 82, 64, -382, 71, -382, 78, 106, 454, 454, 26,\n 26, 26, 26, 26, 26, 26, 296, 296, -382, -382,\n -382, -382, -382, 40, 40, 40, -382, 2138, -12, 2083,\n 2138, -382, -382, 162, 186, -382, 108, -382, -382, -382,\n -382, 43, 87, -382, 180, -382, 2408, 2408, 2138, 2408,\n 2408, -382, -382, -382, -382, -382, -382, -382, 2389, 1036,\n 2408, -382, -382, 220, 2408, 2193, -382, 1371, 2193, 2193,\n -382, 159, -382, 79, -382, 52, 37, 67, -382, 257,\n -382, 1839, 2408, 2408, -382, 2408, 52, -382, -382, -382,\n -382, -382, -382, -382, -382, -382, -382, -382, -382, 18,\n 52, 43, 179, -382, -382, 32, -382, 32, -382, 267,\n -382, -382, -382, 52, -382, -382, -382, 2408, 2408, 31,\n 2138, -382, 2138, -382, -382, -382, 1681, 258, 52, 69,\n 52, -382, -382, -382, 187, -382, -382, 242, 154, 245,\n -382, -382, 43, 423, 2193, 2193, 2193, 2193, -382, 1839,\n 52, 1839, 2408, 52, -382, -382, -382, 65, 291, 274,\n -382, -382, 59, -39, -382, -382, 52, 43, -382, -382,\n -382, -382, -382, -382, 2138, -382, 188, -382, -382, -382,\n -382, -382, 52, -382, 2138, 2138, 2408, 2408, 52, 2193,\n 2408, 2408, -382, 52, -382, -382, 52, 100, -382, 1839,\n -382, 1839, 2408, -382, -382, -382, 20, -382, -382, 52,\n 191, -382, -382, -382, -382, 256, 52, -382, 260, 176,\n -382, -382, 1839, 52, -382, -382, -382, -382, 121, -382,\n -382, -382, 2138, -382, 2193, 2193, 2408, -382, 52, -382,\n 1839, 
-382, -382, 261, -382, -382, -382, 2193\n};\n\n/* YYPGOTO[NTERM-NUM]. */\nstatic const yytype_int16 yypgoto[] =\n{\n -382, -382, 213, 497, -35, -382, 80, -382, -382, -382,\n -382, -33, -382, -22, -382, -382, 512, 47, 270, -382,\n -382, -263, 161, 907, 648, -382, 201, -382, -231, -382,\n -88, -100, -382, 109, -382, -20, 16, 408, -24, 29,\n -382, -144, 28, 157, 50, 376, 77, 90, -382, -382,\n -21, 42, -7, -382, -382, -382, -382, -95, -382, -83,\n 212, -80, 0, -206, -203, 774, -382, -94, -382, -382,\n 74, -382, 264, -382, -382, -381, -382, -173\n};\n\n/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If\n positive, shift that token. If negative, reduce the rule which\n number is the opposite. If zero, do what YYDEFACT says.\n If YYTABLE_NINF, syntax error. */\n#define YYTABLE_NINF -1\nstatic const yytype_uint16 yytable[] =\n{\n 74, 101, 223, 142, 216, 349, 111, 217, 350, 125,\n 98, 74, 232, 379, 246, 89, 51, 404, 462, 406,\n 420, 133, 265, 133, 74, 169, 300, 51, 302, 304,\n 326, 134, 96, 134, 89, 89, 253, 146, 74, 254,\n 51, 382, 269, 267, 383, 112, 506, 263, 99, 273,\n 78, 166, 270, 78, 51, 409, 265, 170, 77, 78,\n 335, 287, 505, 474, 264, 1, 2, 507, 3, 8,\n 336, 105, 4, 5, 6, 266, 7, 228, 74, 107,\n 449, 78, 145, 78, 89, 78, 8, 218, 89, 89,\n 1, 2, 421, 3, 51, 236, 17, 4, 5, 6,\n 238, 7, 141, 78, 184, 185, 224, 168, 499, 22,\n 451, 1, 2, 103, 147, 106, 78, 191, 192, 255,\n 113, 97, 123, 274, 461, 462, 135, 136, 135, 136,\n 96, 247, 327, 27, 193, 475, 274, 78, 372, 362,\n 30, 198, 468, 522, 469, 74, 174, 271, 225, 32,\n 227, 386, 231, 174, 459, 233, 223, 234, 27, 194,\n 174, 51, 299, 171, 540, 33, 78, 34, 417, 35,\n 142, 452, 191, 192, 195, 418, 196, 288, 175, 176,\n 172, 173, 236, 360, 298, 78, 8, 238, 230, 193,\n 33, 337, 34, 235, 35, 89, 89, 485, 223, 301,\n 303, 305, 341, 342, 236, 74, 262, 191, 192, 238,\n 371, 268, 73, 272, 194, 89, 89, 89, 89, 535,\n 346, 51, 275, 73, 193, 307, 308, 88, 369, 195,\n 347, 196, 486, 323, 324, 325, 73, 490, 395, 125,\n 
491, 427, 78, 363, 367, 357, 88, 88, 359, 194,\n 73, 398, 502, 399, 236, 237, 401, 376, 377, 238,\n 224, 316, 317, 228, 195, 89, 196, 288, 425, 239,\n 440, 426, 454, 472, 473, 467, 318, 319, 320, 321,\n 322, 78, 470, 479, 445, 484, 240, 446, 487, 504,\n 73, 205, 511, 236, 237, 531, 88, 223, 238, 532,\n 88, 88, 224, 534, 547, 132, 278, 78, 381, 1,\n 2, 431, 3, 536, 373, 245, 4, 5, 6, 236,\n 7, 429, 432, 352, 238, 240, 433, 89, 416, 89,\n 89, 306, 478, 529, 501, 1, 2, 396, 3, 204,\n 97, 0, 4, 5, 6, 0, 7, 0, 89, 0,\n 0, 0, 0, 1, 2, 0, 3, 73, 0, 369,\n 4, 5, 6, 186, 7, 369, 17, 447, 369, 369,\n 187, 188, 189, 190, 288, 367, 442, 27, 0, 22,\n 443, 367, 0, 0, 367, 367, 0, 23, 0, 0,\n 0, 0, 25, 0, 0, 0, 0, 0, 457, 458,\n 0, 224, 0, 27, 0, 0, 0, 88, 88, 33,\n 30, 34, 0, 35, 0, 0, 0, 73, 0, 32,\n 89, 27, 89, 87, 0, 0, 89, 88, 88, 88,\n 88, 0, 0, 0, 124, 33, 0, 34, 0, 35,\n 368, 0, 87, 87, 369, 369, 369, 369, 0, 236,\n 237, 236, 237, 33, 238, 34, 238, 35, 0, 0,\n 367, 367, 367, 367, 348, 0, 489, 0, 497, 0,\n 0, 0, 0, 0, 89, 0, 0, 88, 0, 0,\n 0, 240, 0, 240, 89, 89, 0, 0, 0, 369,\n 210, 211, 87, 0, 0, 0, 87, 87, 0, 0,\n 0, 0, 0, 516, 0, 367, 518, 519, 76, 0,\n 80, 81, 177, 178, 0, 0, 0, 179, 528, 0,\n 0, 104, 180, 181, 182, 183, 0, 85, 0, 0,\n 0, 0, 89, 0, 369, 369, 0, 0, 0, 88,\n 0, 88, 88, 0, 0, 0, 0, 369, 163, 0,\n 367, 367, 543, 309, 310, 311, 312, 313, 314, 315,\n 88, 0, 0, 367, 0, 0, 0, 0, 0, 0,\n 199, 368, 0, 0, 0, 0, 206, 368, 0, 0,\n 368, 368, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 226, 0, 0, 0, 0, 212, 0, 0, 0,\n 219, 220, 0, 87, 87, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 250, 0, 256, 0, 261,\n 0, 0, 210, 87, 87, 87, 87, 0, 0, 0,\n 0, 0, 88, 0, 88, 0, 0, 0, 88, 0,\n 0, 276, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 368, 368, 368, 368,\n 0, 0, 0, 0, 297, 0, 0, 0, 0, 0,\n 0, 0, 0, 87, 0, 0, 0, 0, 0, 118,\n 0, 0, 0, 0, 0, 0, 88, 0, 0, 0,\n 0, 0, 0, 0, 144, 0, 88, 88, 338, 0,\n 339, 368, 0, 0, 0, 0, 0, 328, 332, 0,\n 351, 0, 0, 0, 0, 0, 0, 0, 0, 358,\n 0, 201, 203, 0, 0, 0, 370, 353, 
354, 355,\n 356, 0, 0, 0, 0, 87, 0, 87, 87, 0,\n 378, 0, 0, 380, 88, 0, 368, 368, 0, 0,\n 0, 0, 0, 0, 434, 435, 87, 437, 438, 368,\n 0, 0, 0, 0, 0, 0, 0, 0, 258, 260,\n 0, 0, 0, 0, 402, 0, 403, 397, 405, 0,\n 407, 0, 408, 0, 410, 0, 79, 411, 0, 412,\n 0, 414, 0, 415, 281, 283, 102, 285, 0, 0,\n 0, 0, 0, 150, 0, 0, 151, 152, 0, 0,\n 290, 21, 292, 294, 296, 153, 154, 155, 156, 157,\n 158, 159, 160, 0, 0, 167, 0, 0, 87, 214,\n 87, 0, 0, 0, 87, 0, 0, 0, 0, 419,\n 0, 423, 424, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 213,\n 436, 0, 0, 0, 0, 215, 0, 0, 0, 0,\n 229, 0, 448, 0, 0, 0, 366, 0, 0, 0,\n 0, 0, 87, 460, 0, 0, 243, 375, 0, 0,\n 0, 0, 87, 87, 515, 0, 0, 465, 0, 0,\n 388, 390, 392, 394, 0, 0, 0, 0, 0, 0,\n 471, 0, 0, 0, 0, 0, 0, 0, 277, 279,\n 0, 0, 0, 0, 0, 480, 481, 483, 0, 0,\n 0, 0, 476, 0, 477, 0, 0, 0, 332, 0,\n 87, 0, 0, 0, 0, 0, 0, 494, 0, 0,\n 498, 0, 0, 143, 0, 0, 0, 1, 2, 0,\n 3, 0, 0, 508, 4, 5, 6, 0, 7, 0,\n 90, 0, 0, 0, 0, 0, 0, 0, 0, 512,\n 200, 202, 0, 0, 0, 517, 510, 0, 17, 0,\n 520, 0, 0, 521, 0, 0, 513, 514, 0, 361,\n 0, 22, 0, 0, 0, 0, 530, 366, 0, 23,\n 0, 0, 0, 533, 25, 0, 258, 260, 0, 0,\n 539, 384, 0, 248, 0, 27, 0, 257, 259, 456,\n 0, 0, 30, 0, 0, 544, 0, 0, 0, 1,\n 2, 32, 3, 0, 542, 0, 4, 5, 6, 400,\n 7, 0, 78, 280, 282, 0, 284, 33, 0, 34,\n 0, 35, 0, 0, 0, 0, 413, 0, 0, 289,\n 17, 291, 293, 295, 18, 0, 0, 19, 20, 0,\n 0, 0, 21, 22, 0, 0, 0, 0, 0, 0,\n 0, 23, 388, 390, 392, 394, 25, 493, 0, 496,\n 0, 0, 0, 0, 0, 0, 0, 27, 0, 0,\n 0, 0, 0, 364, 30, 428, 430, 0, 0, 0,\n 365, 0, 0, 32, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 441, 0, 0, 0, 456, 0, 33,\n 0, 34, 0, 35, 0, 0, 374, 525, 0, 527,\n 450, 453, 0, 0, 0, 0, 0, 0, 0, 387,\n 389, 391, 393, 0, 0, 0, 0, 0, 0, 0,\n 538, 0, 0, 0, 0, 466, 0, 0, 0, 0,\n 0, 0, 525, 527, 0, 0, 0, 0, 546, 0,\n 0, 0, 0, 0, 0, 546, 0, 0, 0, 0,\n 0, 0, 0, 482, 0, 0, 0, 0, 0, 0,\n 0, 0, 1, 2, 0, 3, 488, 0, 0, 4,\n 5, 6, 0, 7, 0, 0, 8, 0, 9, 10,\n 11, 500, 503, 12, 0, 13, 14, 15, 0, 0,\n 0, 509, 16, 17, 0, 0, 0, 18, 0, 
0,\n 19, 20, 0, 0, 0, 21, 22, 0, 0, 0,\n 0, 0, 0, 0, 23, 0, 24, 0, 0, 25,\n 0, 523, 0, 0, 0, 0, 0, 0, 26, 0,\n 27, 0, 0, 28, 0, 0, 29, 30, 455, 0,\n 0, 0, 0, 31, 0, 0, 32, 0, 0, 0,\n 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 33, 0, 34, 0, 35, 0, 0, 36,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 2, 0, 3, 0, 0, 0, 4, 5, 6,\n 0, 7, 340, 0, 8, 0, 9, 10, 11, 0,\n 0, 12, 0, 13, 14, 15, 492, 0, 495, 0,\n 16, 17, 0, 0, 0, 18, 0, 0, 19, 20,\n 0, 0, 0, 21, 22, 0, 0, 0, 0, 0,\n 0, 0, 23, 0, 24, 0, 0, 25, 0, 0,\n 0, 0, 0, 0, 0, 0, 26, 0, 27, 0,\n 0, 28, 0, 0, 29, 30, 524, 0, 526, 150,\n 0, 31, 151, 152, 32, 0, 0, 21, 0, 0,\n 0, 153, 154, 155, 156, 157, 158, 159, 160, 537,\n 33, 0, 34, 0, 35, 1, 2, 0, 3, 0,\n 0, 0, 4, 5, 6, 0, 7, 545, 0, 8,\n 0, 9, 10, 11, 0, 0, 12, 0, 13, 14,\n 15, 0, 0, 0, 0, 16, 17, 0, 0, 0,\n 18, 444, 0, 19, 20, 0, 0, 0, 21, 22,\n 0, 0, 0, 0, 0, 0, 0, 23, 0, 24,\n 0, 0, 25, 0, 0, 0, 0, 0, 0, 0,\n 0, 26, 0, 27, 0, 0, 28, 0, 0, 29,\n 30, 0, 0, 0, 0, 0, 31, 0, 0, 32,\n 1, 2, 329, 3, 0, 0, 0, 4, 5, 6,\n 0, 7, 0, 149, 0, 33, 0, 34, 0, 35,\n 0, 0, 0, 0, 78, 8, 0, 0, 0, 0,\n 82, 17, 0, 0, 0, 18, 0, 0, 19, 20,\n 0, 0, 0, 21, 22, 0, 150, 0, 0, 151,\n 152, 0, 23, 0, 21, 0, 0, 25, 153, 154,\n 155, 156, 157, 158, 159, 160, 0, 0, 27, 0,\n 0, 0, 161, 0, 83, 30, 0, 0, 0, 1,\n 2, 84, 3, 0, 32, 330, 4, 5, 6, 0,\n 7, 0, 78, 8, 0, 0, 0, 0, 0, 0,\n 33, 0, 34, 331, 35, 0, 0, 0, 162, 114,\n 17, 0, 0, 0, 18, 0, 0, 19, 20, 0,\n 0, 0, 21, 22, 0, 150, 0, 0, 151, 152,\n 0, 23, 0, 21, 0, 0, 25, 153, 154, 155,\n 156, 157, 158, 159, 160, 0, 0, 27, 0, 0,\n 0, 251, 0, 115, 30, 0, 0, 0, 0, 0,\n 116, 0, 0, 32, 1, 2, 329, 3, 0, 0,\n 0, 4, 5, 6, 0, 7, 0, 0, 0, 33,\n 0, 34, 0, 35, 0, 0, 0, 252, 0, 0,\n 0, 0, 0, 0, 82, 17, 0, 0, 0, 18,\n 0, 0, 19, 20, 0, 0, 0, 21, 22, 0,\n 0, 0, 0, 0, 0, 0, 23, 0, 0, 0,\n 0, 25, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 27, 0, 0, 0, 0, 0, 83, 30,\n 0, 0, 0, 1, 2, 84, 3, 0, 32, 330,\n 4, 5, 6, 0, 7, 0, 0, 8, 0, 0,\n 0, 0, 0, 0, 33, 0, 34, 0, 35, 0,\n 0, 0, 0, 16, 17, 0, 0, 0, 18, 
0,\n 0, 19, 20, 0, 0, 0, 21, 22, 0, 0,\n 0, 0, 0, 0, 0, 23, 0, 0, 0, 0,\n 25, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 27, 0, 0, 0, 0, 0, 29, 30, 0,\n 0, 0, 1, 2, 31, 3, 0, 32, 0, 4,\n 5, 6, 0, 7, 0, 78, 0, 0, 0, 0,\n 0, 0, 0, 33, 0, 34, 0, 35, 0, 0,\n 0, 0, 114, 17, 0, 0, 0, 18, 0, 0,\n 19, 20, 0, 0, 0, 21, 22, 0, 0, 0,\n 0, 0, 0, 0, 23, 0, 0, 1, 2, 25,\n 3, 0, 0, 0, 4, 5, 6, 0, 7, 0,\n 27, 0, 0, 0, 0, 0, 115, 30, 0, 0,\n 0, 0, 0, 116, 0, 0, 32, 82, 17, 0,\n 0, 0, 18, 0, 0, 19, 20, 0, 0, 0,\n 21, 22, 33, 0, 34, 0, 35, 0, 0, 23,\n 0, 0, 1, 2, 25, 3, 0, 0, 0, 4,\n 5, 6, 0, 7, 0, 27, 0, 0, 0, 0,\n 0, 83, 30, 0, 0, 0, 0, 0, 84, 0,\n 0, 32, 82, 17, 0, 0, 0, 18, 0, 0,\n 19, 20, 0, 0, 0, 21, 22, 33, 128, 34,\n 0, 35, 0, 0, 23, 0, 0, 0, 0, 25,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 27, 0, 0, 0, 0, 0, 83, 30, 0, 0,\n 0, 1, 2, 84, 3, 0, 32, 0, 4, 5,\n 6, 0, 7, 0, 78, 0, 0, 0, 0, 0,\n 0, 0, 33, 0, 34, 131, 35, 0, 0, 0,\n 0, 16, 17, 0, 0, 0, 18, 0, 0, 19,\n 20, 0, 0, 0, 21, 22, 0, 0, 0, 0,\n 0, 0, 0, 23, 0, 0, 1, 2, 25, 3,\n 0, 0, 0, 4, 5, 6, 0, 7, 0, 27,\n 0, 0, 0, 0, 0, 29, 30, 0, 0, 0,\n 0, 0, 31, 0, 0, 32, 82, 17, 0, 0,\n 0, 18, 0, 0, 19, 20, 0, 0, 0, 21,\n 22, 33, 0, 34, 0, 35, 0, 0, 23, 0,\n 422, 1, 2, 25, 3, 0, 0, 0, 4, 5,\n 6, 0, 7, 0, 27, 0, 0, 0, 0, 0,\n 83, 30, 0, 0, 0, 0, 0, 84, 0, 0,\n 32, 82, 17, 0, 0, 0, 18, 0, 0, 19,\n 20, 0, 0, 0, 21, 22, 33, 0, 34, 0,\n 35, 0, 0, 23, 0, 0, 1, 2, 25, 3,\n 0, 0, 0, 4, 5, 6, 0, 7, 0, 27,\n 0, 0, 0, 0, 0, 83, 30, 0, 0, 0,\n 0, 0, 84, 0, 0, 32, 0, 17, 0, 0,\n 0, 18, 0, 0, 19, 20, 0, 0, 0, 21,\n 22, 33, 0, 34, 0, 35, 0, 0, 23, 0,\n 0, 0, 0, 25, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 27, 0, 0, 0, 0, 0,\n 364, 30, 0, 0, 0, 1, 2, 365, 3, 0,\n 32, 0, 4, 5, 6, 0, 7, 0, 244, 0,\n 0, 0, 0, 0, 1, 2, 33, 3, 34, 0,\n 35, 4, 5, 6, 0, 7, 17, 286, 0, 0,\n 0, 0, 0, 1, 2, 0, 3, 0, 0, 22,\n 4, 5, 6, 0, 7, 17, 385, 23, 0, 0,\n 0, 0, 25, 0, 0, 0, 0, 0, 22, 0,\n 0, 0, 0, 27, 17, 0, 23, 0, 0, 0,\n 30, 25, 0, 0, 0, 0, 0, 22, 0, 32,\n 0, 0, 27, 0, 0, 23, 0, 0, 0, 30,\n 25, 
0, 0, 0, 0, 33, 0, 34, 32, 35,\n 0, 27, 0, 0, 0, 0, 0, 0, 30, 0,\n 0, 0, 1, 2, 33, 3, 34, 32, 35, 4,\n 5, 6, 0, 7, 0, 0, 0, 0, 0, 0,\n 0, 1, 2, 33, 3, 34, 0, 35, 4, 5,\n 6, 439, 7, 17, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 22, 0, 0, 0,\n 0, 0, 17, 0, 23, 0, 0, 0, 0, 25,\n 1, 2, 0, 3, 0, 22, 0, 4, 5, 6,\n 27, 7, 0, 23, 0, 0, 0, 30, 25, 0,\n 0, 0, 0, 0, 0, 0, 32, 0, 0, 27,\n 0, 97, 0, 0, 0, 0, 30, 0, 0, 0,\n 0, 0, 33, 0, 34, 32, 35, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 33, 0, 34, 0, 35, 110, 0, 27, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 33, 0, 34, 0, 35\n};\n\nstatic const yytype_int16 yycheck[] =\n{\n 0, 22, 90, 38, 87, 211, 27, 87, 211, 33,\n 17, 11, 107, 244, 114, 15, 0, 280, 399, 282,\n 32, 3, 85, 3, 24, 34, 170, 11, 172, 173,\n 3, 13, 16, 13, 34, 35, 119, 5, 38, 119,\n 24, 247, 3, 106, 247, 29, 85, 85, 25, 3,\n 16, 51, 13, 16, 38, 286, 85, 66, 11, 16,\n 25, 161, 3, 32, 102, 3, 4, 106, 6, 17,\n 35, 24, 10, 11, 12, 104, 14, 43, 78, 76,\n 43, 16, 96, 16, 84, 16, 17, 87, 88, 89,\n 3, 4, 104, 6, 78, 28, 34, 10, 11, 12,\n 33, 14, 0, 16, 78, 79, 90, 85, 43, 47,\n 43, 3, 4, 23, 82, 25, 16, 48, 49, 119,\n 30, 34, 32, 77, 106, 506, 108, 109, 108, 109,\n 114, 115, 105, 71, 65, 104, 77, 16, 233, 227,\n 78, 101, 405, 43, 407, 145, 82, 108, 32, 87,\n 80, 251, 47, 82, 385, 76, 244, 85, 71, 90,\n 82, 145, 169, 81, 43, 103, 16, 105, 97, 107,\n 205, 377, 48, 49, 105, 97, 107, 161, 72, 73,\n 98, 99, 28, 33, 168, 16, 17, 33, 98, 65,\n 103, 198, 105, 69, 107, 195, 196, 43, 286, 171,\n 172, 173, 22, 23, 28, 205, 57, 48, 49, 33,\n 231, 3, 0, 47, 90, 215, 216, 217, 218, 43,\n 80, 205, 96, 11, 65, 175, 176, 15, 228, 105,\n 43, 107, 438, 191, 192, 193, 24, 443, 262, 263,\n 443, 336, 16, 227, 228, 15, 34, 35, 43, 90,\n 38, 3, 458, 107, 28, 29, 96, 241, 242, 33,\n 244, 184, 185, 43, 105, 265, 107, 251, 106, 43,\n 358, 85, 15, 417, 418, 96, 186, 187, 188, 189,\n 190, 16, 15, 25, 367, 43, 60, 367, 43, 15,\n 78, 78, 104, 
28, 29, 104, 84, 385, 33, 43,\n 88, 89, 286, 43, 43, 35, 145, 16, 43, 3,\n 4, 344, 6, 519, 234, 114, 10, 11, 12, 28,\n 14, 342, 344, 214, 33, 60, 346, 327, 299, 329,\n 330, 174, 426, 506, 43, 3, 4, 263, 6, 75,\n 34, -1, 10, 11, 12, -1, 14, -1, 348, -1,\n -1, -1, -1, 3, 4, -1, 6, 145, -1, 359,\n 10, 11, 12, 67, 14, 365, 34, 367, 368, 369,\n 74, 75, 76, 77, 358, 359, 360, 71, -1, 47,\n 364, 365, -1, -1, 368, 369, -1, 55, -1, -1,\n -1, -1, 60, -1, -1, -1, -1, -1, 382, 383,\n -1, 385, -1, 71, -1, -1, -1, 195, 196, 103,\n 78, 105, -1, 107, -1, -1, -1, 205, -1, 87,\n 420, 71, 422, 15, -1, -1, 426, 215, 216, 217,\n 218, -1, -1, -1, 102, 103, -1, 105, -1, 107,\n 228, -1, 34, 35, 444, 445, 446, 447, -1, 28,\n 29, 28, 29, 103, 33, 105, 33, 107, -1, -1,\n 444, 445, 446, 447, 43, -1, 43, -1, 452, -1,\n -1, -1, -1, -1, 474, -1, -1, 265, -1, -1,\n -1, 60, -1, 60, 484, 485, -1, -1, -1, 489,\n 82, 83, 84, -1, -1, -1, 88, 89, -1, -1,\n -1, -1, -1, 487, -1, 489, 490, 491, 11, -1,\n 13, 14, 58, 59, -1, -1, -1, 63, 502, -1,\n -1, 24, 68, 69, 70, 71, -1, 15, -1, -1,\n -1, -1, 532, -1, 534, 535, -1, -1, -1, 327,\n -1, 329, 330, -1, -1, -1, -1, 547, 51, -1,\n 534, 535, 536, 177, 178, 179, 180, 181, 182, 183,\n 348, -1, -1, 547, -1, -1, -1, -1, -1, -1,\n 73, 359, -1, -1, -1, -1, 79, 365, -1, -1,\n 368, 369, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, 94, -1, -1, -1, -1, 84, -1, -1, -1,\n 88, 89, -1, 195, 196, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, 118, -1, 120, -1, 122,\n -1, -1, 214, 215, 216, 217, 218, -1, -1, -1,\n -1, -1, 420, -1, 422, -1, -1, -1, 426, -1,\n -1, 144, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, 444, 445, 446, 447,\n -1, -1, -1, -1, 167, -1, -1, -1, -1, -1,\n -1, -1, -1, 265, -1, -1, -1, -1, -1, 31,\n -1, -1, -1, -1, -1, -1, 474, -1, -1, -1,\n -1, -1, -1, -1, 46, -1, 484, 485, 201, -1,\n 203, 489, -1, -1, -1, -1, -1, 195, 196, -1,\n 213, -1, -1, -1, -1, -1, -1, -1, -1, 222,\n -1, 73, 74, -1, -1, -1, 229, 215, 216, 217,\n 218, -1, -1, -1, -1, 327, -1, 329, 
330, -1,\n 243, -1, -1, 246, 532, -1, 534, 535, -1, -1,\n -1, -1, -1, -1, 346, 347, 348, 349, 350, 547,\n -1, -1, -1, -1, -1, -1, -1, -1, 120, 121,\n -1, -1, -1, -1, 277, -1, 279, 265, 281, -1,\n 283, -1, 285, -1, 287, -1, 12, 290, -1, 292,\n -1, 294, -1, 296, 146, 147, 22, 149, -1, -1,\n -1, -1, -1, 38, -1, -1, 41, 42, -1, -1,\n 162, 46, 164, 165, 166, 50, 51, 52, 53, 54,\n 55, 56, 57, -1, -1, 51, -1, -1, 420, 64,\n 422, -1, -1, -1, 426, -1, -1, -1, -1, 327,\n -1, 329, 330, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, 85,\n 348, -1, -1, -1, -1, 100, -1, -1, -1, -1,\n 96, -1, 375, -1, -1, -1, 228, -1, -1, -1,\n -1, -1, 474, 386, -1, -1, 112, 239, -1, -1,\n -1, -1, 484, 485, 486, -1, -1, 400, -1, -1,\n 252, 253, 254, 255, -1, -1, -1, -1, -1, -1,\n 413, -1, -1, -1, -1, -1, -1, -1, 144, 145,\n -1, -1, -1, -1, -1, 428, 429, 430, -1, -1,\n -1, -1, 420, -1, 422, -1, -1, -1, 426, -1,\n 532, -1, -1, -1, -1, -1, -1, 450, -1, -1,\n 453, -1, -1, 46, -1, -1, -1, 3, 4, -1,\n 6, -1, -1, 466, 10, 11, 12, -1, 14, -1,\n 16, -1, -1, -1, -1, -1, -1, -1, -1, 482,\n 73, 74, -1, -1, -1, 488, 474, -1, 34, -1,\n 493, -1, -1, 496, -1, -1, 484, 485, -1, 225,\n -1, 47, -1, -1, -1, -1, 509, 359, -1, 55,\n -1, -1, -1, 516, 60, -1, 368, 369, -1, -1,\n 523, 247, -1, 116, -1, 71, -1, 120, 121, 381,\n -1, -1, 78, -1, -1, 538, -1, -1, -1, 3,\n 4, 87, 6, -1, 532, -1, 10, 11, 12, 275,\n 14, -1, 16, 146, 147, -1, 149, 103, -1, 105,\n -1, 107, -1, -1, -1, -1, 292, -1, -1, 162,\n 34, 164, 165, 166, 38, -1, -1, 41, 42, -1,\n -1, -1, 46, 47, -1, -1, -1, -1, -1, -1,\n -1, 55, 444, 445, 446, 447, 60, 449, -1, 451,\n -1, -1, -1, -1, -1, -1, -1, 71, -1, -1,\n -1, -1, -1, 77, 78, 341, 342, -1, -1, -1,\n 84, -1, -1, 87, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, 359, -1, -1, -1, 489, -1, 103,\n -1, 105, -1, 107, -1, -1, 239, 499, -1, 501,\n 376, 377, -1, -1, -1, -1, -1, -1, -1, 252,\n 253, 254, 255, -1, -1, -1, -1, -1, -1, -1,\n 522, -1, -1, -1, -1, 401, -1, -1, -1, -1,\n -1, -1, 534, 535, -1, 
-1, -1, -1, 540, -1,\n -1, -1, -1, -1, -1, 547, -1, -1, -1, -1,\n -1, -1, -1, 429, -1, -1, -1, -1, -1, -1,\n -1, -1, 3, 4, -1, 6, 442, -1, -1, 10,\n 11, 12, -1, 14, -1, -1, 17, -1, 19, 20,\n 21, 457, 458, 24, -1, 26, 27, 28, -1, -1,\n -1, 467, 33, 34, -1, -1, -1, 38, -1, -1,\n 41, 42, -1, -1, -1, 46, 47, -1, -1, -1,\n -1, -1, -1, -1, 55, -1, 57, -1, -1, 60,\n -1, 497, -1, -1, -1, -1, -1, -1, 69, -1,\n 71, -1, -1, 74, -1, -1, 77, 78, 381, -1,\n -1, -1, -1, 84, -1, -1, 87, -1, -1, -1,\n -1, -1, 528, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, 103, -1, 105, -1, 107, -1, -1, 110,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n 3, 4, -1, 6, -1, -1, -1, 10, 11, 12,\n -1, 14, 15, -1, 17, -1, 19, 20, 21, -1,\n -1, 24, -1, 26, 27, 28, 449, -1, 451, -1,\n 33, 34, -1, -1, -1, 38, -1, -1, 41, 42,\n -1, -1, -1, 46, 47, -1, -1, -1, -1, -1,\n -1, -1, 55, -1, 57, -1, -1, 60, -1, -1,\n -1, -1, -1, -1, -1, -1, 69, -1, 71, -1,\n -1, 74, -1, -1, 77, 78, 499, -1, 501, 38,\n -1, 84, 41, 42, 87, -1, -1, 46, -1, -1,\n -1, 50, 51, 52, 53, 54, 55, 56, 57, 522,\n 103, -1, 105, -1, 107, 3, 4, -1, 6, -1,\n -1, -1, 10, 11, 12, -1, 14, 540, -1, 17,\n -1, 19, 20, 21, -1, -1, 24, -1, 26, 27,\n 28, -1, -1, -1, -1, 33, 34, -1, -1, -1,\n 38, 100, -1, 41, 42, -1, -1, -1, 46, 47,\n -1, -1, -1, -1, -1, -1, -1, 55, -1, 57,\n -1, -1, 60, -1, -1, -1, -1, -1, -1, -1,\n -1, 69, -1, 71, -1, -1, 74, -1, -1, 77,\n 78, -1, -1, -1, -1, -1, 84, -1, -1, 87,\n 3, 4, 5, 6, -1, -1, -1, 10, 11, 12,\n -1, 14, -1, 5, -1, 103, -1, 105, -1, 107,\n -1, -1, -1, -1, 16, 17, -1, -1, -1, -1,\n 33, 34, -1, -1, -1, 38, -1, -1, 41, 42,\n -1, -1, -1, 46, 47, -1, 38, -1, -1, 41,\n 42, -1, 55, -1, 46, -1, -1, 60, 50, 51,\n 52, 53, 54, 55, 56, 57, -1, -1, 71, -1,\n -1, -1, 64, -1, 77, 78, -1, -1, -1, 3,\n 4, 84, 6, -1, 87, 88, 10, 11, 12, -1,\n 14, -1, 16, 17, -1, -1, -1, -1, -1, -1,\n 103, -1, 105, 106, 107, -1, -1, -1, 100, 33,\n 34, -1, -1, -1, 38, -1, -1, 41, 42, -1,\n -1, -1, 46, 47, -1, 38, -1, -1, 41, 42,\n -1, 55, -1, 46, -1, -1, 60, 
50, 51, 52,\n 53, 54, 55, 56, 57, -1, -1, 71, -1, -1,\n -1, 64, -1, 77, 78, -1, -1, -1, -1, -1,\n 84, -1, -1, 87, 3, 4, 5, 6, -1, -1,\n -1, 10, 11, 12, -1, 14, -1, -1, -1, 103,\n -1, 105, -1, 107, -1, -1, -1, 100, -1, -1,\n -1, -1, -1, -1, 33, 34, -1, -1, -1, 38,\n -1, -1, 41, 42, -1, -1, -1, 46, 47, -1,\n -1, -1, -1, -1, -1, -1, 55, -1, -1, -1,\n -1, 60, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, 71, -1, -1, -1, -1, -1, 77, 78,\n -1, -1, -1, 3, 4, 84, 6, -1, 87, 88,\n 10, 11, 12, -1, 14, -1, -1, 17, -1, -1,\n -1, -1, -1, -1, 103, -1, 105, -1, 107, -1,\n -1, -1, -1, 33, 34, -1, -1, -1, 38, -1,\n -1, 41, 42, -1, -1, -1, 46, 47, -1, -1,\n -1, -1, -1, -1, -1, 55, -1, -1, -1, -1,\n 60, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, 71, -1, -1, -1, -1, -1, 77, 78, -1,\n -1, -1, 3, 4, 84, 6, -1, 87, -1, 10,\n 11, 12, -1, 14, -1, 16, -1, -1, -1, -1,\n -1, -1, -1, 103, -1, 105, -1, 107, -1, -1,\n -1, -1, 33, 34, -1, -1, -1, 38, -1, -1,\n 41, 42, -1, -1, -1, 46, 47, -1, -1, -1,\n -1, -1, -1, -1, 55, -1, -1, 3, 4, 60,\n 6, -1, -1, -1, 10, 11, 12, -1, 14, -1,\n 71, -1, -1, -1, -1, -1, 77, 78, -1, -1,\n -1, -1, -1, 84, -1, -1, 87, 33, 34, -1,\n -1, -1, 38, -1, -1, 41, 42, -1, -1, -1,\n 46, 47, 103, -1, 105, -1, 107, -1, -1, 55,\n -1, -1, 3, 4, 60, 6, -1, -1, -1, 10,\n 11, 12, -1, 14, -1, 71, -1, -1, -1, -1,\n -1, 77, 78, -1, -1, -1, -1, -1, 84, -1,\n -1, 87, 33, 34, -1, -1, -1, 38, -1, -1,\n 41, 42, -1, -1, -1, 46, 47, 103, 104, 105,\n -1, 107, -1, -1, 55, -1, -1, -1, -1, 60,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n 71, -1, -1, -1, -1, -1, 77, 78, -1, -1,\n -1, 3, 4, 84, 6, -1, 87, -1, 10, 11,\n 12, -1, 14, -1, 16, -1, -1, -1, -1, -1,\n -1, -1, 103, -1, 105, 106, 107, -1, -1, -1,\n -1, 33, 34, -1, -1, -1, 38, -1, -1, 41,\n 42, -1, -1, -1, 46, 47, -1, -1, -1, -1,\n -1, -1, -1, 55, -1, -1, 3, 4, 60, 6,\n -1, -1, -1, 10, 11, 12, -1, 14, -1, 71,\n -1, -1, -1, -1, -1, 77, 78, -1, -1, -1,\n -1, -1, 84, -1, -1, 87, 33, 34, -1, -1,\n -1, 38, -1, -1, 41, 42, -1, -1, -1, 46,\n 47, 103, -1, 
105, -1, 107, -1, -1, 55, -1,\n 57, 3, 4, 60, 6, -1, -1, -1, 10, 11,\n 12, -1, 14, -1, 71, -1, -1, -1, -1, -1,\n 77, 78, -1, -1, -1, -1, -1, 84, -1, -1,\n 87, 33, 34, -1, -1, -1, 38, -1, -1, 41,\n 42, -1, -1, -1, 46, 47, 103, -1, 105, -1,\n 107, -1, -1, 55, -1, -1, 3, 4, 60, 6,\n -1, -1, -1, 10, 11, 12, -1, 14, -1, 71,\n -1, -1, -1, -1, -1, 77, 78, -1, -1, -1,\n -1, -1, 84, -1, -1, 87, -1, 34, -1, -1,\n -1, 38, -1, -1, 41, 42, -1, -1, -1, 46,\n 47, 103, -1, 105, -1, 107, -1, -1, 55, -1,\n -1, -1, -1, 60, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, 71, -1, -1, -1, -1, -1,\n 77, 78, -1, -1, -1, 3, 4, 84, 6, -1,\n 87, -1, 10, 11, 12, -1, 14, -1, 16, -1,\n -1, -1, -1, -1, 3, 4, 103, 6, 105, -1,\n 107, 10, 11, 12, -1, 14, 34, 16, -1, -1,\n -1, -1, -1, 3, 4, -1, 6, -1, -1, 47,\n 10, 11, 12, -1, 14, 34, 16, 55, -1, -1,\n -1, -1, 60, -1, -1, -1, -1, -1, 47, -1,\n -1, -1, -1, 71, 34, -1, 55, -1, -1, -1,\n 78, 60, -1, -1, -1, -1, -1, 47, -1, 87,\n -1, -1, 71, -1, -1, 55, -1, -1, -1, 78,\n 60, -1, -1, -1, -1, 103, -1, 105, 87, 107,\n -1, 71, -1, -1, -1, -1, -1, -1, 78, -1,\n -1, -1, 3, 4, 103, 6, 105, 87, 107, 10,\n 11, 12, -1, 14, -1, -1, -1, -1, -1, -1,\n -1, 3, 4, 103, 6, 105, -1, 107, 10, 11,\n 12, 32, 14, 34, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, 47, -1, -1, -1,\n -1, -1, 34, -1, 55, -1, -1, -1, -1, 60,\n 3, 4, -1, 6, -1, 47, -1, 10, 11, 12,\n 71, 14, -1, 55, -1, -1, -1, 78, 60, -1,\n -1, -1, -1, -1, -1, -1, 87, -1, -1, 71,\n -1, 34, -1, -1, -1, -1, 78, -1, -1, -1,\n -1, -1, 103, -1, 105, 87, 107, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, 103, -1, 105, -1, 107, 69, -1, 71, -1,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n 103, -1, 105, -1, 107\n};\n\n/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing\n symbol of state STATE-NUM. 
*/\nstatic const yytype_uint8 yystos[] =\n{\n 0, 3, 4, 6, 10, 11, 12, 14, 17, 19,\n 20, 21, 24, 26, 27, 28, 33, 34, 38, 41,\n 42, 46, 47, 55, 57, 60, 69, 71, 74, 77,\n 78, 84, 87, 103, 105, 107, 110, 112, 113, 114,\n 115, 116, 119, 120, 121, 125, 126, 128, 130, 131,\n 133, 147, 148, 149, 150, 151, 152, 153, 154, 155,\n 156, 157, 158, 159, 161, 162, 163, 164, 165, 166,\n 167, 168, 169, 171, 173, 182, 114, 128, 16, 176,\n 114, 114, 33, 77, 84, 127, 136, 148, 171, 173,\n 16, 137, 138, 141, 142, 143, 147, 34, 163, 25,\n 160, 161, 176, 158, 114, 128, 158, 76, 117, 118,\n 69, 161, 147, 158, 33, 77, 84, 134, 135, 147,\n 171, 173, 176, 158, 102, 149, 180, 181, 104, 127,\n 129, 106, 129, 3, 13, 108, 109, 183, 184, 185,\n 186, 0, 115, 134, 135, 96, 5, 82, 132, 5,\n 38, 41, 42, 50, 51, 52, 53, 54, 55, 56,\n 57, 64, 100, 114, 170, 172, 173, 176, 85, 34,\n 66, 81, 98, 99, 82, 72, 73, 58, 59, 63,\n 68, 69, 70, 71, 78, 79, 67, 74, 75, 76,\n 77, 48, 49, 65, 90, 105, 107, 177, 101, 114,\n 134, 135, 134, 135, 183, 113, 114, 144, 145, 146,\n 148, 148, 127, 176, 64, 100, 170, 172, 173, 127,\n 127, 139, 140, 141, 147, 32, 114, 80, 43, 176,\n 158, 47, 168, 76, 85, 69, 28, 29, 33, 43,\n 60, 174, 175, 176, 16, 137, 142, 147, 134, 135,\n 114, 64, 100, 170, 172, 173, 114, 134, 135, 134,\n 135, 114, 57, 85, 102, 85, 104, 106, 3, 3,\n 13, 108, 47, 3, 77, 96, 114, 176, 133, 176,\n 134, 135, 134, 135, 134, 135, 16, 142, 147, 134,\n 135, 134, 135, 134, 135, 134, 135, 114, 147, 163,\n 152, 153, 152, 153, 152, 153, 154, 155, 155, 156,\n 156, 156, 156, 156, 156, 156, 157, 157, 158, 158,\n 158, 158, 158, 162, 162, 162, 3, 105, 127, 5,\n 88, 106, 127, 178, 179, 25, 35, 163, 114, 114,\n 15, 22, 23, 122, 123, 124, 80, 43, 43, 174,\n 175, 114, 144, 127, 127, 127, 127, 15, 114, 43,\n 33, 176, 141, 147, 77, 84, 135, 147, 171, 173,\n 114, 161, 168, 117, 134, 135, 147, 147, 114, 139,\n 114, 43, 174, 175, 176, 16, 142, 134, 135, 134,\n 135, 134, 135, 134, 135, 149, 181, 127, 3, 107,\n 176, 96, 114, 114, 132, 
114, 132, 114, 114, 139,\n 114, 114, 114, 176, 114, 114, 150, 97, 97, 127,\n 32, 104, 57, 127, 127, 106, 85, 168, 176, 161,\n 176, 122, 124, 146, 148, 148, 127, 148, 148, 32,\n 141, 176, 147, 147, 100, 170, 172, 173, 114, 43,\n 176, 43, 174, 176, 15, 134, 135, 147, 147, 139,\n 114, 106, 186, 187, 188, 114, 176, 96, 132, 132,\n 15, 114, 152, 152, 32, 104, 127, 127, 178, 25,\n 114, 114, 176, 114, 43, 43, 174, 43, 176, 43,\n 174, 175, 134, 135, 114, 134, 135, 147, 114, 43,\n 176, 43, 174, 176, 15, 3, 85, 106, 114, 176,\n 127, 104, 114, 127, 127, 148, 147, 114, 147, 147,\n 114, 114, 43, 176, 134, 135, 134, 135, 147, 188,\n 114, 104, 43, 114, 43, 43, 174, 134, 135, 114,\n 43, 176, 127, 147, 114, 134, 135, 43\n};\n\n#define yyerrok\t\t(yyerrstatus = 0)\n#define yyclearin\t(yychar = YYEMPTY)\n#define YYEMPTY\t\t(-2)\n#define YYEOF\t\t0\n\n#define YYACCEPT\tgoto yyacceptlab\n#define YYABORT\t\tgoto yyabortlab\n#define YYERROR\t\tgoto yyerrorlab\n\n\n/* Like YYERROR except do call yyerror. This remains here temporarily\n to ease the transition to the new meaning of YYERROR, for GCC.\n Once GCC version 2 has supplanted version 1, this can go. */\n\n#define YYFAIL\t\tgoto yyerrlab\n\n#define YYRECOVERING() (!!yyerrstatus)\n\n#define YYBACKUP(Token, Value)\t\t\t\t\t\\\ndo\t\t\t\t\t\t\t\t\\\n if (yychar == YYEMPTY && yylen == 1)\t\t\t\t\\\n {\t\t\t\t\t\t\t\t\\\n yychar = (Token);\t\t\t\t\t\t\\\n yylval = (Value);\t\t\t\t\t\t\\\n yytoken = YYTRANSLATE (yychar);\t\t\t\t\\\n YYPOPSTACK (1);\t\t\t\t\t\t\\\n goto yybackup;\t\t\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\\\n else\t\t\t\t\t\t\t\t\\\n {\t\t\t\t\t\t\t\t\\\n yyerror (&yylloc, scanner, YY_(\"syntax error: cannot back up\")); \\\n YYERROR;\t\t\t\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\\\nwhile (YYID (0))\n\n\n#define YYTERROR\t1\n#define YYERRCODE\t256\n\n\n/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].\n If N is 0, then set CURRENT to the empty location which ends\n the previous symbol: RHS[0] (always defined). 
*/\n\n#define YYRHSLOC(Rhs, K) ((Rhs)[K])\n#ifndef YYLLOC_DEFAULT\n# define YYLLOC_DEFAULT(Current, Rhs, N)\t\t\t\t\\\n do\t\t\t\t\t\t\t\t\t\\\n if (YYID (N)) \\\n\t{\t\t\t\t\t\t\t\t\\\n\t (Current).first_line = YYRHSLOC (Rhs, 1).first_line;\t\\\n\t (Current).first_column = YYRHSLOC (Rhs, 1).first_column;\t\\\n\t (Current).last_line = YYRHSLOC (Rhs, N).last_line;\t\t\\\n\t (Current).last_column = YYRHSLOC (Rhs, N).last_column;\t\\\n\t}\t\t\t\t\t\t\t\t\\\n else\t\t\t\t\t\t\t\t\\\n\t{\t\t\t\t\t\t\t\t\\\n\t (Current).first_line = (Current).last_line =\t\t\\\n\t YYRHSLOC (Rhs, 0).last_line;\t\t\t\t\\\n\t (Current).first_column = (Current).last_column =\t\t\\\n\t YYRHSLOC (Rhs, 0).last_column;\t\t\t\t\\\n\t}\t\t\t\t\t\t\t\t\\\n while (YYID (0))\n#endif\n\n\n/* YY_LOCATION_PRINT -- Print the location on the stream.\n This macro was not mandated originally: define only if we know\n we won't break user code: when these are the locations we know. */\n\n#ifndef YY_LOCATION_PRINT\n# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL\n# define YY_LOCATION_PRINT(File, Loc)\t\t\t\\\n fprintf (File, \"%d.%d-%d.%d\",\t\t\t\\\n\t (Loc).first_line, (Loc).first_column,\t\\\n\t (Loc).last_line, (Loc).last_column)\n# else\n# define YY_LOCATION_PRINT(File, Loc) ((void) 0)\n# endif\n#endif\n\n\n/* YYLEX -- calling `yylex' with the right arguments. */\n\n#ifdef YYLEX_PARAM\n# define YYLEX yylex (&yylval, &yylloc, YYLEX_PARAM)\n#else\n# define YYLEX yylex (&yylval, &yylloc, scanner)\n#endif\n\n/* Enable debugging if requested. 
*/\n#if YYDEBUG\n\n# ifndef YYFPRINTF\n# include <stdio.h> /* INFRINGES ON USER NAME SPACE */\n# define YYFPRINTF fprintf\n# endif\n\n# define YYDPRINTF(Args)\t\t\t\\\ndo {\t\t\t\t\t\t\\\n if (yydebug)\t\t\t\t\t\\\n YYFPRINTF Args;\t\t\t\t\\\n} while (YYID (0))\n\n# define YY_SYMBOL_PRINT(Title, Type, Value, Location)\t\t\t \\\ndo {\t\t\t\t\t\t\t\t\t \\\n if (yydebug)\t\t\t\t\t\t\t\t \\\n {\t\t\t\t\t\t\t\t\t \\\n YYFPRINTF (stderr, \"%s \", Title);\t\t\t\t\t \\\n yy_symbol_print (stderr,\t\t\t\t\t\t \\\n\t\t Type, Value, Location, scanner); \\\n YYFPRINTF (stderr, \"\\n\");\t\t\t\t\t\t \\\n }\t\t\t\t\t\t\t\t\t \\\n} while (YYID (0))\n\n\n/*--------------------------------.\n| Print this symbol on YYOUTPUT. |\n`--------------------------------*/\n\n/*ARGSUSED*/\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic void\nyy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, UpScanner* scanner)\n#else\nstatic void\nyy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp, scanner)\n FILE *yyoutput;\n int yytype;\n YYSTYPE const * const yyvaluep;\n YYLTYPE const * const yylocationp;\n UpScanner* scanner;\n#endif\n{\n if (!yyvaluep)\n return;\n YYUSE (yylocationp);\n YYUSE (scanner);\n# ifdef YYPRINT\n if (yytype < YYNTOKENS)\n YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);\n# else\n YYUSE (yyoutput);\n# endif\n switch (yytype)\n {\n default:\n\tbreak;\n }\n}\n\n\n/*--------------------------------.\n| Print this symbol on YYOUTPUT. 
|\n`--------------------------------*/\n\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic void\nyy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, UpScanner* scanner)\n#else\nstatic void\nyy_symbol_print (yyoutput, yytype, yyvaluep, yylocationp, scanner)\n FILE *yyoutput;\n int yytype;\n YYSTYPE const * const yyvaluep;\n YYLTYPE const * const yylocationp;\n UpScanner* scanner;\n#endif\n{\n if (yytype < YYNTOKENS)\n YYFPRINTF (yyoutput, \"token %s (\", yytname[yytype]);\n else\n YYFPRINTF (yyoutput, \"nterm %s (\", yytname[yytype]);\n\n YY_LOCATION_PRINT (yyoutput, *yylocationp);\n YYFPRINTF (yyoutput, \": \");\n yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp, scanner);\n YYFPRINTF (yyoutput, \")\");\n}\n\n/*------------------------------------------------------------------.\n| yy_stack_print -- Print the state stack from its BOTTOM up to its |\n| TOP (included). |\n`------------------------------------------------------------------*/\n\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic void\nyy_stack_print (yytype_int16 *bottom, yytype_int16 *top)\n#else\nstatic void\nyy_stack_print (bottom, top)\n yytype_int16 *bottom;\n yytype_int16 *top;\n#endif\n{\n YYFPRINTF (stderr, \"Stack now\");\n for (; bottom <= top; ++bottom)\n YYFPRINTF (stderr, \" %d\", *bottom);\n YYFPRINTF (stderr, \"\\n\");\n}\n\n# define YY_STACK_PRINT(Bottom, Top)\t\t\t\t\\\ndo {\t\t\t\t\t\t\t\t\\\n if (yydebug)\t\t\t\t\t\t\t\\\n yy_stack_print ((Bottom), (Top));\t\t\t\t\\\n} while (YYID (0))\n\n\n/*------------------------------------------------.\n| Report that the YYRULE is going to be reduced. 
|\n`------------------------------------------------*/\n\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic void\nyy_reduce_print (YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule, UpScanner* scanner)\n#else\nstatic void\nyy_reduce_print (yyvsp, yylsp, yyrule, scanner)\n YYSTYPE *yyvsp;\n YYLTYPE *yylsp;\n int yyrule;\n UpScanner* scanner;\n#endif\n{\n int yynrhs = yyr2[yyrule];\n int yyi;\n unsigned long int yylno = yyrline[yyrule];\n YYFPRINTF (stderr, \"Reducing stack by rule %d (line %lu):\\n\",\n\t yyrule - 1, yylno);\n /* The symbols being reduced. */\n for (yyi = 0; yyi < yynrhs; yyi++)\n {\n fprintf (stderr, \" $%d = \", yyi + 1);\n yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],\n\t\t &(yyvsp[(yyi + 1) - (yynrhs)])\n\t\t , &(yylsp[(yyi + 1) - (yynrhs)])\t\t , scanner);\n fprintf (stderr, \"\\n\");\n }\n}\n\n# define YY_REDUCE_PRINT(Rule)\t\t\\\ndo {\t\t\t\t\t\\\n if (yydebug)\t\t\t\t\\\n yy_reduce_print (yyvsp, yylsp, Rule, scanner); \\\n} while (YYID (0))\n\n/* Nonzero means print parse trace. It is left uninitialized so that\n multiple parsers can coexist. */\nint yydebug;\n#else /* !YYDEBUG */\n# define YYDPRINTF(Args)\n# define YY_SYMBOL_PRINT(Title, Type, Value, Location)\n# define YY_STACK_PRINT(Bottom, Top)\n# define YY_REDUCE_PRINT(Rule)\n#endif /* !YYDEBUG */\n\n\n/* YYINITDEPTH -- initial size of the parser's stacks. */\n#ifndef\tYYINITDEPTH\n# define YYINITDEPTH 200\n#endif\n\n/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only\n if the built-in stack extension method is used).\n\n Do not make this value too large; the results are undefined if\n YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)\n evaluated with infinite-precision integer arithmetic. */\n\n#ifndef YYMAXDEPTH\n# define YYMAXDEPTH 10000\n#endif\n\n\f\n\n#if YYERROR_VERBOSE\n\n# ifndef yystrlen\n# if defined __GLIBC__ && defined _STRING_H\n# define yystrlen strlen\n# else\n/* Return the length of YYSTR. 
*/\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic YYSIZE_T\nyystrlen (const char *yystr)\n#else\nstatic YYSIZE_T\nyystrlen (yystr)\n const char *yystr;\n#endif\n{\n YYSIZE_T yylen;\n for (yylen = 0; yystr[yylen]; yylen++)\n continue;\n return yylen;\n}\n# endif\n# endif\n\n# ifndef yystpcpy\n# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE\n# define yystpcpy stpcpy\n# else\n/* Copy YYSRC to YYDEST, returning the address of the terminating '\\0' in\n YYDEST. */\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic char *\nyystpcpy (char *yydest, const char *yysrc)\n#else\nstatic char *\nyystpcpy (yydest, yysrc)\n char *yydest;\n const char *yysrc;\n#endif\n{\n char *yyd = yydest;\n const char *yys = yysrc;\n\n while ((*yyd++ = *yys++) != '\\0')\n continue;\n\n return yyd - 1;\n}\n# endif\n# endif\n\n# ifndef yytnamerr\n/* Copy to YYRES the contents of YYSTR after stripping away unnecessary\n quotes and backslashes, so that it's suitable for yyerror. The\n heuristic is that double-quoting is unnecessary unless the string\n contains an apostrophe, a comma, or backslash (other than\n backslash-backslash). YYSTR is taken from yytname. If YYRES is\n null, do not copy; instead, return the length of what the result\n would have been. */\nstatic YYSIZE_T\nyytnamerr (char *yyres, const char *yystr)\n{\n if (*yystr == '\"')\n {\n YYSIZE_T yyn = 0;\n char const *yyp = yystr;\n\n for (;;)\n\tswitch (*++yyp)\n\t {\n\t case '\\'':\n\t case ',':\n\t goto do_not_strip_quotes;\n\n\t case '\\\\':\n\t if (*++yyp != '\\\\')\n\t goto do_not_strip_quotes;\n\t /* Fall through. */\n\t default:\n\t if (yyres)\n\t yyres[yyn] = *yyp;\n\t yyn++;\n\t break;\n\n\t case '\"':\n\t if (yyres)\n\t yyres[yyn] = '\\0';\n\t return yyn;\n\t }\n do_not_strip_quotes: ;\n }\n\n if (! 
yyres)\n return yystrlen (yystr);\n\n return yystpcpy (yyres, yystr) - yyres;\n}\n# endif\n\n/* Copy into YYRESULT an error message about the unexpected token\n YYCHAR while in state YYSTATE. Return the number of bytes copied,\n including the terminating null byte. If YYRESULT is null, do not\n copy anything; just return the number of bytes that would be\n copied. As a special case, return 0 if an ordinary \"syntax error\"\n message will do. Return YYSIZE_MAXIMUM if overflow occurs during\n size calculation. */\nstatic YYSIZE_T\nyysyntax_error (char *yyresult, int yystate, int yychar)\n{\n int yyn = yypact[yystate];\n\n if (! (YYPACT_NINF < yyn && yyn <= YYLAST))\n return 0;\n else\n {\n int yytype = YYTRANSLATE (yychar);\n YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);\n YYSIZE_T yysize = yysize0;\n YYSIZE_T yysize1;\n int yysize_overflow = 0;\n enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };\n char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];\n int yyx;\n\n# if 0\n /* This is so xgettext sees the translatable formats that are\n\t constructed on the fly. */\n YY_(\"syntax error, unexpected %s\");\n YY_(\"syntax error, unexpected %s, expecting %s\");\n YY_(\"syntax error, unexpected %s, expecting %s or %s\");\n YY_(\"syntax error, unexpected %s, expecting %s or %s or %s\");\n YY_(\"syntax error, unexpected %s, expecting %s or %s or %s or %s\");\n# endif\n char *yyfmt;\n char const *yyf;\n static char const yyunexpected[] = \"syntax error, unexpected %s\";\n static char const yyexpecting[] = \", expecting %s\";\n static char const yyor[] = \" or %s\";\n char yyformat[sizeof yyunexpected\n\t\t + sizeof yyexpecting - 1\n\t\t + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)\n\t\t * (sizeof yyor - 1))];\n char const *yyprefix = yyexpecting;\n\n /* Start YYX at -YYN if negative to avoid negative indexes in\n\t YYCHECK. */\n int yyxbegin = yyn < 0 ? -yyn : 0;\n\n /* Stay within bounds of both yycheck and yytname. 
*/\n int yychecklim = YYLAST - yyn + 1;\n int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;\n int yycount = 1;\n\n yyarg[0] = yytname[yytype];\n yyfmt = yystpcpy (yyformat, yyunexpected);\n\n for (yyx = yyxbegin; yyx < yyxend; ++yyx)\n\tif (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)\n\t {\n\t if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)\n\t {\n\t\tyycount = 1;\n\t\tyysize = yysize0;\n\t\tyyformat[sizeof yyunexpected - 1] = '\\0';\n\t\tbreak;\n\t }\n\t yyarg[yycount++] = yytname[yyx];\n\t yysize1 = yysize + yytnamerr (0, yytname[yyx]);\n\t yysize_overflow |= (yysize1 < yysize);\n\t yysize = yysize1;\n\t yyfmt = yystpcpy (yyfmt, yyprefix);\n\t yyprefix = yyor;\n\t }\n\n yyf = YY_(yyformat);\n yysize1 = yysize + yystrlen (yyf);\n yysize_overflow |= (yysize1 < yysize);\n yysize = yysize1;\n\n if (yysize_overflow)\n\treturn YYSIZE_MAXIMUM;\n\n if (yyresult)\n\t{\n\t /* Avoid sprintf, as that infringes on the user's name space.\n\t Don't have undefined behavior even if the translation\n\t produced a string with the wrong number of \"%s\"s. */\n\t char *yyp = yyresult;\n\t int yyi = 0;\n\t while ((*yyp = *yyf) != '\\0')\n\t {\n\t if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)\n\t\t{\n\t\t yyp += yytnamerr (yyp, yyarg[yyi++]);\n\t\t yyf += 2;\n\t\t}\n\t else\n\t\t{\n\t\t yyp++;\n\t\t yyf++;\n\t\t}\n\t }\n\t}\n return yysize;\n }\n}\n#endif /* YYERROR_VERBOSE */\n\f\n\n/*-----------------------------------------------.\n| Release the memory associated to this symbol. 
|\n`-----------------------------------------------*/\n\n/*ARGSUSED*/\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nstatic void\nyydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp, UpScanner* scanner)\n#else\nstatic void\nyydestruct (yymsg, yytype, yyvaluep, yylocationp, scanner)\n const char *yymsg;\n int yytype;\n YYSTYPE *yyvaluep;\n YYLTYPE *yylocationp;\n UpScanner* scanner;\n#endif\n{\n YYUSE (yyvaluep);\n YYUSE (yylocationp);\n YYUSE (scanner);\n\n if (!yymsg)\n yymsg = \"Deleting\";\n YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);\n\n switch (yytype)\n {\n\n default:\n\tbreak;\n }\n}\n\f\n\n/* Prevent warnings from -Wmissing-prototypes. */\n\n#ifdef YYPARSE_PARAM\n#if defined __STDC__ || defined __cplusplus\nint yyparse (void *YYPARSE_PARAM);\n#else\nint yyparse ();\n#endif\n#else /* ! YYPARSE_PARAM */\n#if defined __STDC__ || defined __cplusplus\nint yyparse (UpScanner* scanner);\n#else\nint yyparse ();\n#endif\n#endif /* ! YYPARSE_PARAM */\n\n\n\n\n\n\n/*----------.\n| yyparse. |\n`----------*/\n\n#ifdef YYPARSE_PARAM\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nint\nyyparse (void *YYPARSE_PARAM)\n#else\nint\nyyparse (YYPARSE_PARAM)\n void *YYPARSE_PARAM;\n#endif\n#else /* ! YYPARSE_PARAM */\n#if (defined __STDC__ || defined __C99__FUNC__ \\\n || defined __cplusplus || defined _MSC_VER)\nint\nyyparse (UpScanner* scanner)\n#else\nint\nyyparse (scanner)\n UpScanner* scanner;\n#endif\n#endif\n{\n /* The look-ahead symbol. */\nint yychar;\n\n/* The semantic value of the look-ahead symbol. */\nYYSTYPE yylval;\n\n/* Number of syntax errors so far. */\nint yynerrs;\n/* Location data for the look-ahead symbol. */\nYYLTYPE yylloc;\n\n int yystate;\n int yyn;\n int yyresult;\n /* Number of tokens to shift before error messages enabled. */\n int yyerrstatus;\n /* Look-ahead token as an internal (translated) token number. 
*/\n int yytoken = 0;\n#if YYERROR_VERBOSE\n /* Buffer for error messages, and its allocated size. */\n char yymsgbuf[128];\n char *yymsg = yymsgbuf;\n YYSIZE_T yymsg_alloc = sizeof yymsgbuf;\n#endif\n\n /* Three stacks and their tools:\n `yyss': related to states,\n `yyvs': related to semantic values,\n `yyls': related to locations.\n\n Refer to the stacks thru separate pointers, to allow yyoverflow\n to reallocate them elsewhere. */\n\n /* The state stack. */\n yytype_int16 yyssa[YYINITDEPTH];\n yytype_int16 *yyss = yyssa;\n yytype_int16 *yyssp;\n\n /* The semantic value stack. */\n YYSTYPE yyvsa[YYINITDEPTH];\n YYSTYPE *yyvs = yyvsa;\n YYSTYPE *yyvsp;\n\n /* The location stack. */\n YYLTYPE yylsa[YYINITDEPTH];\n YYLTYPE *yyls = yylsa;\n YYLTYPE *yylsp;\n /* The locations where the error started and ended. */\n YYLTYPE yyerror_range[2];\n\n#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N), yylsp -= (N))\n\n YYSIZE_T yystacksize = YYINITDEPTH;\n\n /* The variables used to return semantic value and location from the\n action routines. */\n YYSTYPE yyval;\n YYLTYPE yyloc;\n\n /* The number of symbols on the RHS of the reduced rule.\n Keep to zero when no symbol should be popped. */\n int yylen = 0;\n\n YYDPRINTF ((stderr, \"Starting parse\\n\"));\n\n yystate = 0;\n yyerrstatus = 0;\n yynerrs = 0;\n yychar = YYEMPTY;\t\t/* Cause a token to be read. */\n\n /* Initialize stack pointers.\n Waste one element of value and location stack\n so that they stay on the same level as the state stack.\n The wasted elements are never initialized. */\n\n yyssp = yyss;\n yyvsp = yyvs;\n yylsp = yyls;\n#if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL\n /* Initialize the default location before parsing starts. */\n yylloc.first_line = yylloc.last_line = 1;\n yylloc.first_column = yylloc.last_column = 0;\n#endif\n\n goto yysetstate;\n\n/*------------------------------------------------------------.\n| yynewstate -- Push a new state, which is found in yystate. 
|\n`------------------------------------------------------------*/\n yynewstate:\n /* In all cases, when you get here, the value and location stacks\n have just been pushed. So pushing a state here evens the stacks. */\n yyssp++;\n\n yysetstate:\n *yyssp = yystate;\n\n if (yyss + yystacksize - 1 <= yyssp)\n {\n /* Get the current used size of the three stacks, in elements. */\n YYSIZE_T yysize = yyssp - yyss + 1;\n\n#ifdef yyoverflow\n {\n\t/* Give user a chance to reallocate the stack. Use copies of\n\t these so that the &'s don't force the real ones into\n\t memory. */\n\tYYSTYPE *yyvs1 = yyvs;\n\tyytype_int16 *yyss1 = yyss;\n\tYYLTYPE *yyls1 = yyls;\n\n\t/* Each stack pointer address is followed by the size of the\n\t data in use in that stack, in bytes. This used to be a\n\t conditional around just the two extra args, but that might\n\t be undefined if yyoverflow is a macro. */\n\tyyoverflow (YY_(\"memory exhausted\"),\n\t\t &yyss1, yysize * sizeof (*yyssp),\n\t\t &yyvs1, yysize * sizeof (*yyvsp),\n\t\t &yyls1, yysize * sizeof (*yylsp),\n\t\t &yystacksize);\n\tyyls = yyls1;\n\tyyss = yyss1;\n\tyyvs = yyvs1;\n }\n#else /* no yyoverflow */\n# ifndef YYSTACK_RELOCATE\n goto yyexhaustedlab;\n# else\n /* Extend the stack our own way. */\n if (YYMAXDEPTH <= yystacksize)\n\tgoto yyexhaustedlab;\n yystacksize *= 2;\n if (YYMAXDEPTH < yystacksize)\n\tyystacksize = YYMAXDEPTH;\n\n {\n\tyytype_int16 *yyss1 = yyss;\n\tunion yyalloc *yyptr =\n\t (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));\n\tif (! 
yyptr)\n\t goto yyexhaustedlab;\n\tYYSTACK_RELOCATE (yyss);\n\tYYSTACK_RELOCATE (yyvs);\n\tYYSTACK_RELOCATE (yyls);\n# undef YYSTACK_RELOCATE\n\tif (yyss1 != yyssa)\n\t YYSTACK_FREE (yyss1);\n }\n# endif\n#endif /* no yyoverflow */\n\n yyssp = yyss + yysize - 1;\n yyvsp = yyvs + yysize - 1;\n yylsp = yyls + yysize - 1;\n\n YYDPRINTF ((stderr, \"Stack size increased to %lu\\n\",\n\t\t (unsigned long int) yystacksize));\n\n if (yyss + yystacksize - 1 <= yyssp)\n\tYYABORT;\n }\n\n YYDPRINTF ((stderr, \"Entering state %d\\n\", yystate));\n\n goto yybackup;\n\n/*-----------.\n| yybackup. |\n`-----------*/\nyybackup:\n\n /* Do appropriate processing given the current state. Read a\n look-ahead token if we need one and don't already have one. */\n\n /* First try to decide what to do without reference to look-ahead token. */\n yyn = yypact[yystate];\n if (yyn == YYPACT_NINF)\n goto yydefault;\n\n /* Not known => get a look-ahead token if don't already have one. */\n\n /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */\n if (yychar == YYEMPTY)\n {\n YYDPRINTF ((stderr, \"Reading a token: \"));\n yychar = YYLEX;\n }\n\n if (yychar <= YYEOF)\n {\n yychar = yytoken = YYEOF;\n YYDPRINTF ((stderr, \"Now at end of input.\\n\"));\n }\n else\n {\n yytoken = YYTRANSLATE (yychar);\n YY_SYMBOL_PRINT (\"Next token is\", yytoken, &yylval, &yylloc);\n }\n\n /* If the proper action on seeing token YYTOKEN is to reduce or to\n detect an error, take that action. */\n yyn += yytoken;\n if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)\n goto yydefault;\n yyn = yytable[yyn];\n if (yyn <= 0)\n {\n if (yyn == 0 || yyn == YYTABLE_NINF)\n\tgoto yyerrlab;\n yyn = -yyn;\n goto yyreduce;\n }\n\n if (yyn == YYFINAL)\n YYACCEPT;\n\n /* Count tokens shifted since error; after three, turn off error\n status. */\n if (yyerrstatus)\n yyerrstatus--;\n\n /* Shift the look-ahead token. 
*/\n YY_SYMBOL_PRINT (\"Shifting\", yytoken, &yylval, &yylloc);\n\n /* Discard the shifted token unless it is eof. */\n if (yychar != YYEOF)\n yychar = YYEMPTY;\n\n yystate = yyn;\n *++yyvsp = yylval;\n *++yylsp = yylloc;\n goto yynewstate;\n\n\n/*-----------------------------------------------------------.\n| yydefault -- do the default action for the current state. |\n`-----------------------------------------------------------*/\nyydefault:\n yyn = yydefact[yystate];\n if (yyn == 0)\n goto yyerrlab;\n goto yyreduce;\n\n\n/*-----------------------------.\n| yyreduce -- Do a reduction. |\n`-----------------------------*/\nyyreduce:\n /* yyn is the number of a rule to reduce with. */\n yylen = yyr2[yyn];\n\n /* If YYLEN is nonzero, implement the default value of the action:\n `$$ = $1'.\n\n Otherwise, the following line sets YYVAL to garbage.\n This behavior is undocumented and Bison\n users should not rely upon it. Assigning to YYVAL\n unconditionally makes the parser a bit smaller, and it avoids a\n GCC warning that YYVAL may be used uninitialized. */\n yyval = yyvsp[1-yylen];\n\n /* Default location. 
*/\n YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);\n YY_REDUCE_PRINT (yyn);\n switch (yyn)\n {\n case 2:\n#line 152 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (1)].objectValue); scanner->rootSyntax = (yyvsp[(1) - (1)].objectValue); ;}\n break;\n\n case 3:\n#line 154 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (1)].objectValue); scanner->rootSyntax = (yyvsp[(1) - (1)].objectValue); ;}\n break;\n\n case 4:\n#line 159 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (1)])); APPEND((yyval.objectValue), (yyvsp[(1) - (1)].objectValue)); ;}\n break;\n\n case 5:\n#line 161 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (2)].objectValue); APPEND((yyvsp[(1) - (2)].objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 6:\n#line 166 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = NULL; ;}\n break;\n\n case 14:\n#line 181 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IMPORT((yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 15:\n#line 186 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (2)])); APPEND((yyval.objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 16:\n#line 188 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (3)].objectValue); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 17:\n#line 193 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (1)])); APPEND((yyval.objectValue), (yyvsp[(1) - (1)].objectValue)); ;}\n break;\n\n case 18:\n#line 195 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (3)].objectValue); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); 
;}\n break;\n\n case 19:\n#line 200 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpReturnSyntaxType, (yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 20:\n#line 202 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpReturnSyntaxType, (yylsp[(1) - (2)]), PARSE_UNDEFINED((yylsp[(1) - (2)]))); ;}\n break;\n\n case 21:\n#line 204 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpContinueSyntaxType, (yylsp[(1) - (2)]), NULL); ;}\n break;\n\n case 22:\n#line 206 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = NODE(UpBreakSyntaxType, (yylsp[(1) - (2)])); ;}\n break;\n\n case 23:\n#line 208 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpThrowSyntaxType, (yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 24:\n#line 210 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpThrowSyntaxType, (yylsp[(1) - (2)]), PARSE_UNDEFINED((yylsp[(1) - (2)]))); ;}\n break;\n\n case 25:\n#line 215 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_WHILE((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 26:\n#line 220 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRY((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), (yyvsp[(4) - (4)].objectValue), NULL); ;}\n break;\n\n case 27:\n#line 222 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRY((yylsp[(1) - (5)]), (yyvsp[(2) - (5)].objectValue), (yyvsp[(4) - (5)].objectValue), (yyvsp[(5) - (5)].objectValue)); ;}\n break;\n\n case 28:\n#line 224 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRY((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), NULL, (yyvsp[(4) - (4)].objectValue)); ;}\n break;\n\n case 
29:\n#line 229 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CATCH((yylsp[(1) - (3)]), NULL, (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 30:\n#line 231 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CATCH((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue), NULL); ;}\n break;\n\n case 31:\n#line 233 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CATCH((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 32:\n#line 238 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (1)])); APPEND((yyval.objectValue), (yyvsp[(1) - (1)].objectValue)); ;}\n break;\n\n case 33:\n#line 240 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (2)].objectValue); APPEND((yyvsp[(1) - (2)].objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 34:\n#line 245 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpFinallySyntaxType, (yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 35:\n#line 250 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_DECLARATION((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), (yyvsp[(2) - (2)].objectValue), NULL, NULL); ;}\n break;\n\n case 36:\n#line 252 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_DECLARATION((yylsp[(1) - (5)]), (yyvsp[(1) - (5)].operatorValue), (yyvsp[(2) - (5)].objectValue), NULL, (yyvsp[(5) - (5)].objectValue)); ;}\n break;\n\n case 37:\n#line 254 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_DECLARATION((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].operatorValue), (yyvsp[(2) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), NULL); ;}\n break;\n\n case 38:\n#line 256 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { 
(yyval.objectValue) = PARSE_DECLARATION((yylsp[(1) - (6)]), (yyvsp[(1) - (6)].operatorValue), (yyvsp[(2) - (6)].objectValue), (yyvsp[(3) - (6)].objectValue), (yyvsp[(5) - (6)].objectValue)); ;}\n break;\n\n case 39:\n#line 258 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_DECLARATION((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].operatorValue), (yyvsp[(2) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue), NULL); ;}\n break;\n\n case 40:\n#line 260 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_DECLARATION((yylsp[(1) - (7)]), (yyvsp[(1) - (7)].operatorValue), (yyvsp[(2) - (7)].objectValue), (yyvsp[(3) - (7)].objectValue), (yyvsp[(6) - (7)].objectValue)); ;}\n break;\n\n case 41:\n#line 265 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpPublic; ;}\n break;\n\n case 42:\n#line 267 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpPrivate; ;}\n break;\n\n case 43:\n#line 269 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpExtra; ;}\n break;\n\n case 47:\n#line 283 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ENSURE_SET((yyvsp[(1) - (3)].objectValue)); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 48:\n#line 285 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (2)].objectValue); ;}\n break;\n\n case 50:\n#line 291 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_WHERE((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 51:\n#line 293 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_WHERE((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 53:\n#line 299 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = 
APPEND_ARGS((yyvsp[(1) - (2)].objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 54:\n#line 304 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].stringValue), (yyvsp[(2) - (2)].objectValue), NULL); ;}\n break;\n\n case 55:\n#line 306 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].stringValue), (yyvsp[(2) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 56:\n#line 308 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].stringValue), (yyvsp[(2) - (3)].objectValue), NULL); ;}\n break;\n\n case 57:\n#line 310 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].stringValue), (yyvsp[(2) - (4)].objectValue), (yyvsp[(4) - (4)].objectValue)); ;}\n break;\n\n case 58:\n#line 313 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (2)]), NULL, (yyvsp[(2) - (2)].objectValue), NULL); ;}\n break;\n\n case 59:\n#line 315 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (3)]), NULL, (yyvsp[(2) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 60:\n#line 317 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (3)]), NULL, (yyvsp[(2) - (3)].objectValue), NULL); ;}\n break;\n\n case 61:\n#line 319 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (4)]), NULL, (yyvsp[(2) - (4)].objectValue), (yyvsp[(4) - (4)].objectValue)); ;}\n break;\n\n case 63:\n#line 325 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = APPEND_ARGS((yyvsp[(1) - (3)].objectValue), PARSE_ARG((yylsp[(2) - (3)]), NULL, (yyvsp[(2) - (3)].objectValue), 
NULL)); ;}\n break;\n\n case 64:\n#line 327 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), PARSE_ARG((yylsp[(2) - (4)]), NULL, (yyvsp[(3) - (4)].objectValue), NULL)); ;}\n break;\n\n case 65:\n#line 329 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), PARSE_ARG((yylsp[(2) - (3)]), NULL, (yyvsp[(3) - (3)].objectValue), NULL)); ;}\n break;\n\n case 66:\n#line 331 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), PARSE_ARG((yylsp[(2) - (4)]), (yyvsp[(2) - (4)].stringValue), (yyvsp[(3) - (4)].objectValue), NULL)); ;}\n break;\n\n case 67:\n#line 333 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), PARSE_ARG((yylsp[(2) - (3)]), (yyvsp[(2) - (3)].stringValue), (yyvsp[(3) - (3)].objectValue), NULL)); ;}\n break;\n\n case 68:\n#line 336 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (5)]), PARSE_ASSIGNMENT((yylsp[(1) - (5)]), (yyvsp[(2) - (5)].operatorValue), (yyvsp[(1) - (5)].objectValue), (yyvsp[(3) - (5)].objectValue)), (yyvsp[(4) - (5)].objectValue), false); ;}\n break;\n\n case 69:\n#line 338 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ASSIGNMENT((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].operatorValue), (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 70:\n#line 340 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ASSIGNMENT((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 71:\n#line 343 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = 
PARSE_1(UpPrintSyntaxType, (yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 72:\n#line 345 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpPrintSyntaxType, (yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 73:\n#line 348 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].operatorValue), (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 74:\n#line 350 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 75:\n#line 353 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].operatorValue), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 76:\n#line 355 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), NULL); ;}\n break;\n\n case 77:\n#line 357 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 78:\n#line 360 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue), (yyvsp[(2) - (4)].operatorValue)); ;}\n break;\n\n case 79:\n#line 362 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), (yyvsp[(2) - (3)].operatorValue)); ;}\n break;\n\n case 80:\n#line 364 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (3)]), 
NULL, (yyvsp[(2) - (3)].objectValue), (yyvsp[(1) - (3)].operatorValue)); ;}\n break;\n\n case 81:\n#line 366 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (2)]), NULL, (yyvsp[(2) - (2)].objectValue), (yyvsp[(1) - (2)].operatorValue)); ;}\n break;\n\n case 82:\n#line 369 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IS((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 83:\n#line 371 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IS((yylsp[(1) - (5)]), (yyvsp[(1) - (5)].objectValue), (yyvsp[(4) - (5)].objectValue)); ;}\n break;\n\n case 84:\n#line 374 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (3)].objectValue); ;}\n break;\n\n case 85:\n#line 376 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(3) - (4)].objectValue); ;}\n break;\n\n case 86:\n#line 378 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (2)].objectValue); ;}\n break;\n\n case 87:\n#line 381 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (7)]), (yyvsp[(2) - (7)].objectValue), (yyvsp[(4) - (7)].objectValue), NULL, (yyvsp[(6) - (7)].objectValue), (yyvsp[(3) - (7)].operatorValue), false); ;}\n break;\n\n case 88:\n#line 383 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), NULL, (yyvsp[(6) - (6)].objectValue), (yyvsp[(3) - (6)].operatorValue), false); ;}\n break;\n\n case 89:\n#line 385 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), NULL, (yyvsp[(5) - (6)].objectValue), (yyvsp[(3) - (6)].operatorValue), 
false); ;}\n break;\n\n case 90:\n#line 387 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), NULL, NULL, (yyvsp[(3) - (4)].objectValue), 0, false); ;}\n break;\n\n case 91:\n#line 390 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (9)]), (yyvsp[(2) - (9)].objectValue), (yyvsp[(4) - (9)].objectValue), (yyvsp[(6) - (9)].objectValue), (yyvsp[(8) - (9)].objectValue), (yyvsp[(3) - (9)].operatorValue), (yyvsp[(5) - (9)].operatorValue)); ;}\n break;\n\n case 92:\n#line 392 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (8)]), (yyvsp[(2) - (8)].objectValue), (yyvsp[(4) - (8)].objectValue), (yyvsp[(6) - (8)].objectValue), (yyvsp[(8) - (8)].objectValue), (yyvsp[(3) - (8)].operatorValue), (yyvsp[(5) - (8)].operatorValue)); ;}\n break;\n\n case 93:\n#line 394 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (8)]), (yyvsp[(2) - (8)].objectValue), (yyvsp[(4) - (8)].objectValue), (yyvsp[(6) - (8)].objectValue), (yyvsp[(7) - (8)].objectValue), (yyvsp[(3) - (8)].operatorValue), (yyvsp[(5) - (8)].operatorValue)); ;}\n break;\n\n case 94:\n#line 396 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), NULL, (yyvsp[(4) - (6)].objectValue), (yyvsp[(5) - (6)].objectValue), 0, (yyvsp[(3) - (6)].operatorValue)); ;}\n break;\n\n case 95:\n#line 399 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (5)]), (yyvsp[(2) - (5)].objectValue), NULL, (yyvsp[(4) - (5)].objectValue), false, false); ;}\n break;\n\n case 96:\n#line 401 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), NULL, (yyvsp[(4) - (4)].objectValue), 
false, false); ;}\n break;\n\n case 97:\n#line 404 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (7)]), (yyvsp[(2) - (7)].objectValue), (yyvsp[(4) - (7)].objectValue), (yyvsp[(6) - (7)].objectValue), false, (yyvsp[(3) - (7)].operatorValue)); ;}\n break;\n\n case 98:\n#line 406 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), (yyvsp[(6) - (6)].objectValue), false, (yyvsp[(3) - (6)].operatorValue)); ;}\n break;\n\n case 100:\n#line 412 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ASSIGNMENT((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 101:\n#line 415 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), NULL); APPEND_ARGS((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 102:\n#line 418 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpPrintSyntaxType, (yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 103:\n#line 421 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 104:\n#line 423 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 105:\n#line 425 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), NULL); ;}\n break;\n\n case 106:\n#line 428 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { 
(yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), (yyvsp[(2) - (3)].operatorValue)); ;}\n break;\n\n case 107:\n#line 430 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (2)]), NULL, (yyvsp[(2) - (2)].objectValue), (yyvsp[(1) - (2)].operatorValue)); ;}\n break;\n\n case 108:\n#line 433 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IS((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 109:\n#line 435 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IS((yylsp[(1) - (5)]), (yyvsp[(1) - (5)].objectValue), (yyvsp[(4) - (5)].objectValue)); ;}\n break;\n\n case 110:\n#line 438 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (3)].objectValue); ;}\n break;\n\n case 111:\n#line 440 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(3) - (4)].objectValue); ;}\n break;\n\n case 112:\n#line 442 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (2)].objectValue); ;}\n break;\n\n case 113:\n#line 445 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), NULL, (yyvsp[(6) - (6)].objectValue), (yyvsp[(3) - (6)].operatorValue), false); ;}\n break;\n\n case 114:\n#line 447 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (5)]), (yyvsp[(2) - (5)].objectValue), (yyvsp[(4) - (5)].objectValue), NULL, (yyvsp[(5) - (5)].objectValue), (yyvsp[(3) - (5)].operatorValue), false); ;}\n break;\n\n case 115:\n#line 449 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue), NULL, NULL, 
(yyvsp[(3) - (3)].objectValue), 0, false); ;}\n break;\n\n case 116:\n#line 452 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (8)]), (yyvsp[(2) - (8)].objectValue), (yyvsp[(4) - (8)].objectValue), (yyvsp[(6) - (8)].objectValue), (yyvsp[(8) - (8)].objectValue), (yyvsp[(3) - (8)].operatorValue), (yyvsp[(5) - (8)].operatorValue)); ;}\n break;\n\n case 117:\n#line 454 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (7)]), (yyvsp[(2) - (7)].objectValue), (yyvsp[(4) - (7)].objectValue), (yyvsp[(6) - (7)].objectValue), (yyvsp[(7) - (7)].objectValue), (yyvsp[(3) - (7)].operatorValue), (yyvsp[(5) - (7)].operatorValue)); ;}\n break;\n\n case 118:\n#line 456 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (5)]), (yyvsp[(2) - (5)].objectValue), NULL, (yyvsp[(4) - (5)].objectValue), (yyvsp[(5) - (5)].objectValue), 0, (yyvsp[(3) - (5)].operatorValue)); ;}\n break;\n\n case 119:\n#line 459 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), NULL, (yyvsp[(4) - (4)].objectValue), false, false); ;}\n break;\n\n case 120:\n#line 461 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), (yyvsp[(6) - (6)].objectValue), false, (yyvsp[(3) - (6)].operatorValue)); ;}\n break;\n\n case 122:\n#line 467 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ASSIGNMENT((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 123:\n#line 470 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), NULL); APPEND_ARGS((yyval.objectValue), 
(yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 124:\n#line 473 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpPrintSyntaxType, (yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 125:\n#line 476 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 126:\n#line 478 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 127:\n#line 481 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), (yyvsp[(2) - (3)].operatorValue)); ;}\n break;\n\n case 128:\n#line 483 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (2)]), NULL, (yyvsp[(2) - (2)].objectValue), (yyvsp[(1) - (2)].operatorValue)); ;}\n break;\n\n case 129:\n#line 486 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), NULL, (yyvsp[(6) - (6)].objectValue), (yyvsp[(3) - (6)].operatorValue), false); ;}\n break;\n\n case 130:\n#line 489 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (8)]), (yyvsp[(2) - (8)].objectValue), (yyvsp[(4) - (8)].objectValue), (yyvsp[(6) - (8)].objectValue), (yyvsp[(8) - (8)].objectValue), (yyvsp[(3) - (8)].operatorValue), (yyvsp[(5) - (8)].operatorValue)); ;}\n break;\n\n case 131:\n#line 492 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), NULL, (yyvsp[(4) - (4)].objectValue), 
false, false); ;}\n break;\n\n case 132:\n#line 494 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), (yyvsp[(6) - (6)].objectValue), false, (yyvsp[(3) - (6)].operatorValue)); ;}\n break;\n\n case 134:\n#line 500 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ASSIGNMENT((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 135:\n#line 503 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), NULL); APPEND_ARGS((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 136:\n#line 506 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_1(UpPrintSyntaxType, (yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 137:\n#line 509 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].operatorValue), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 138:\n#line 512 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].operatorValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 139:\n#line 514 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].operatorValue), NULL); ;}\n break;\n\n case 140:\n#line 517 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_FUNCTION((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), (yyvsp[(2) - (3)].operatorValue)); ;}\n break;\n\n case 141:\n#line 519 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = 
PARSE_FUNCTION((yylsp[(1) - (2)]), NULL, (yyvsp[(2) - (2)].objectValue), (yyvsp[(1) - (2)].operatorValue)); ;}\n break;\n\n case 142:\n#line 522 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IS((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 143:\n#line 525 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (2)].objectValue); ;}\n break;\n\n case 144:\n#line 528 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), NULL, (yyvsp[(6) - (6)].objectValue), (yyvsp[(3) - (6)].operatorValue), false); ;}\n break;\n\n case 145:\n#line 530 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ITERATOR((yylsp[(1) - (8)]), (yyvsp[(2) - (8)].objectValue), (yyvsp[(4) - (8)].objectValue), (yyvsp[(6) - (8)].objectValue), (yyvsp[(8) - (8)].objectValue), (yyvsp[(3) - (8)].operatorValue), (yyvsp[(5) - (8)].operatorValue)); ;}\n break;\n\n case 146:\n#line 533 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].objectValue), NULL, (yyvsp[(4) - (4)].objectValue), false, false); ;}\n break;\n\n case 147:\n#line 535 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAPPER((yylsp[(1) - (6)]), (yyvsp[(2) - (6)].objectValue), (yyvsp[(4) - (6)].objectValue), (yyvsp[(6) - (6)].objectValue), false, (yyvsp[(3) - (6)].operatorValue)); ;}\n break;\n\n case 148:\n#line 540 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].objectValue), NULL); ;}\n break;\n\n case 149:\n#line 542 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n 
break;\n\n case 150:\n#line 547 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRANSFORM((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 151:\n#line 549 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = APPEND_TRANSFORM((yyvsp[(1) - (6)].objectValue), PARSE_TRANSFORM((yylsp[(1) - (6)]), (yyvsp[(4) - (6)].objectValue), (yyvsp[(5) - (6)].objectValue))); ;}\n break;\n\n case 152:\n#line 554 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].objectValue), NULL); ;}\n break;\n\n case 153:\n#line 556 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (6)]), (yyvsp[(1) - (6)].objectValue), (yyvsp[(5) - (6)].objectValue)); ;}\n break;\n\n case 154:\n#line 561 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (1)].objectValue); ;}\n break;\n\n case 155:\n#line 563 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRANSFORM((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 156:\n#line 565 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = APPEND_TRANSFORM((yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 157:\n#line 570 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRANSFORM((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 158:\n#line 575 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].objectValue), NULL); ;}\n break;\n\n case 159:\n#line 577 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - 
(3)].objectValue)); ;}\n break;\n\n case 160:\n#line 582 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (1)].objectValue); ;}\n break;\n\n case 161:\n#line 584 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = APPEND_TRANSFORM((yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 162:\n#line 589 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].objectValue), NULL); ;}\n break;\n\n case 163:\n#line 591 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_IF((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 164:\n#line 596 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (1)].objectValue); ;}\n break;\n\n case 165:\n#line 598 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = APPEND_TRANSFORM((yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 166:\n#line 603 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TRANSFORM((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 168:\n#line 609 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ENSURE_SET((yyvsp[(1) - (3)].objectValue)); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 169:\n#line 611 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ENSURE_SET((yyvsp[(1) - (2)].objectValue)); ;}\n break;\n\n case 173:\n#line 624 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (4)])); APPEND((yyval.objectValue), (yyvsp[(1) - (4)].objectValue)); APPEND((yyval.objectValue), (yyvsp[(4) - (4)].objectValue)); (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (4)]), (yyvsp[(3) - 
(4)].objectValue), (yyval.objectValue)); ;}\n break;\n\n case 175:\n#line 630 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpConcatStringOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 177:\n#line 636 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpOrOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 178:\n#line 638 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_RANGE((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), PARSE_UNDEFINED((yylsp[(1) - (3)])), false); ;}\n break;\n\n case 179:\n#line 640 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_RANGE((yylsp[(1) - (5)]), (yyvsp[(1) - (5)].objectValue), (yyvsp[(3) - (5)].objectValue), (yyvsp[(5) - (5)].objectValue), false); ;}\n break;\n\n case 180:\n#line 642 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_RANGE((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue), PARSE_UNDEFINED((yylsp[(1) - (3)])), true); ;}\n break;\n\n case 181:\n#line 644 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_RANGE((yylsp[(1) - (5)]), (yyvsp[(1) - (5)].objectValue), (yyvsp[(3) - (5)].objectValue), (yyvsp[(5) - (5)].objectValue), true); ;}\n break;\n\n case 183:\n#line 650 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpAndOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 185:\n#line 656 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpEqualsOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 186:\n#line 658 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpNotEqualsOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 188:\n#line 664 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpLessThanOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 189:\n#line 666 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpGreaterThanOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 190:\n#line 668 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpLessThanEqualsOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 191:\n#line 670 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpGreaterThanEqualsOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 192:\n#line 672 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpIsNotOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 193:\n#line 674 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpIsInOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 194:\n#line 676 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpNotInOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 196:\n#line 682 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpAddOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n 
case 197:\n#line 684 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpSubtractOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 199:\n#line 690 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpMultiplyOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 200:\n#line 692 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpDivideOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 201:\n#line 694 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpModOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 202:\n#line 696 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpPowOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 203:\n#line 698 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpConcatOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 205:\n#line 704 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), UpDeleteOp, (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 206:\n#line 706 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), UpNegativeOp, (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 207:\n#line 708 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), UpNotOp, (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 208:\n#line 710 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = 
PARSE_UNARY((yylsp[(1) - (2)]), UpInOp, (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 209:\n#line 712 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (3)])); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue), (yyval.objectValue)); ;}\n break;\n\n case 211:\n#line 718 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), UpBindOp, (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 212:\n#line 720 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_UNARY((yylsp[(1) - (2)]), UpBindOp, (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 214:\n#line 726 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ENSURE_SET((yyvsp[(1) - (3)].objectValue)); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 216:\n#line 732 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SUBTYPE((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 217:\n#line 734 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SUBTYPE((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 218:\n#line 736 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CAST((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 219:\n#line 739 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 220:\n#line 741 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL_TASK((yylsp[(1) - (4)]), (yyvsp[(1) - (4)].objectValue), 
(yyvsp[(2) - (4)].objectValue), false, (yyvsp[(4) - (4)].objectValue)); ;}\n break;\n\n case 221:\n#line 743 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL_TASK((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(2) - (3)].objectValue), true, NULL); ;}\n break;\n\n case 222:\n#line 745 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CALL_TASK((yylsp[(1) - (5)]), (yyvsp[(1) - (5)].objectValue), (yyvsp[(2) - (5)].objectValue), true, (yyvsp[(4) - (5)].objectValue)); ;}\n break;\n\n case 223:\n#line 748 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_PROPERTY((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].stringValue)); ;}\n break;\n\n case 224:\n#line 751 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (5)]), UpLookupOp, (yyvsp[(1) - (5)].objectValue), (yyvsp[(4) - (5)].objectValue)); ;}\n break;\n\n case 225:\n#line 753 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (7)]), UpLookupOp, (yyvsp[(1) - (7)].objectValue), PARSE_DEFAULT((yylsp[(4) - (7)]), (yyvsp[(4) - (7)].objectValue), (yyvsp[(6) - (7)].objectValue))); ;}\n break;\n\n case 226:\n#line 756 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (4)]), UpIndexOp, (yyvsp[(1) - (4)].objectValue), (yyvsp[(3) - (4)].objectValue)); ;}\n break;\n\n case 227:\n#line 758 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (6)]), UpIndexOp, (yyvsp[(1) - (6)].objectValue), PARSE_DEFAULT((yylsp[(3) - (6)]), (yyvsp[(3) - (6)].objectValue), (yyvsp[(5) - (6)].objectValue))); ;}\n break;\n\n case 229:\n#line 765 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = APPEND_TYPE((yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 
236:\n#line 779 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_GROUP((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 237:\n#line 781 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_GROUP((yylsp[(1) - (2)]), NULL); ;}\n break;\n\n case 238:\n#line 786 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_LIST((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 239:\n#line 788 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_LIST((yylsp[(1) - (2)]), NULL); ;}\n break;\n\n case 240:\n#line 793 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAP((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 241:\n#line 795 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_MAP((yylsp[(1) - (2)]), NULL); ;}\n break;\n\n case 242:\n#line 800 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CHANNEL((yylsp[(1) - (2)]), NULL); ;}\n break;\n\n case 243:\n#line 802 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CHANNEL((yylsp[(1) - (3)]), (yyvsp[(2) - (3)].objectValue)); ;}\n break;\n\n case 244:\n#line 807 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ID((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].stringValue)); ;}\n break;\n\n case 245:\n#line 809 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_TYPEID((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].stringValue)); ;}\n break;\n\n case 250:\n#line 818 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ID((yylsp[(1) - (1)]), \"null\"); ;}\n break;\n\n case 251:\n#line 820 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_PRIVATE_CFUNCTION((yyvsp[(1) - (1)].objectValue)); ;}\n break;\n\n case 252:\n#line 825 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpEqOp; ;}\n break;\n\n case 253:\n#line 827 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpReadOp; ;}\n break;\n\n case 254:\n#line 829 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpAddEqOp; ;}\n break;\n\n case 255:\n#line 831 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpSubtractEqOp; ;}\n break;\n\n case 256:\n#line 833 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpMultiplyEqOp; ;}\n break;\n\n case 257:\n#line 835 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpDivideEqOp; ;}\n break;\n\n case 258:\n#line 837 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpModEqOp; ;}\n break;\n\n case 259:\n#line 839 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpPowEqOp; ;}\n break;\n\n case 260:\n#line 841 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpConcatEqOp; ;}\n break;\n\n case 261:\n#line 846 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpReadOp; ;}\n break;\n\n case 262:\n#line 848 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpWriteOp; ;}\n break;\n\n case 263:\n#line 850 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpWriteAllOp; ;}\n break;\n\n case 264:\n#line 855 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpWriteOp; ;}\n break;\n\n case 265:\n#line 857 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = UpWriteAllOp; ;}\n break;\n\n case 266:\n#line 862 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = false; ;}\n break;\n\n case 267:\n#line 864 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = true; ;}\n break;\n\n case 
268:\n#line 869 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = 0; ;}\n break;\n\n case 269:\n#line 871 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = 1; ;}\n break;\n\n case 270:\n#line 876 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = 0; ;}\n break;\n\n case 271:\n#line 878 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.operatorValue) = 1; ;}\n break;\n\n case 272:\n#line 883 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (3)].objectValue); ;}\n break;\n\n case 273:\n#line 888 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = NULL; ;}\n break;\n\n case 274:\n#line 890 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(2) - (3)].objectValue); ;}\n break;\n\n case 275:\n#line 895 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (1)].objectValue); ;}\n break;\n\n case 276:\n#line 897 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (2)].objectValue); ;}\n break;\n\n case 277:\n#line 899 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (3)].objectValue); APPEND_ARG((yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 278:\n#line 904 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (1)]), NULL, (yyvsp[(1) - (1)].objectValue), NULL); ;}\n break;\n\n case 279:\n#line 906 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].stringValue), (yyvsp[(2) - (2)].objectValue), NULL); ;}\n break;\n\n case 280:\n#line 908 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].stringValue), NULL, NULL); ;}\n break;\n\n case 281:\n#line 910 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_ARG((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].stringValue), PARSE_ASSIGNMENT((yylsp[(1) - (3)]), UpEqOp, PARSE_ID((yylsp[(1) - (3)]), (yyvsp[(1) - (3)].stringValue)), (yyvsp[(3) - (3)].objectValue)), NULL); ;}\n break;\n\n case 282:\n#line 912 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_VARG((yylsp[(1) - (2)]), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 284:\n#line 918 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ENSURE_SET((yyvsp[(1) - (3)].objectValue)); APPEND((yyval.objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 285:\n#line 920 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ENSURE_SET((yyvsp[(1) - (2)].objectValue)); ;}\n break;\n\n case 286:\n#line 925 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_BINARY((yylsp[(1) - (3)]), UpEqOp, (yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 287:\n#line 932 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(2) - (2)])); APPEND((yyval.objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 288:\n#line 934 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (2)].objectValue); APPEND((yyvsp[(1) - (2)].objectValue), (yyvsp[(2) - (2)].objectValue)); ;}\n break;\n\n case 292:\n#line 948 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CFUNCTION((yylsp[(1) - (5)]), (yyvsp[(2) - (5)].stringValue), (yyvsp[(1) - (5)].objectValue), (yyvsp[(4) - (5)].objectValue)); ;}\n break;\n\n case 293:\n#line 950 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CFUNCTION((yylsp[(1) - (4)]), (yyvsp[(2) - (4)].stringValue), (yyvsp[(1) - (4)].objectValue), NULL); ;}\n break;\n\n case 294:\n#line 955 
\"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CTYPE((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].stringValue)); ;}\n break;\n\n case 295:\n#line 957 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CTYPE((yylsp[(1) - (2)]), (yyvsp[(2) - (2)].stringValue)); ;}\n break;\n\n case 296:\n#line 959 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CTYPE((yylsp[(1) - (2)]), (yyvsp[(2) - (2)].stringValue)); ;}\n break;\n\n case 297:\n#line 961 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CTYPE((yylsp[(1) - (3)]), (yyvsp[(3) - (3)].stringValue)); ;}\n break;\n\n case 298:\n#line 963 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CTYPE((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].stringValue)); ;}\n break;\n\n case 299:\n#line 965 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CTYPE((yylsp[(1) - (2)]), (yyvsp[(2) - (2)].stringValue)); ;}\n break;\n\n case 300:\n#line 967 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = ADD_CTYPE_POINTER((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].objectValue)); ;}\n break;\n\n case 301:\n#line 972 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_SET((yylsp[(1) - (1)])); APPEND((yyval.objectValue), (yyvsp[(1) - (1)].objectValue)); ;}\n break;\n\n case 302:\n#line 974 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = (yyvsp[(1) - (3)].objectValue); APPEND((yyvsp[(1) - (3)].objectValue), (yyvsp[(3) - (3)].objectValue)); ;}\n break;\n\n case 303:\n#line 979 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CARGUMENT((yylsp[(1) - (1)]), (yyvsp[(1) - (1)].objectValue), NULL); ;}\n break;\n\n case 304:\n#line 981 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n { (yyval.objectValue) = PARSE_CARGUMENT((yylsp[(1) - (2)]), (yyvsp[(1) - (2)].objectValue), 
(yyvsp[(2) - (2)].stringValue)); ;}\n break;\n\n\n/* Line 1267 of yacc.c. */\n#line 3838 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.tab.c\"\n default: break;\n }\n YY_SYMBOL_PRINT (\"-> $$ =\", yyr1[yyn], &yyval, &yyloc);\n\n YYPOPSTACK (yylen);\n yylen = 0;\n YY_STACK_PRINT (yyss, yyssp);\n\n *++yyvsp = yyval;\n *++yylsp = yyloc;\n\n /* Now `shift' the result of the reduction. Determine what state\n that goes to, based on the state we popped back to and the rule\n number reduced by. */\n\n yyn = yyr1[yyn];\n\n yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;\n if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)\n yystate = yytable[yystate];\n else\n yystate = yydefgoto[yyn - YYNTOKENS];\n\n goto yynewstate;\n\n\n/*------------------------------------.\n| yyerrlab -- here on detecting error |\n`------------------------------------*/\nyyerrlab:\n /* If not already recovering from an error, report this error. */\n if (!yyerrstatus)\n {\n ++yynerrs;\n#if ! YYERROR_VERBOSE\n yyerror (&yylloc, scanner, YY_(\"syntax error\"));\n#else\n {\n\tYYSIZE_T yysize = yysyntax_error (0, yystate, yychar);\n\tif (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)\n\t {\n\t YYSIZE_T yyalloc = 2 * yysize;\n\t if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))\n\t yyalloc = YYSTACK_ALLOC_MAXIMUM;\n\t if (yymsg != yymsgbuf)\n\t YYSTACK_FREE (yymsg);\n\t yymsg = (char *) YYSTACK_ALLOC (yyalloc);\n\t if (yymsg)\n\t yymsg_alloc = yyalloc;\n\t else\n\t {\n\t\tyymsg = yymsgbuf;\n\t\tyymsg_alloc = sizeof yymsgbuf;\n\t }\n\t }\n\n\tif (0 < yysize && yysize <= yymsg_alloc)\n\t {\n\t (void) yysyntax_error (yymsg, yystate, yychar);\n\t yyerror (&yylloc, scanner, yymsg);\n\t }\n\telse\n\t {\n\t yyerror (&yylloc, scanner, YY_(\"syntax error\"));\n\t if (yysize != 0)\n\t goto yyexhaustedlab;\n\t }\n }\n#endif\n }\n\n yyerror_range[0] = yylloc;\n\n if (yyerrstatus == 3)\n {\n /* If just tried and failed to reuse look-ahead token after an\n\t error, discard it. 
*/\n\n if (yychar <= YYEOF)\n\t{\n\t /* Return failure if at end of input. */\n\t if (yychar == YYEOF)\n\t YYABORT;\n\t}\n else\n\t{\n\t yydestruct (\"Error: discarding\",\n\t\t yytoken, &yylval, &yylloc, scanner);\n\t yychar = YYEMPTY;\n\t}\n }\n\n /* Else will try to reuse look-ahead token after shifting the error\n token. */\n goto yyerrlab1;\n\n\n/*---------------------------------------------------.\n| yyerrorlab -- error raised explicitly by YYERROR. |\n`---------------------------------------------------*/\nyyerrorlab:\n\n /* Pacify compilers like GCC when the user code never invokes\n YYERROR and the label yyerrorlab therefore never appears in user\n code. */\n if (/*CONSTCOND*/ 0)\n goto yyerrorlab;\n\n yyerror_range[0] = yylsp[1-yylen];\n /* Do not reclaim the symbols of the rule which action triggered\n this YYERROR. */\n YYPOPSTACK (yylen);\n yylen = 0;\n YY_STACK_PRINT (yyss, yyssp);\n yystate = *yyssp;\n goto yyerrlab1;\n\n\n/*-------------------------------------------------------------.\n| yyerrlab1 -- common code for both syntax error and YYERROR. |\n`-------------------------------------------------------------*/\nyyerrlab1:\n yyerrstatus = 3;\t/* Each real token shifted decrements this. */\n\n for (;;)\n {\n yyn = yypact[yystate];\n if (yyn != YYPACT_NINF)\n\t{\n\t yyn += YYTERROR;\n\t if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)\n\t {\n\t yyn = yytable[yyn];\n\t if (0 < yyn)\n\t\tbreak;\n\t }\n\t}\n\n /* Pop the current state because it cannot handle the error token. */\n if (yyssp == yyss)\n\tYYABORT;\n\n yyerror_range[0] = *yylsp;\n yydestruct (\"Error: popping\",\n\t\t yystos[yystate], yyvsp, yylsp, scanner);\n YYPOPSTACK (1);\n yystate = *yyssp;\n YY_STACK_PRINT (yyss, yyssp);\n }\n\n if (yyn == YYFINAL)\n YYACCEPT;\n\n *++yyvsp = yylval;\n\n yyerror_range[1] = yylloc;\n /* Using YYLLOC is tempting, but would change the location of\n the look-ahead. YYLOC is available though. 
*/\n YYLLOC_DEFAULT (yyloc, (yyerror_range - 1), 2);\n *++yylsp = yyloc;\n\n /* Shift the error token. */\n YY_SYMBOL_PRINT (\"Shifting\", yystos[yyn], yyvsp, yylsp);\n\n yystate = yyn;\n goto yynewstate;\n\n\n/*-------------------------------------.\n| yyacceptlab -- YYACCEPT comes here. |\n`-------------------------------------*/\nyyacceptlab:\n yyresult = 0;\n goto yyreturn;\n\n/*-----------------------------------.\n| yyabortlab -- YYABORT comes here. |\n`-----------------------------------*/\nyyabortlab:\n yyresult = 1;\n goto yyreturn;\n\n#ifndef yyoverflow\n/*-------------------------------------------------.\n| yyexhaustedlab -- memory exhaustion comes here. |\n`-------------------------------------------------*/\nyyexhaustedlab:\n yyerror (&yylloc, scanner, YY_(\"memory exhausted\"));\n yyresult = 2;\n /* Fall through. */\n#endif\n\nyyreturn:\n if (yychar != YYEOF && yychar != YYEMPTY)\n yydestruct (\"Cleanup: discarding lookahead\",\n\t\t yytoken, &yylval, &yylloc, scanner);\n /* Do not reclaim the symbols of the rule which action triggered\n this YYABORT or YYACCEPT. */\n YYPOPSTACK (yylen);\n YY_STACK_PRINT (yyss, yyssp);\n while (yyssp != yyss)\n {\n yydestruct (\"Cleanup: popping\",\n\t\t yystos[*yyssp], yyvsp, yylsp, scanner);\n YYPOPSTACK (1);\n }\n#ifndef yyoverflow\n if (yyss != yyssa)\n YYSTACK_FREE (yyss);\n#endif\n#if YYERROR_VERBOSE\n if (yymsg != yymsgbuf)\n YYSTACK_FREE (yymsg);\n#endif\n /* Make sure YYID is used. 
*/\n return YYID (yyresult);\n}\n\n\n#line 984 \"/Users/joehewitt/Dropbox/Code/up/src/vm/Up.y\"\n\n\n" }, { "alpha_fraction": 0.5140351057052612, "alphanum_fraction": 0.5561403632164001, "avg_line_length": 15.507246017456055, "blob_id": "21aaeb53e263c974dfb14d2d4aa1fb86cfb5cfbb", "content_id": "64a16c71316386c893a3d099e9a91d3157d2e881", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1140, "license_type": "permissive", "max_line_length": 99, "num_lines": 69, "path": "/src/vm/UpTestFunctions.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n// ************************************************************************************************\n// These are used in runCCalls.test\n\nvoid ctest1() {\n printf(\"received!\\n\");\n}\n\nvoid ctest2(int value) {\n printf(\"received %d\\n\", value);\n}\n\nvoid ctest2b(char a) {\n printf(\"received %c\", a);\n}\n\nvoid ctest3(double value) {\n printf(\"received %g\\n\", value);\n}\n\nvoid ctest4(long long value) {\n printf(\"received %lld\\n\", value);\n}\n\nvoid ctest5(char* value) {\n printf(\"received %s\\n\", value);\n}\n\nint ctest6() {\n return 42;\n}\n\nlong long ctest7() {\n return 9223372036854775807;\n}\n\ndouble ctest8() {\n return 42.5;\n}\n\nchar* ctest9() {\n return \"hello\";\n}\n\nUpObject* ctest10(UpObject* a) {\n return a;\n}\n\ntypedef struct {\n int value;\n} FakeType;\n\nFakeType* ctest11() {\n FakeType* data = (FakeType*)malloc(sizeof(FakeType));\n data->value = 42;\n return data;\n}\n\nvoid ctest12(FakeType* data) {\n printf(\"%d\", 42);\n}\n\nvoid ctest13(UpObject* object, const char* str) {\n printf(\"%s\\n\", object->cls->def->name); fflush(stdout);\n printf(\"%s\\n\", str); fflush(stdout);\n}\n\nchar* ctest14() {\n return NULL;\n}\n" }, { "alpha_fraction": 0.5450348854064941, "alphanum_fraction": 0.5507796406745911, "avg_line_length": 27.252174377441406, "blob_id": "e26047a705480ad87db2ef5f2e2eadebb880e014", 
"content_id": "666e4ab2edd3122fe914a4a623c0776736dd08cb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9748, "license_type": "permissive", "max_line_length": 102, "num_lines": 345, "path": "/make/build/ProbeMaker.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport os.path, json, re\n\nreFormat = re.compile('%(\\((.*?)\\))?((ll)?[diuoxXfFeEgGaAcspn%])')\n\nIntegerType = 1\nLongType = 2\nFloatType = 3\nCharType = 4\nStringType = 5\n\ncTypeForSlotType = {\n IntegerType: 'uint32_t',\n LongType: 'uint64_t',\n FloatType: 'double',\n CharType: 'char',\n StringType: 'uint32_t',\n}\n\nsizeForSlotType = {\n IntegerType: 4,\n LongType: 8,\n FloatType: 8,\n CharType: 1,\n StringType: 4,\n}\n\ntypeForLetter = {\n 'd': IntegerType,\n 'i': IntegerType,\n 'u': IntegerType,\n 'o': IntegerType,\n 'x': IntegerType,\n 'X': IntegerType,\n 'lld': LongType,\n 'lli': LongType,\n 'llu': LongType,\n 'llo': LongType,\n 'llx': LongType,\n 'llX': LongType,\n 'f': FloatType,\n 'F': FloatType,\n 'e': FloatType,\n 'E': FloatType,\n 'g': FloatType,\n 'G': FloatType,\n 'a': FloatType,\n 'A': FloatType,\n 'c': CharType,\n 's': StringType,\n 'p': IntegerType,\n}\n\nclass Probe(object):\n def __init__(self, name):\n self.name = name\n self.isData = False\n self.slots = []\n\n @property\n def upperName(self):\n name = re.sub(re.compile('([A-Z])'), '_\\g<0>', self.name)\n return name.upper()\n\n @property\n def enumName(self):\n return 'UpProbe%s' % camelCaseName(self.name)\n\n def dataTypeNameForMacro(self, macroName):\n return 'Up%s%s%s' % (macroName, self.name[0].upper(), self.name[1:])\n\n @property\n def argNames(self):\n return ['_ARG%d' % i for i in xrange(0, len(self.slots))]\n\n @property\n def structSizes(self):\n yield sizeForSlotType[CharType]\n yield sizeForSlotType[IntegerType]\n yield sizeForSlotType[FloatType]\n for slot in self.slots:\n yield sizeForSlotType[slot['type']]\n\n def 
parse(self, format, mappings):\n if isinstance(format, list):\n format = format[0]\n self.isData = True\n\n index = 0\n parts = []\n\n while 1:\n m = reFormat.search(format, index)\n if m and not m.groups()[0]:\n start = m.start()\n if start > index:\n parts.append(format[index:start])\n\n letter = m.groups()[2]\n if letter == '%':\n parts.append('%')\n elif letter == 'n':\n pass\n else:\n parts.append('%%%s' % letter)\n\n slotType = typeForLetter[letter]\n self.slots.append({'type': slotType})\n\n index = m.end()\n elif m:\n start = m.start()\n if start > index:\n parts.append(format[index:start])\n\n name = m.groups()[1]\n if name not in mappings:\n mappings[name] = len(mappings)+1\n parts.append('%(' + str(mappings[name]) + ')s')\n\n self.slots.append({'type': IntegerType})\n\n index = m.end()\n else:\n break\n\n if index+1 < len(format):\n parts.append(format[index:])\n\n self.formatString = ''.join(parts)\n\ndef probeMake(jsonPath):\n jsonPath = os.path.abspath(jsonPath)\n f = open(jsonPath)\n source = f.read()\n f.close()\n\n data = json.loads(source)\n probes = []\n mappings = {}\n for probeName,format in data.iteritems():\n probe = Probe(probeName)\n probe.parse(format, mappings)\n probes.append(probe)\n\n hSource,cSource = renderFiles(probes, mappings)\n\n dirPath = os.path.dirname(jsonPath)\n fileName = os.path.basename(jsonPath)\n basePath,ext = os.path.splitext(fileName)\n\n hPath = \"%s/include/%s.h\" % (dirPath, basePath)\n hFile = open(hPath, 'w')\n hFile.write(hSource)\n hFile.close()\n\n cPath = \"%s/%s.c\" % (dirPath, basePath)\n cFile = open(cPath, 'w')\n cFile.write(cSource)\n cFile.close()\n\ndef renderFiles(probes, mappings):\n hLines = [hHeader]\n cLines = [cHeader]\n\n hLines.append('typedef enum {')\n hLines.append(' UpProbeNull,')\n\n for probe in probes:\n hLines.append(' %s,' % probe.enumName)\n\n hLines.append('} UpProbeType;')\n hLines.append('')\n\n hLines.append('typedef enum {')\n hLines.append(' UpProbeMappingNull,')\n\n for 
mapping in mappings:\n hLines.append(' UpProbeMapping%s = %s,' % (camelCaseName(mapping), mappings[mapping]))\n\n hLines.append('} UpProbeMapping;')\n hLines.append('')\n\n for probe in probes:\n hLines.append('extern bool %sEnabled;' % probe.enumName)\n\n hLines.append('')\n\n for probe in probes:\n if probe.isData:\n hLines.append(structForProbe(probe, 'Do'))\n else:\n hLines.append(structForProbe(probe, 'Begin', 'double time;'))\n hLines.append(structForProbe(probe, 'End', 'double time;'))\n hLines.append(structForProbe(probe, 'Count'))\n\n for probe in probes:\n if probe.isData:\n hLines.append(macroForProbe(probe, 'Do', 7))\n else:\n hLines.append(macroForProbe(probe, 'Begin', 5, 'UpGetProbeTime()'))\n hLines.append(macroForProbe(probe, 'End', 6, 'UpGetProbeTime()'))\n hLines.append(macroForProbe(probe, 'Count', 7))\n\n hLines.append('bool* UpGetProbeFlag(const char* name);')\n hLines.append('void UpInitProbes();')\n\n hLines.append('#else\\n')\n\n for probe in probes:\n if probe.isData:\n hLines.append('#define DO_%s(%s)' % (probe.upperName, ', '.join(probe.argNames)))\n else:\n hLines.append('#define BEGIN_%s(%s)' % (probe.upperName, ', '.join(probe.argNames)))\n hLines.append('#define END_%s(%s)' % (probe.upperName, ', '.join(probe.argNames)))\n hLines.append('#define COUNT_%s(%s)' % (probe.upperName, ', '.join(probe.argNames)))\n\n hLines.append(hFooter)\n\n for probe in probes:\n cLines.append('bool %sEnabled = false;' % probe.enumName)\n\n first = True\n cLines.append('')\n cLines.append('bool* UpGetProbeFlag(const char* name) {')\n for probe in probes:\n cLines.append(' %sif (!strcmp(name, \"%s\")) {' % ('' if first else '} else ', probe.name))\n cLines.append(' return &%sEnabled;' % probe.enumName)\n first = False\n cLines.append(' }')\n cLines.append(' return NULL;')\n cLines.append('}')\n\n cLines.append('')\n cLines.append('void UpInitProbes() {')\n for probe in probes:\n format = probe.formatString\n cLines.append(' {')\n cLines.append(' char* name 
= \"%s\";' % probe.name)\n cLines.append(' char* format = \"%s\";' % format)\n cLines.append(' UpDeclareProbe probe = {2, %s, %s, %s, %d};' \\\n % (probe.enumName, len(probe.name), len(format), 1 if probe.isData else 0))\n cLines.append(' UpProbe(&probe, sizeof(probe));')\n cLines.append(' UpProbe(name, sizeof(char) * %s);' % len(probe.name))\n cLines.append(' UpProbe(format, sizeof(char) * %s);' % len(format))\n cLines.append(' }')\n\n cLines.append(' {')\n cLines.append(' UpDeclareProbe probe = {2, 0, 0, 0, 0};')\n cLines.append(' UpProbe(&probe, sizeof(probe));')\n cLines.append(' }')\n\n cLines.append('}')\n\n cLines.append(cFooter)\n\n return '\\n'.join(hLines), '\\n'.join(cLines)\n\ndef structForProbe(probe, name, defs=None):\n lines = []\n lines.append('typedef struct __attribute__((__packed__)) {')\n lines.append(' uint8_t type;')\n lines.append(' uint8_t probe;')\n if defs:\n lines.append(' %s' % defs)\n for i,slot in zip(xrange(0, len(probe.slots)), probe.slots):\n lines.append(' %s _ARG%s;' % (cTypeForSlotType[slot['type']], i))\n lines.append('} %s;\\n' % probe.dataTypeNameForMacro(name))\n return '\\n'.join(lines)\n\ndef macroForProbe(probe, name, number, initializers=None):\n if initializers:\n initializers = initializers + ','\n else:\n initializers = ''\n\n lines = []\n lines.append('#define %s_%s(%s) \\\\' % (name.upper(), probe.upperName,\n ', '.join(probe.argNames)))\n lines.append(' if (%sEnabled) { \\\\' % probe.enumName)\n\n argNames = []\n for i,slot in zip(xrange(0, len(probe.slots)), probe.slots):\n if slot['type'] == StringType:\n lines.append(' uint32_t len%s = sizeof(char) * strlen(_ARG%s); \\\\' % (i, i))\n argNames.append('len%s' % i)\n else:\n argNames.append('(%s)_ARG%s' % (cTypeForSlotType[slot['type']], i))\n\n lines.append(' %s probe = {%s, %s,%s %s}; \\\\' %\n (probe.dataTypeNameForMacro(name), number, probe.enumName, initializers, ', '.join(argNames)))\n lines.append(' UpProbe(&probe, sizeof(probe)); \\\\')\n\n for i,slot in 
zip(xrange(0, len(probe.slots)), probe.slots):\n if slot['type'] == StringType:\n lines.append(' UpProbe((void*)_ARG%s, len%s); \\\\' % (i, i))\n\n lines.append(' }\\n')\n return '\\n'.join(lines)\n\ndef camelCaseName(name):\n return '%s%s' % (name[0].upper(), name[1:])\n\n# **************************************************************************************************\n\nhHeader = \"\"\"\n// Do not edit this file. It is generated!\n\n#ifndef UP_PROBES_H\n#define UP_PROBES_H\n\n#include \"Up/UpGlobal.h\"\n\n#ifdef UP_ENABLE_PROBES\n\"\"\"\n\nhFooter = \"\"\"\n#endif\n\n#endif // UP_PROBES_H\n\"\"\"\n\ncHeader = \"\"\"\n// Do not edit this file. It is generated!\n\n#ifdef UP_ENABLE_PROBES\n\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpProbes.h\"\n#include \"Up/UpContext.h\"\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t name;\n uint32_t format;\n uint8_t isData;\n} UpDeclareProbe;\n\n\"\"\"\n\ncFooter = \"\"\"\n#endif\n\n\"\"\"\n" }, { "alpha_fraction": 0.7184363603591919, "alphanum_fraction": 0.724775493144989, "avg_line_length": 22.95569610595703, "blob_id": "a5c027edc16fc5ca2ba68596fb4d189f8cb25081", "content_id": "b678f9eeb88ae46707da1484b617654bfed61fd7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3786, "license_type": "permissive", "max_line_length": 99, "num_lines": 158, "path": "/src/vm/include/UpGlobal.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPGLOBAL_H\n#define UP_UPGLOBAL_H\n\n#include <stdlib.h>\n#include <stdbool.h>\n#include <stdint.h>\n#include \"Up/UpInstructions.h\"\n#include \"Up/UpProbes.h\"\n\ntypedef struct UpSyntax UpSyntax;\n\ntypedef struct UpClassDef UpClassDef;\ntypedef struct UpFunctionDef UpFunctionDef;\ntypedef struct UpCFunctionDef UpCFunctionDef;\ntypedef struct UpArgumentDef UpArgumentDef;\ntypedef struct UpGetterDef UpGetterDef;\ntypedef struct UpSetterDef 
UpSetterDef;\ntypedef struct UpType UpType;\n\ntypedef struct UpContext UpContext;\ntypedef struct UpProperty UpProperty;\ntypedef struct UpObject UpObject;\ntypedef struct UpClass UpClass;\ntypedef struct UpException UpException;\ntypedef struct UpChannel UpChannel;\ntypedef struct UpFunction UpFunction;\ntypedef struct UpCFunction UpCFunction;\ntypedef struct UpCPointer UpCPointer;\ntypedef struct UpInteger UpInteger;\ntypedef struct UpLong UpLong;\ntypedef struct UpFloat UpFloat;\ntypedef struct UpString UpString;\ntypedef struct UpList UpList;\ntypedef struct UpMap UpMap;\n\ntypedef struct UpScheduler UpScheduler;\ntypedef struct UpEvent UpEvent;\ntypedef struct UpTask UpTask;\n\ntypedef struct UpArena UpArena;\ntypedef struct UpBuffer UpBuffer;\ntypedef struct UpArray UpArray;\ntypedef struct UpStrTable UpStrTable;\ntypedef struct UpIntTable UpIntTable;\n\ntypedef struct UpScanner UpScanner;\ntypedef struct UpCLibrary UpCLibrary;\ntypedef struct UpCType UpCType;\n\ntypedef struct UpCompileFrame UpCompileFrame;\ntypedef struct UpCallFrame UpCallFrame;\ntypedef struct UpStorageFrame UpStorageFrame;\ntypedef struct UpVariables UpVariables;\ntypedef struct UpVariable UpVariable;\ntypedef struct UpScope UpScope;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t mapping;\n uint32_t symbol;\n uint32_t length;\n} UpMappingProbe;\n\n// ************************************************************************************************\n\ntypedef bool UpStatus;\n#define UpSuccess true\n#define UpFailure false\n\ntypedef int32_t UpSymbol;\n#define UpNullSymbol 0\n\ntypedef unsigned int UpIndex;\n#define UpNotFound -1\n\ntypedef unsigned long long UpHash;\n\ntypedef unsigned int UpThreadId;\n\n#define UpFalseConst 0\n#define UpTrueConst 1\n#define UpNullConst 2\n#define UpUndefinedConst 3\n\n#define UP_EXPORT_SYMBOL(_NAME) void* __ ## _NAME ## __ = (void*)&_NAME;\n\n#define UpSharedIntegerMin -1\n#define UpSharedIntegerMax 10\n#define UpSharedLongMin 
-1\n#define UpSharedLongMax 10\n#define UpSharedFloatMin -1\n#define UpSharedFloatMax 10\n\n#define UP_BUILTIN(_TYPE) UpGetBuiltinClasses()->_TYPE##Class\n\n#define UPCTX UpGlobalContext\n\n\n#ifdef UP_ENABLE_PROBES\n\n#define UP_LOG(_CONTENT) UpLog(UpFormatString _CONTENT);\n\n#define UP_MAPPING(_MAPPING, _TEXT, _NUM) {\\\n const char* text = _TEXT; \\\n int len = sizeof(char) * strlen(text); \\\n UpMappingProbe probe = {3, _MAPPING, _NUM, len}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)text, len); \\\n}\n\n#define UP_PROBE_ID UpSymbol probeId\n#define UP_SET_PROBE_ID(_OBJECT, _VALUE) _OBJECT->probeId = _VALUE\n\n#else\n\n#define UP_LOG(_CONTENT)\n#define UP_MAPPING(_MAPPING, _TEXT, _NUM)\n#define UP_PROBE_ID\n#define UP_SET_PROBE_ID(_OBJECT, _VALUE)\n\n#endif\n\n// ************************************************************************************************\n\nstruct UpCompileFrame {\n char* file;\n char* module;\n int line;\n int column;\n UpCompileFrame* previous;\n};\n\nstruct UpCallFrame {\n UpFunctionDef* functionDef;\n char* cursor;\n UpCallFrame* previous;\n};\n\nstruct UpVariables {\n UpObject** variables;\n int count;\n UpVariables* previous;\n};\n\nstruct UpVariable {\n UpSymbol name;\n UpSyntax* import;\n UpClassDef* classDef;\n};\n\nstruct UpScope {\n UpArray* locals;\n UpArray* funcDefs;\n UpArray* classDefs;\n UpScope* previous;\n};\n\n#endif // UP_UPGLOBAL_H\n" }, { "alpha_fraction": 0.7212355136871338, "alphanum_fraction": 0.7212355136871338, "avg_line_length": 34.94444274902344, "blob_id": "d7c540e11d4a13149cfed6205242e41afd2fc887", "content_id": "41b271aef90316962ed078ae07733fe1583eff75", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1295, "license_type": "permissive", "max_line_length": 100, "num_lines": 36, "path": "/src/vm/include/UpInteger.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPINTEGER_H\n#define 
UP_UPINTEGER_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpInteger {\n UpObject __base;\n int value;\n UpSymbol unit;\n};\n\n// *************************************************************************************************\n\nUpInteger* UpIntegerCreate(int value);\nUpInteger* UpIntegerCreateWithUnit(int value, UpSymbol unit);\n\nvoid UpIntegerInit(UpInteger* self, const char* value, int base, const char* unit);\n\nconst char* UpIntegerToCString(UpInteger* self, int base);\nUpString* UpIntegerToString(UpInteger* self, int base);\n\nbool UpIntegerEquals(UpInteger* self, UpObject* other);\nbool UpIntegerGreaterThan(UpInteger* self, UpObject* other);\nbool UpIntegerGreaterThanEquals(UpInteger* self, UpObject* other);\nbool UpIntegerLessThan(UpInteger* self, UpObject* other);\nbool UpIntegerLessThanEquals(UpInteger* self, UpObject* other);\n\nUpObject* UpIntegerAdd(UpInteger* self, UpObject* other);\nUpObject* UpIntegerSubtract(UpInteger* self, UpObject* other);\nUpObject* UpIntegerMultiply(UpInteger* self, UpObject* other);\nUpObject* UpIntegerDivide(UpInteger* self, UpObject* other);\nUpObject* UpIntegerMod(UpInteger* self, UpObject* other);\nUpObject* UpIntegerPow(UpInteger* self, UpObject* other);\nUpObject* UpIntegerNegate(UpInteger* self);\n\n#endif // UP_UPINTEGER_H\n" }, { "alpha_fraction": 0.5601423978805542, "alphanum_fraction": 0.5613291263580322, "avg_line_length": 28.9390869140625, "blob_id": "ec69f6579c47ed7b450410c52b569327b7fa4282", "content_id": "af09c878a61103ff8dd0645631de5ff1d05a464f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11797, "license_type": "permissive", "max_line_length": 99, "num_lines": 394, "path": "/src/vm/UpObject.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpProperty.h\"\n#include 
\"Up/UpFunction.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpBool.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n#include \"UpIntTable.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic bool _GetGetter(UpObject* self, UpSymbol name, UpGetterDef** outGetterDef,\n UpClass** outClass) {\n UpGetterDef* getterDef;\n for (UpClass* cls = self->cls; cls; cls = cls->baseClass) {\n if (UpIntTableGet(cls->def->getters, name, &getterDef)) {\n if (outGetterDef) {\n *outGetterDef = getterDef;\n }\n if (outClass) {\n *outClass = cls;\n }\n return true;\n }\n }\n\n return false;\n}\n\nstatic bool _GetSetter(UpObject* self, UpSymbol name, UpFunctionDef** outSetterDef,\n UpClass** outClass) {\n UpFunctionDef* setterDef;\n for (UpClass* cls = self->cls; cls; cls = cls->baseClass) {\n if (cls->def->setters && UpIntTableGet(cls->def->setters, name, &setterDef)) {\n if (outSetterDef) {\n *outSetterDef = setterDef;\n }\n if (outClass) {\n *outClass = cls;\n }\n return true;\n }\n }\n\n return false;\n}\n\nstatic UpProperty* _GetProperty(UpObject* self, UpSymbol name) {\n if (!self->properties) {\n self->properties = UpIntTableCreate(UpGetHeap(), sizeof(UpObject*));\n }\n UpProperty* property;\n if (!UpIntTableGet(self->properties, name, &property)) {\n property = UpPropertyCreate();\n UpIntTableSet(self->properties, name, &property);\n }\n return property;\n}\n\nstatic UpProperty* _SetProperty(UpObject* self, UpSymbol name, UpObject* value) {\n UpProperty* property = _GetProperty(self, name);\n property->value = value;\n return property;\n}\n\n// ************************************************************************************************\n\nUpObject* UpObjectCreate() {\n return UpObjectCreateWithClass(UP_BUILTIN(object));\n}\n\nUpObject* UpObjectCreateWithClass(UpClass* 
cls) {\n size_t size = cls ? cls->size : sizeof(UpObject);\n const char* name = cls && cls->def ? cls->def->name : \"Object\";\n UpObject* self = UpArenaAllocate(UpGetHeap(), size, name);\n self->cls = cls;\n return self;\n}\n\nUpObject* UpObjectCreateWithSize(size_t size) {\n UpObject* self = UpArenaAllocate(UpGetHeap(), size, \"No Class\");\n return self;\n}\n\nbool UpAsTruth(UpObject* self) {\n if (self == UpTrue()) {\n return true;\n } else if (self == UpFalse()) {\n return false;\n } else if (self == UpNull()) {\n return false;\n } else if (self == UpUndefined()) {\n return false;\n } else if (UpIsTypeOf(self, UP_BUILTIN(integer))) {\n return ((UpInteger*)self)->value != 0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(long))) {\n return ((UpLong*)self)->value != 0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(float))) {\n return ((UpFloat*)self)->value != 0.0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(string))) {\n return !UpStringIsEmpty((UpString*)self);\n } else if (UpIsTypeOf(self, UP_BUILTIN(list))) {\n return UpListCount((UpList*)self);\n } else if (UpIsTypeOf(self, UP_BUILTIN(map))) {\n // return UpMapCount((UpMap*)self);\n }\n\n return true;\n}\n\nbool UpAsInt(UpObject* self, int* out) {\n if (UpIsTypeOf(self, UP_BUILTIN(integer))) {\n *out = ((UpInteger*)self)->value;\n } else if (UpIsTypeOf(self, UP_BUILTIN(long))) {\n *out = ((UpLong*)self)->value;\n } else if (UpIsTypeOf(self, UP_BUILTIN(float))) {\n *out = ((UpFloat*)self)->value;\n } else if (self == UpTrue()) {\n *out = 1;\n } else if (UpIsTypeOf(self, UP_BUILTIN(bool))) {\n *out = 0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(null))) {\n *out = 0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(string))) {\n return UpAsInt(UpStringToNumber((UpString*)self, true), out);\n } else {\n return false;\n }\n return true;\n}\n\nbool UpAsLong(UpObject* self, long long* out) {\n if (UpIsTypeOf(self, UP_BUILTIN(integer))) {\n *out = ((UpInteger*)self)->value;\n } else if (UpIsTypeOf(self, UP_BUILTIN(long))) {\n *out = 
((UpLong*)self)->value;\n } else if (UpIsTypeOf(self, UP_BUILTIN(float))) {\n *out = ((UpFloat*)self)->value;\n } else if (self == UpTrue()) {\n *out = 1L;\n } else if (UpIsTypeOf(self, UP_BUILTIN(bool))) {\n *out = 0L;\n } else if (UpIsTypeOf(self, UP_BUILTIN(string))) {\n return UpAsLong(UpStringToNumber((UpString*)self, true), out);\n } else {\n return false;\n }\n return true;\n}\n\nbool UpAsFloat(UpObject* self, double* out) {\n if (UpIsTypeOf(self, UP_BUILTIN(integer))) {\n *out = ((UpInteger*)self)->value;\n } else if (UpIsTypeOf(self, UP_BUILTIN(long))) {\n *out = ((UpLong*)self)->value;\n } else if (UpIsTypeOf(self, UP_BUILTIN(float))) {\n *out = ((UpFloat*)self)->value;\n } else if (self == UpTrue()) {\n *out = 1.0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(bool))) {\n *out = 0.0;\n } else if (UpIsTypeOf(self, UP_BUILTIN(string))) {\n return UpAsFloat(UpStringToNumber((UpString*)self, true), out);\n } else {\n return false;\n }\n return true;\n}\n\nUpStatus UpIs(UpObject* self, UpObject* other, bool* out) {\n if (UpIsClass(other)) {\n UpClass* cls = (UpClass*)other;\n if (cls == UP_BUILTIN(class)) {\n if (UpIsClass(self)) {\n *out = true;\n } else {\n *out = false;\n }\n } else {\n *out = UpClassIsInstance((UpClass*)other, self);\n }\n } else {\n *out = false;\n }\n return UpSuccess;\n}\n\nbool UpIsTypeOf(UpObject* self, UpClass* isClass) {\n for (UpClass* cls = self->cls; cls; cls = cls->baseClass) {\n if (cls == isClass) {\n return true;\n }\n }\n return false;\n}\n\nbool UpIsClass(UpObject* self) {\n return (UpObject*)self->cls == self\n || self == (UpObject*)UP_BUILTIN(object);\n}\n\nUpClassDef* UpObjectGetClassDef(UpObject* self) {\n return self->cls->def;\n}\n\nUpClass* UpObjectGetClass(UpObject* self) {\n return self->cls;\n}\n\nchar* UpObjectGetClassName(UpObject* self) {\n return self->cls->def->name;\n}\n\nUpObject* UpObjectGet(UpObject* self, UpSymbol name) {\n UpProperty* property;\n if (!self->properties\n || !UpIntTableGet(self->properties, 
name, &property)\n || !property->isValid) {\n return UpUndefined();\n } else {\n return property->value;\n }\n}\n\nbool UpObjectHas(UpObject* self, UpSymbol name) {\n UpProperty* property;\n if (self->properties && UpIntTableGet(self->properties, name, &property)) {\n return true;\n } else {\n return !!_GetGetter(self, name, NULL, NULL);\n }\n}\n\nvoid UpObjectSet(UpObject* self, UpSymbol name, UpObject* value) {\n UpProperty* property = _SetProperty(self, name, value);\n property->isValid = true;\n property->isValidating = false;\n property->isOverriden = true;\n}\n\nvoid UpObjectValidate(UpObject* self, UpSymbol name, UpObject* value) {\n UpProperty* property = _SetProperty(self, name, value);\n property->isValid = true;\n property->isValidating = false;\n property->isOverriden = false;\n}\n\nUpObject* UpObjectUnset(UpObject* self, UpSymbol name) {\n if (self->properties) {\n UpProperty* property;\n if (UpIntTableGet(self->properties, name, &property)) {\n UpObject* previousValue = property->isValid ? property->value : NULL;\n UpIntTableRemove(self->properties, name);\n return previousValue ? 
previousValue : UpUndefined();\n }\n }\n\n return UpUndefined();\n}\n\nvoid _IterateProperties(UpStrTable* self, UpSymbol key, UpProperty* property, UpList* list) {\n if (property->isOverriden) {\n UpString* name = UpStringCreate(UpGetSymbolName(key));\n UpListAppend(list, (UpObject*)name);\n }\n}\n\nUpList* UpObjectGetProperties(UpObject* self) {\n UpList* list = UpListCreate();\n if (self->properties) {\n UpIntTableIterate(self->properties, (UpIntTableIterator)_IterateProperties, list);\n }\n return list;\n}\n\nUpStatus UpObjectGetGetter(UpObject* self, UpSymbol name, bool bindFunctions,\n UpProperty** outProperty, UpGetterDef** outGetterDef,\n UpClass** outClass) {\n UpProperty* property = NULL;\n if (self->properties && UpIntTableGet(self->properties, name, &property)) {\n if (property->isValid) {\n if (outProperty) {\n *outProperty = property;\n }\n return UpSuccess;\n }\n }\n\n if (self != (UpObject*)self->cls) {\n for (UpObject* o = (UpObject*)self->cls; o; o = (UpObject*)o->cls) {\n if (o->properties && UpIntTableGet(o->properties, name, &property)) {\n if (property->isValid) {\n if (outProperty) {\n *outProperty = property;\n }\n return UpSuccess;\n }\n }\n if (o == (UpObject*)o->cls) {\n break;\n }\n }\n }\n\n UpGetterDef* getterDef;\n UpClass* cls;\n if (_GetGetter(self, name, &getterDef, &cls)) {\n if (bindFunctions) {\n if (!property) {\n property = _GetProperty(self, name);\n }\n if (getterDef->isMember && !property->isValid) {\n UpFunction* fn = UpFunctionCreate(getterDef->funcDef, cls->closure, self);\n property->value = (UpObject*)fn;\n property->isValid = true;\n }\n }\n\n if (outProperty) {\n *outProperty = property;\n }\n if (outGetterDef) {\n *outGetterDef = getterDef;\n }\n if (outClass) {\n *outClass = cls;\n }\n return UpSuccess;\n } else {\n if (outProperty) {\n *outProperty = property;\n }\n if (outGetterDef) {\n *outGetterDef = NULL;\n }\n if (outClass) {\n *outClass = NULL;\n }\n return UpSuccess;\n }\n}\n\nUpStatus 
UpObjectGetSetter(UpObject* self, UpSymbol name, bool isSet, UpProperty** outProperty,\n UpClass** outClass, UpFunctionDef** outDef) {\n if (!_GetSetter(self, name, outDef, outClass)) {\n if (outDef) {\n *outDef = NULL;\n }\n if (outClass) {\n *outClass = NULL;\n }\n }\n if (outProperty) {\n *outProperty = _GetProperty(self, name);\n }\n return UpSuccess;\n}\n\nbool UpObjectEquality(UpObject* self, UpObject* other) {\n return self == other;\n}\n\nUpHash UpObjectHash(UpObject* self) {\n return (UpHash)self;\n}\n\nUpHash UpGetBuiltinHash(UpObject* object) {\n if (UpIsTypeOf(object, UP_BUILTIN(null))) {\n if (object == UpUndefined()) {\n return -1;\n } else {\n return UpObjectHash(object);\n }\n } else if (UpIsTypeOf(object, UP_BUILTIN(integer))) {\n return ((UpInteger*)object)->value;\n } else if (UpIsTypeOf(object, UP_BUILTIN(long))) {\n return ((UpLong*)object)->value;\n } else if (UpIsTypeOf(object, UP_BUILTIN(float))) {\n return ((UpFloat*)object)->value;\n } else if (UpIsTypeOf(object, UP_BUILTIN(string))) {\n return UpStringHash((UpString*)object);\n } else {\n return UpObjectHash(object);\n }\n}\n" }, { "alpha_fraction": 0.5791217684745789, "alphanum_fraction": 0.5851284265518188, "avg_line_length": 33.726619720458984, "blob_id": "094aae5d5cf996119b74d6d87416ec7e32e5ee07", "content_id": "0d2b1b5fa0d671390c3fadf158f6978db7a59987", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4828, "license_type": "permissive", "max_line_length": 100, "num_lines": 139, "path": "/make/test/run.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport types, sys, os.path, re, imp, time\nfrom .TestRunner import TestRunner\nfrom .TestFixture import TestFixture, RunException, TestAbortException\nfrom .utils import *\n\n# **************************************************************************************************\n\ndef runTests(module, writer, testName=None, **options):\n if 
testName:\n names = testName.split(\"/\")\n if len(names) == 1:\n fixtureName,functionName = names[0],None\n elif len(names) == 2:\n fixtureName,functionName = names\n else:\n return\n else:\n fixtureName = None\n functionName = None\n\n failureCount = 0\n testCount = 0\n skipCount = 0\n nyiCount = 0\n\n debugPath = None\n abortMessage = None\n criticalError = None\n buildOnly = options.get('buildOnly')\n\n # When running all tests, don't let focused tests exclude the others in their file\n options['disregardFocus'] = not testName\n options['ignoreAdHoc'] = not testName\n\n # Mute all stdout writes so that we only output the desired exe path, intended\n # for external tools and not so much human eyes. This is kind of sloppy - a\n # generalized method for machine-readable requests would be preferable in the long term.\n if buildOnly:\n stdout = sys.stdout\n sys.stdout = PipeEater()\n\n c1 = time.time()\n runners = getTestRunners(module, writer, fixtureName)\n for runner in runners:\n try:\n failures, testsRun, testsNYI, testsSkipped = runner.run(functionName, options)\n failureCount += failures\n testCount += testsRun\n nyiCount += testsNYI\n skipCount += testsSkipped\n except RunException,exc:\n # if hasattr(exc, \"fileName\"):\n # print sourceLink(exc.fileName, exc.lineNo)\n debugPath = exc.exePath\n break\n except TestAbortException,exc:\n abortMessage = exc.message\n break\n except Exception,exc:\n # if hasattr(exc, \"fileName\"):\n # print sourceLink(exc.fileName, exc.lineNo)\n criticalError = exc\n break\n\n c2 = time.time()\n\n metas = []\n if skipCount:\n metas.append(\"%d skipped\" % skipCount)\n if nyiCount:\n metas.append(\"%d NYI\" % nyiCount)\n metaMessage = \" (%s)\" % \", \".join(metas) if metas else \"\"\n if abortMessage:\n # Restore stdout so we can write the final message\n if buildOnly:\n sys.stdout = stdout\n # print abortMessage\n elif debugPath:\n # print \"\\nProgram crashed!\"\n\n # Editors can read this directive to get the path of the exe 
that crashed\n # print \"[[DEBUG|%s]]\" % debugPath\n return 1\n elif criticalError:\n # print \"\"\n logException(writer)\n # print \"\\nException!\"\n return 1\n elif failureCount:\n # print \"\\n%d test%s out of %d failed%s. (%.2f seconds)\" \\\n # % (failureCount, \"s\" if failureCount > 1 else \"\", testCount, metaMessage, c2-c1)\n return failureCount\n else:\n pass\n # if testCount == 1:\n # print \"\\nTest passed%s. (%.2f seconds)\" % (metaMessage, c2-c1)\n # else:\n # print \"\\nAll %d tests passed%s. (%.2f seconds)\" % (testCount, metaMessage, c2-c1)\n\n return 0\n\ndef makeTestCatalog(module):\n catalog = []\n catalog.append({'name': 'All Tests', 'path': ''})\n for runner in getTestRunners(module):\n testNames = list(runner.getTestNames())\n if testNames:\n catalog.append({'name': runner.fixtureName, 'path': runner.fixtureName})\n for testName, testType, testStatus in testNames:\n prettyTestName = testName[4:]\n prettyTestName = prettyTestName[0].lower() + prettyTestName[1:]\n catalog.append({\n 'name': \" %s\" % prettyTestName,\n 'path': '%s/%s' % (runner.fixtureName, testName)\n })\n\n return catalog\n\ndef getTestRunners(module, writer=None, fixtureName=None):\n def sortOrder(a, b):\n return 1 if a.order > b.order else (-1 if a.order < b.order else 0)\n\n runners = list(walkTestRunners(module, writer, fixtureName))\n runners.sort(sortOrder)\n return runners\n\ndef walkTestRunners(module, writer, fixtureName=None):\n \"\"\" Yields a TestRunner for each test case found within a single module. 
\"\"\"\n\n for attrName in dir(module):\n attrValue = getattr(module, attrName)\n\n if issubclass(type(attrValue), types.TypeType) \\\n and issubclass(attrValue, TestFixture) \\\n and attrValue.__module__ == module.__name__:\n\n if not fixtureName or attrName == fixtureName:\n runner = TestRunner(attrName, attrValue, writer)\n yield runner\n" }, { "alpha_fraction": 0.7966321110725403, "alphanum_fraction": 0.7966321110725403, "avg_line_length": 26.535715103149414, "blob_id": "0533186e52b91210025818156fbdd57fd2172d19", "content_id": "5c583c732ccd889decdbdda882c12d7a49d3cb3b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 772, "license_type": "permissive", "max_line_length": 74, "num_lines": 28, "path": "/src/vm/include/UpException.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPEXCEPTION_H\n#define UP_UPEXCEPTION_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpException {\n UpObject __base;\n char* description;\n UpCallFrame* callFrame;\n UpCompileFrame* compileFrame;\n};\n\nUpException* UpExceptionCreate(const char* description);\n\nvoid UpExceptionInit(UpException* self, const char* description);\n\nchar* UpExceptionGetDescription(UpException* self);\n\nUpCallFrame* UpExceptionGetCallFrame(UpException* self);\nvoid UpExceptionSetCallFrame(UpException* self, UpCallFrame* frame);\n\nUpCompileFrame* UpExceptionGetCompileFrame(UpException* self);\nvoid UpExceptionSetCompileFrame(UpException* self, UpCompileFrame* frame);\n\nvoid UpExceptionLog(UpException* self);\nchar* UpExceptionGetStackTrace(UpException* self);\n\n#endif // UP_UPEXCEPTION_H\n" }, { "alpha_fraction": 0.43613138794898987, "alphanum_fraction": 0.43613138794898987, "avg_line_length": 26.350000381469727, "blob_id": "5bc939a8936b6b76ffe731b1764954887843784a", "content_id": "d1240f21542f5e53b8bb009493722dd261f82de0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"C", "length_bytes": 548, "license_type": "permissive", "max_line_length": 99, "num_lines": 20, "path": "/src/vm/UpBool.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpBool.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic bool _IsTrue(UpObject* self) {\n return self == UpTrue();\n}\n\n// ************************************************************************************************\n\nUpObject* UpBoolCreate() {\n UpObject* self = UpObjectCreateWithClass(UP_BUILTIN(bool));\n return self;\n}\n" }, { "alpha_fraction": 0.8384279608726501, "alphanum_fraction": 0.8384279608726501, "avg_line_length": 56, "blob_id": "a49024be2e3c5fedf41ad949daebbb3f93cba4a1", "content_id": "e8f86e277f97a216f36fa0c9e29c20146e67d320", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "permissive", "max_line_length": 86, "num_lines": 4, "path": "/make/build/__init__.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom .Project import project\nfrom .Maker import Compile, LinkExecutable, LinkDynamicLib, LinkStaticLib\nfrom .Maker import LinkPythonModule, FlexParse, BisonParse, ConfigureMake, StaticFiles\nfrom .Maker import GitVersion, Probes\n" }, { "alpha_fraction": 0.48594269156455994, "alphanum_fraction": 0.48917633295059204, "avg_line_length": 34.90967559814453, "blob_id": "dd4365a96f96b9f24bc6b6d9a2464b39ac70c0d3", "content_id": "7130d8d1ddb3400ded31e037c0c55a14abb4d8cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11133, "license_type": "permissive", "max_line_length": 100, "num_lines": 310, "path": "/make/test/TestFileFunc.py", "repo_name": 
"joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport os.path, functools, re, sys, shutil\nfrom .utils import testOutputPath, testFileFunctionHiddenName, copyToClipboard\n\n# **************************************************************************************************\n\nreCommentSep = re.compile(\"^={10,}$\")\nreTestSep1 = re.compile(\"^-{10,}$\")\nreTestSep2 = re.compile(\"^(- ){10,}[-]?$\")\nreFocus = re.compile(\"^\\\\\\{10,}?$\")\nreDisabler = re.compile(\"^///{10,}?$\")\nreComment = re.compile(\"#\\s*(.*)\\s*$\")\nreArg = re.compile(\"%\\s*(.*)\\s*$\")\nreArgPair = re.compile(\"%\\s*(.+?)\\s*:\\s*(.+?)\\s*$\")\nreTag = re.compile(\"@(([^/\\s]+/)*([^/\\s]+)?)\")\n\n# **************************************************************************************************\n\nclass TestFileFunc:\n def __init__(self, fixture, filePath, methodName):\n self.fixture = fixture\n self.testFilePath = filePath\n self.methodName = methodName\n self.testContainer = True\n\n def __call__(self):\n fn = getattr(self.fixture, self.methodName)\n disregardFocus = getattr(self.fixture, \"disregardFocus\", True)\n \n outputPath = None\n\n for source,expected,args,files,lineNo in self.walkTextTests(disregardFocus):\n if \"adhoc\" in args and disregardFocus:\n runner = lambda *args, **kwds: 0\n runner.skip = True\n yield runner, self.testFilePath, lineNo\n elif \"skip\" in args:\n runner = lambda *args, **kwds: 0\n runner.skip = True\n yield runner, self.testFilePath, lineNo\n elif \"nyi\" in args:\n runner = lambda *args, **kwds: 0\n runner.nyi = True\n yield runner, self.testFilePath, lineNo\n else:\n if files:\n outputPath = testOutputPath(self.testFilePath)\n if not os.path.isdir(outputPath):\n os.makedirs(outputPath)\n\n for testPath in files:\n subPath = os.path.dirname(testPath)\n dirPath = os.path.join(outputPath, subPath)\n fileName = os.path.basename(testPath)\n filePath = os.path.join(dirPath, fileName)\n if not os.path.isdir(dirPath):\n os.makedirs(dirPath)\n \n f = 
file(filePath, \"w\")\n f.write(files[testPath])\n f.close()\n \n printFn, copyOutput, testFn = self.extractArgs(args, \"print\", \"copy\", \"test\")\n printOutput = printFn and not callable(printFn) \n if testFn:\n runner = functools.partial(self.runTest, testFn, source, expected, args,\n copyOutput, printOutput, printFn)\n else:\n runner = functools.partial(self.runTest, fn, source, expected, args,\n copyOutput, printOutput, printFn)\n\n sys.path.append(os.path.dirname(self.testFilePath))\n if outputPath:\n sys.path.append(outputPath)\n\n if not os.path.isdir(outputPath):\n os.makedirs(outputPath)\n\n wd = os.getcwd()\n os.chdir(outputPath)\n\n yield runner, self.testFilePath, lineNo\n\n sys.path.remove(os.path.dirname(self.testFilePath))\n if outputPath:\n sys.path.remove(outputPath)\n os.chdir(wd)\n\n if \"keepfiles\" not in args:\n if os.path.isdir(outputPath):\n shutil.rmtree(outputPath)\n\n outputPath = None\n\n def walkTextGroups(self, disregardFocus=False):\n lines = [line for line in file(self.testFilePath)]\n return walkTextBlocks(lines, self.testFilePath, disregardFocus)\n \n def walkTextTests(self, disregardFocus=False):\n groups, hasFocus = self.walkTextGroups(disregardFocus)\n if hasFocus:\n for group in groups:\n for test in group.tests:\n if test.isFocused:\n yield test.source, test.expected, test.args, test.files, test.lineNo\n else:\n for group in groups:\n for test in group.tests:\n yield test.source, test.expected, test.args, test.files, test.lineNo\n\n def extractArgs(self, args, *names):\n for name in names:\n if not name in args:\n yield None\n else:\n value = args[name]\n del args[name]\n\n if not isinstance(value, str):\n yield True\n else:\n hiddenName = testFileFunctionHiddenName(value)\n if hasattr(self.fixture, hiddenName):\n fn = getattr(self.fixture, hiddenName)\n yield fn\n else:\n self.fixture.warn(\"Test '%s' is not defined\" % value)\n yield None\n\n def runTest(self, fn, source, expected, args, copyOutput, printOutput, 
printFn):\n from .TestFixture import TestException, RunException, TestAbortException\n\n try:\n if callable(printFn):\n print printFn(source, expected, **args)\n\n actual = fn(source, expected, **args)\n if copyOutput:\n copyToClipboard(actual)\n if printOutput:\n print actual\n if actual != None:\n self.fixture.assertEqualString(actual, expected)\n except TestAbortException, exc:\n raise\n except TestException, exc:\n raise\n except RunException, exc:\n raise\n except:\n self.fixture.assertException(expected)\n\n# **************************************************************************************************\n\nclass TestTag(object):\n def __init__(self, names):\n self.names = names\n \n def isContained(self, other):\n pass\n \nclass TestGroup(object):\n def __init__(self):\n self.comment = ''\n self.tags = []\n self.tests = []\n \nclass TestBlock(object):\n def __init__(self):\n self.source = ''\n self.expected = ''\n self.comment = ''\n self.args = {}\n self.files = {}\n self.lineNo = 0\n self.isFocused = False\n\n# **************************************************************************************************\n\nExpectRoot = 1\nExpectEndComment = 2\nExpectSource = 3\nExpectExpected = 4\nExpectFile = 5\n\nclass ParseState(object):\n def __init__(self):\n self.expect = ExpectRoot\n \n self.isDisabled = False\n self.hasFocus = False\n self.currentGroup = None\n self.currentTest = None\n self.currentFile = None\n self.currentFilePath = None\n self.groups = []\n self.lineNo = 0\n\ndef walkTextBlocks(lines, testFilePath, disregardFocus=False):\n state = ParseState()\n \n def consumeArg(line):\n m = reArgPair.match(line)\n if m:\n argName = m.groups()[0]\n argValue = m.groups()[1] \n \n if argName == \"file\":\n state.expect = ExpectFile\n state.currentFile = ''\n state.currentFilePath = argValue\n else:\n state.currentTest.args[argName] = argValue\n return True\n else:\n m = reArg.match(line)\n if m:\n instruction = m.groups()[0]\n 
state.currentTest.args[instruction] = True \n return True\n \n while lines:\n line = lines[0]\n del lines[0]\n \n state.lineNo += 1\n\n if state.expect == ExpectRoot:\n if reCommentSep.match(line):\n state.currentGroup = TestGroup()\n \n state.expect = ExpectEndComment\n\n elif reTestSep1.match(line):\n state.currentTest = TestBlock()\n state.currentTest.lineNo = state.lineNo\n\n state.expect = ExpectSource\n\n elif state.expect == ExpectEndComment:\n if reCommentSep.match(line):\n state.groups.append(state.currentGroup)\n\n state.expect = ExpectRoot\n else:\n m = reTag.findall(line)\n if m:\n state.currentGroup.tags = list([mm[0] for mm in m])\n else:\n state.currentGroup.comment += line\n \n elif state.expect == ExpectSource:\n if consumeArg(line):\n pass\n elif reComment.match(line):\n state.currentTest.comment += line\n elif reFocus.match(line):\n state.currentTest.isFocused = True\n state.hasFocus = True\n elif reDisabler.match(line):\n state.isDisabled = True\n elif reTestSep2.match(line): \n state.currentTest.lineNo = state.lineNo\n state.expect = ExpectExpected\n else:\n state.currentTest.source += line\n\n elif state.expect == ExpectExpected:\n if consumeArg(line):\n pass \n elif reTestSep2.match(line): \n state.currentTest.lineNo = state.lineNo\n state.expect = ExpectExpected\n elif reTestSep1.match(line) or reCommentSep.match(line):\n if not state.isDisabled:\n if not state.currentGroup:\n state.currentGroup = TestGroup()\n state.groups.append(state.currentGroup)\n state.currentGroup.tests.append(state.currentTest)\n \n state.currentTest.source = state.currentTest.source.strip('\\n')\n state.currentTest.expected = state.currentTest.expected.strip('\\n')\n state.currentTest = None\n state.expect = ExpectRoot\n \n lines.insert(0, line)\n state.lineNo -= 1\n else:\n state.currentTest.expected += line\n\n elif state.expect == ExpectFile:\n if reTestSep2.match(line): \n state.currentTest.files[state.currentFilePath] = state.currentFile \n 
state.currentTest.lineNo = state.lineNo\n state.currentFile = None\n \n state.expect = ExpectExpected\n else:\n state.currentFile += line\n \n return state.groups, state.hasFocus\n\ndef coerceArgs(args):\n for name,value in args.iteritems():\n try:\n args[name] = int(value)\n except:\n if value.lower() == \"false\":\n args[name] = False\n elif value.lower() == \"true\":\n args[name] = True\n" }, { "alpha_fraction": 0.7102803587913513, "alphanum_fraction": 0.7102803587913513, "avg_line_length": 16.83333396911621, "blob_id": "df5ed7f5ae72da8ee083cb526c41e1ce0732f080", "content_id": "3817f091dc1d09cbed8885757645faf963f46508", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 214, "license_type": "permissive", "max_line_length": 25, "num_lines": 12, "path": "/src/vm/UpInternal.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPINTERNAL_H\n#define UP_UPINTERNAL_H\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <stdbool.h>\n#include <stdarg.h>\n#include <unistd.h>\n#include <math.h>\n\n#endif // UP_UPINTERNAL_H\n" }, { "alpha_fraction": 0.4841395616531372, "alphanum_fraction": 0.4885011911392212, "avg_line_length": 27.17318344116211, "blob_id": "3b3d95a14be48e3ac920f88018bd305cadc7bf00", "content_id": "2dac577407bdf6cf866ec45b241e8049c4c8a240", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5044, "license_type": "permissive", "max_line_length": 100, "num_lines": 179, "path": "/make/util.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport os.path, subprocess, sys\n\n# **************************************************************************************************\n\nprojects = []\nprojectPaths = {}\nexports = {}\n\nrootProjectPath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../..\"))\n\n# 
**************************************************************************************************\n\ndef sourceLink(path, line=-1, col=-1):\n if line >= 0:\n if col >= 0:\n return \"[[%s:%s:%s]]\" % (path, line, col)\n else:\n return \"[[%s:%s]]\" % (path, line)\n else:\n return \"[[%s]]\" % (path,)\n\ndef getSystemAbbreviation():\n import sys\n if sys.platform == \"win32\":\n return \"win\"\n if sys.platform == \"darwin\":\n return \"mac\"\n return \"unix\"\n\ndef importLocalModule(modName, modPath):\n sys.path.append(modPath)\n module = __import__(modName)\n sys.path.remove(modPath)\n return module\n\n# **************************************************************************************************\n\ndef formatTable(columns, divider='=', separator='|', margin=' ', padding=5):\n rowFormats = []\n headerRow = []\n rows = []\n totalWidth = 0\n pad = ' ' * padding\n\n maxHeight = 0\n processedColumns = []\n for header,data in columns:\n lines = data.split(\"\\n\")\n count = len(lines)\n \n processedColumns.append((header, lines, count))\n if count > maxHeight:\n maxHeight = count \n\n format = []\n for header,lines,count in processedColumns:\n maxWidth = len(header) if header else 0\n\n lr = len(rows)\n ll = len(lines)+1\n if lr < ll:\n for i in xrange(lr, ll-1):\n rows.append([])\n\n headerRow.append(header)\n \n i = 0\n for line in lines:\n maxWidth = max(len(line), maxWidth)\n row = rows[i]\n row.append(line)\n i += 1\n\n for i in xrange(count, maxHeight):\n row = rows[i]\n row.append('')\n\n totalWidth += maxWidth\n\n if rowFormats:\n rowFormat = \"\".join((\"%-\", str(maxWidth), \"s\"))\n totalWidth += padding*2 + len(separator)\n else:\n rowFormat = \"\".join((\"%-\", str(maxWidth), \"s\"))\n rowFormats.append(rowFormat)\n\n totalWidth -= padding + len(separator)\n totalWidth += len(margin)*2\n\n s = \"\".join((pad, separator, pad))\n rowFormat = margin + s.join(rowFormats) + margin\n\n # Filling missing columns \n formattedRows = [rowFormat % tuple(row) 
for row in rows]\n\n if headerRow:\n head = [rowFormat % tuple(headerRow)]\n else:\n head = []\n if divider:\n div = [divider * (totalWidth + padding + len(separator))]\n else:\n div = []\n\n lines = div + head + div + formattedRows + div\n return \"\\n\".join(lines)\n\n# **************************************************************************************************\n\ndef sublaunch(command, cwd=None, env=None, echo=False):\n import subprocess, pty, traceback, fcntl, select, sys\n\n master, slave = pty.openpty()\n process = subprocess.Popen(command, bufsize=1, shell=True, stdout=slave, stderr=slave,\n cwd=cwd, env=env)\n \n # Make the pty non-blocking, or we will hang sometimes reading the last bits of output\n fl = fcntl.fcntl(master, fcntl.F_GETFL)\n fcntl.fcntl(master, fcntl.F_SETFL, fl | os.O_NDELAY)\n\n stdout = os.fdopen(master)\n\n reads = []\n error = 0\n while True:\n try:\n r, w, e = select.select([stdout], [], [], 0.1)\n if r:\n text = stdout.read()\n reads.append(text)\n if echo:\n sys.stdout.write(text)\n\n error = process.poll()\n if error != None:\n break\n \n except SystemExit,exc:\n process.send_signal(exc.code)\n raise\n except KeyboardInterrupt:\n process.terminate()\n import sys\n from signal import SIGTERM\n sys.exit(SIGTERM)\n except:\n traceback.print_exc()\n break\n\n stdout.close()\n os.close(slave)\n\n output = \"\".join(reads)\n return output, error\n\ndef subread(command, cwd=None, env=None, echo=False):\n import subprocess, sys\n\n process = subprocess.Popen(command, bufsize=1, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, cwd=cwd, env=env)\n\n reads = []\n while process.returncode == None:\n text = process.stdout.read()\n reads.append(text)\n if echo:\n sys.stdout.write(text)\n process.poll()\n \n error = process.returncode\n output = \"\".join(reads)\n return output, error\n\n# **************************************************************************************************\n\nclass BuildError(Exception):\n def 
__init__(self, code=0, description=None):\n self.code = code\n self.description = description\n" }, { "alpha_fraction": 0.5079689621925354, "alphanum_fraction": 0.5098079442977905, "avg_line_length": 25.165775299072266, "blob_id": "dc3fedc3a12a89498788ecb740ea737df3240c4c", "content_id": "83e584681b740bbb2cb186f5527a7d60c1dd825e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4894, "license_type": "permissive", "max_line_length": 88, "num_lines": 187, "path": "/make/Message.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nclass Message(object):\n def __init__(self):\n pass\n\n def __str__(self):\n pass\n\n def getJSON(self):\n pass\n\n def affect(self, stream):\n pass\n\nclass error(Message):\n def __init__(self, text, source=None, line=0, col=0):\n self.kind = 'error'\n self.text = text\n self.source = source\n self.line = line\n self.col = col\n\n def __str__(self):\n if self.source:\n if self.line:\n return '* %s: %s (%s:%s)' \\\n % (self.kind.upper(), self.text, self.source, self.line)\n else:\n return '* %s: %s (%s)' \\\n % (self.kind.upper(), self.text, self.source)\n else:\n return '* %s: %s' % (self.kind.upper(), self.text)\n\n def getJSON(self):\n packet = {\"type\": self.kind, \"description\": self.text}\n if self.source:\n packet['path'] = self.source\n packet['line'] = self.line\n packet['col'] = self.col\n return packet\n\n def affect(self, stream):\n stream.errorCount += 1\n\nclass warning(error):\n def __init__(self, *args):\n super(warning, self).__init__(*args)\n self.kind = 'warning'\n\n def affect(self, stream):\n stream.warningCount += 1\n\nclass opener(Message):\n def __init__(self, text, source=None, line=0):\n self.text = text\n self.source = source\n self.line = 0\n\n def __str__(self):\n if self.source:\n return '> %s [[%s:%s]]' % (self.text, self.source, self.line)\n else:\n return '> %s' % (self.text)\n\n def getJSON(self):\n packet = {\"type\": 
\"opener\", \"description\": self.text}\n if self.source:\n packet['path'] = self.source\n packet['line'] = self.line\n return packet\n\nclass closer(Message):\n def __str__(self):\n return ''\n\n def getJSON(self):\n return {\"type\": \"closer\"}\n\nclass command(Message):\n def __init__(self, text, source=None, line=0, col=0):\n self.text = text\n self.source = source\n self.line = line\n self.col = col\n\n def __str__(self):\n if self.source:\n if self.line:\n return '* %s (%s:%s)' \\\n % (self.text, self.source, self.line)\n else:\n return '* %s (%s)' \\\n % (self.text, self.source)\n else:\n return '* %' % (self.text)\n\n def getJSON(self):\n packet = {\"type\": \"command\", \"description\": self.text}\n if self.source:\n packet['path'] = self.source\n packet['line'] = self.line\n return packet\n\n def affect(self, stream):\n stream.commandCount += 1\n\nclass summary(Message):\n def __init__(self, text):\n self.text = text\n\n def __str__(self):\n return self.text\n\n def getJSON(self):\n return {\"type\": \"summary\", \"description\": self.text}\n\n#####################################################################################\n\nclass testBegin(Message):\n def __init__(self, result, testName, sourcePath):\n self.result = result\n self.testName = testName\n self.sourcePath = sourcePath\n\n def __str__(self):\n return '* Begin %s' % self.testName\n\n def getJSON(self):\n return {\"type\": \"test-begin\", \"path\": self.sourcePath, \"test\": self.testName}\n\nclass testComplete(Message):\n def __init__(self):\n pass\n\n def __str__(self):\n return ''\n\n def getJSON(self):\n return {\"type\": \"test-complete\"}\n\nclass testPassed(Message):\n def __str__(self):\n return 'PASS'\n\n def getJSON(self):\n return {\"type\": \"test-passed\"}\n\nclass testNYI(Message):\n def __str__(self):\n return 'PASS'\n\n def getJSON(self):\n return {\"type\": \"test-nyi\"}\n\nclass testFailure(Message):\n def __init__(self, reason, sourcePath, line, expected=None, 
actual=None,\n args=None, source=None):\n self.reason = reason\n self.sourcePath = sourcePath\n self.line = line\n self.expected = expected\n self.actual = actual\n self.args = args\n self.source = source\n\n def __str__(self):\n return self.result\n\n def getJSON(self):\n packet = {\"type\": \"test-failed\", \"reason\": self.reason, \"path\": self.sourcePath,\n \"line\": self.line}\n if self.expected:\n packet['expected'] = self.expected\n if self.actual:\n packet['actual'] = self.actual\n if self.args:\n packet['args'] = self.args\n if self.source:\n packet['source'] = self.source\n return packet\n\n\nclass testMetadata(Message):\n def __init__(self, metadata):\n self.metadata = metadata\n\n def getJSON(self):\n return {\"type\": \"test-metadata\", \"metadata\": self.metadata}\n" }, { "alpha_fraction": 0.7741312980651855, "alphanum_fraction": 0.7779922485351562, "avg_line_length": 20.58333396911621, "blob_id": "75864c3256aea123e447220d39a38644351ca09a", "content_id": "4fbff077887b4cf3f354f6c99c43e41c1d1693f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 518, "license_type": "permissive", "max_line_length": 53, "num_lines": 24, "path": "/src/vm/include/UpDebug.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPDEBUG_H\n#define UP_UPDEBUG_H\n\n#include \"Up/UpGlobal.h\"\n\nuint64_t UpGetProbeTime();\n\nchar* UpSyntaxToXML(UpSyntax* node);\n\nchar* UpBytecodeToString(UpFunctionDef* functionDef);\n\nchar* UpObjectToString(UpObject* object);\n\nchar* UpFunctionDefToString(UpFunctionDef* funcDef);\nchar* UpClassDefToString(UpClassDef* classDef);\nchar* UpObjectClassDefsToString(UpObject* object);\n\nchar* UpStrTableToString(UpStrTable* self);\n\nvoid UpMapMappings();\n\n#define CRASH ((UpSyntax*)NULL)->line);\n\n#endif // UP_UPDEBUG_H\n" }, { "alpha_fraction": 0.5023099780082703, "alphanum_fraction": 0.5033179521560669, "avg_line_length": 30.244094848632812, "blob_id": 
"44e5abcdde6187576028b654d276fbb5f37a61d1", "content_id": "e2c02ff33ee20fb9cf7107e451a0684319806bd1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11905, "license_type": "permissive", "max_line_length": 99, "num_lines": 381, "path": "/src/vm/UpCFunction.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpNull.h\"\n#include \"Up/UpCPointer.h\"\n#include \"UpCLibrary.h\"\n#include \"Up/UpArena.h\"\n#include \"libffi/ffi.h\"\n\n// ************************************************************************************************\n\nstruct UpCFunctionDef {\n char* name;\n UpCLibrary* library;\n UpCType* returnType;\n int argumentCount;\n UpCType* argumentTypes;\n void** argumentValues;\n void** argumentPValues;\n ffi_cif cif;\n void* func;\n bool isInitialized:1;\n};\n\n// ************************************************************************************************\n\nstatic UpStatus _UpObjectToCValue(UpObject* value, UpCType* ctype, void** out) {\n if (ctype->pointers == 0) {\n switch (ctype->primitive) {\n case UpCVoidType: {\n *out = 0;\n return UpSuccess;\n }\n case UpCCharType: {\n if (UpIsTypeOf(value, UP_BUILTIN(string))) {\n UpString* str = (UpString*)value;\n memcpy(out, str->value, sizeof(char));\n return UpSuccess;\n }\n }\n case UpCBoolType: \n case UpCUCharType:\n case UpCShortType:\n case UpCUShortType:\n case UpCIntType:\n case UpCUIntType:\n case UpCLongType:\n case UpCULongType: {\n int num;\n if (!UpAsInt(value, &num)) {\n UpSetError(\"Can't convert object to integer\");\n return UpFailure;\n }\n memcpy(out, &num, sizeof(int));\n return UpSuccess;\n }\n case UpCLongLongType:\n case UpCULongLongType: {\n long 
long num;\n if (!UpAsLong(value, &num)) {\n UpSetError(\"Can't convert object to long integer\");\n return UpFailure;\n }\n memcpy(out, &num, sizeof(long long));\n return UpSuccess;\n }\n case UpCFloatType:\n case UpCDoubleType:\n case UpCLongDoubleType: {\n double num;\n if (!UpAsFloat(value, &num)) {\n UpSetError(\"Can't convert object to float\");\n return UpFailure;\n }\n memcpy(out, &num, sizeof(double));\n return UpSuccess;\n }\n default: {\n UpSetError(\"Unable to convert argument\");\n return UpFailure;\n }\n }\n } else if (ctype->pointers == 1) {\n if (ctype->primitive == UpCUpObjectType) {\n *out = (UpObject*)value;\n return UpSuccess;\n } else if (ctype->primitive == UpCCharType) {\n if (UpIsTypeOf(value, UP_BUILTIN(string))) {\n UpString* str = (UpString*)value;\n memcpy(out, &(str->value), sizeof(char*));\n return UpSuccess;\n } else if (UpIsTypeOf(value, UP_BUILTIN(integer))) {\n const char* str = UpIntegerToCString((UpInteger*)value, 10);\n memcpy(out, &str, sizeof(char*));\n return UpSuccess;\n } else if (UpIsTypeOf(value, UP_BUILTIN(long))) {\n const char* str = UpLongToCString((UpLong*)value, 10);\n memcpy(out, &str, sizeof(char*));\n return UpSuccess;\n } else if (UpIsTypeOf(value, UP_BUILTIN(float))) {\n const char* str = UpFloatToCString((UpFloat*)value);\n memcpy(out, &str, sizeof(char*));\n return UpSuccess;\n } else if (UpIsTypeOf(value, UP_BUILTIN(null))) {\n *out = NULL;\n return UpSuccess;\n } else {\n UpSetError(\"Expected string argument\");\n return UpFailure; \n }\n } else if (ctype->primitive == UpCUpContextType) {\n *out = UPCTX;\n return UpSuccess;\n } else {\n if (UpIsTypeOf(value, UP_BUILTIN(cpointer))) {\n UpCPointer* v = (UpCPointer*)value;\n *out = v->ptr;\n return UpSuccess;\n } else if (UpIsTypeOf(value, UP_BUILTIN(null))) {\n *out = NULL;\n return UpSuccess;\n } else {\n UpSetError(\"Unable to convert object to C data type\");\n return UpFailure; \n }\n }\n } else {\n UpSetError(\"C array conversion NYI\");\n return 
UpFailure; \n }\n}\n\nstatic UpStatus _CValueToUpObject(ffi_arg* value, UpCType* ctype, UpObject** out) {\n if (ctype->pointers == 0) {\n switch (ctype->primitive) {\n case UpCVoidType: {\n *out = UpUndefined();\n return UpSuccess;\n }\n case UpCStructType: {\n UpSetError(\"C struct conversion NYI\");\n return UpFailure; \n }\n case UpCBoolType: {\n bool* v = (bool*)value;\n *out = (UpObject*)UpTrueOrFalse(*v);\n return UpSuccess;\n }\n case UpCCharType: \n case UpCUCharType:\n case UpCShortType:\n case UpCUShortType:\n case UpCIntType:\n case UpCUIntType:\n case UpCLongType:\n case UpCULongType: {\n int* v = (int*)value;\n *out = (UpObject*)UpIntegerCreate(*v);\n return UpSuccess;\n }\n case UpCLongLongType:\n case UpCULongLongType: {\n long long* v = (long long*)value;\n *out = (UpObject*)UpLongCreate(*v);\n return UpSuccess;\n }\n case UpCFloatType:\n case UpCDoubleType:\n case UpCLongDoubleType: {\n double* v = (double*)value;\n *out = (UpObject*)UpFloatCreate(*v);\n return UpSuccess;\n }\n default: {\n UpSetError(\"Unsupported data type\");\n return UpFailure; \n }\n }\n } else if (ctype->pointers == 1) {\n if (ctype->primitive == UpCUpObjectType) {\n UpObject** v = (UpObject**)value;\n *out = *v;\n return UpSuccess;\n } else if (ctype->primitive == UpCCharType) {\n char** v = (char**)value;\n *out = (UpObject*)UpStringCreate(*v);\n return UpSuccess;\n } else {\n void** v = (void**)value;\n *out = (UpObject*)UpCPointerCreate(*v);\n return UpSuccess;\n }\n } else {\n UpSetError(\"C array conversion NYI\");\n return UpFailure; \n }\n}\n\nstatic ffi_type* _CTypeToFFIType(UpCType* ctype) {\n if (ctype->pointers) {\n return &ffi_type_pointer;\n }\n\n switch (ctype->primitive) {\n case UpCVoidType: {\n return &ffi_type_void;\n }\n case UpCUpObjectType:\n case UpCStructType: {\n return &ffi_type_pointer;\n }\n case UpCBoolType: {\n return &ffi_type_ushort;\n }\n case UpCCharType: {\n return &ffi_type_schar;\n }\n case UpCUCharType: {\n return &ffi_type_uchar;\n }\n 
case UpCShortType: {\n return &ffi_type_sshort;\n }\n case UpCUShortType: {\n return &ffi_type_ushort;\n }\n case UpCIntType: {\n return &ffi_type_sint;\n }\n case UpCUIntType: {\n return &ffi_type_uint;\n }\n case UpCLongType:\n case UpCLongLongType: {\n return &ffi_type_slong;\n }\n case UpCULongType:\n case UpCULongLongType: {\n return &ffi_type_ulong;\n }\n case UpCFloatType: {\n return &ffi_type_float;\n }\n case UpCDoubleType: {\n return &ffi_type_double;\n }\n case UpCLongDoubleType: {\n return &ffi_type_longdouble;\n }\n default:\n return &ffi_type_void;\n }\n}\n\nvoid _Bind(UpCFunctionDef* def) {\n if (!def->func) {\n def->func = UpCLibraryLoad(def->library, def->name); \n }\n}\n\nstatic UpStatus _InitCFunctionDef(UpCFunctionDef* def) {\n def->isInitialized = true;\n _Bind(def);\n\n int argCount = def->argumentCount;\n void** ffiArgs = def->argumentValues = UpArenaNewArray(UpGetHeap(), void, argCount);\n void** ffiPArgs = def->argumentPValues = UpArenaNewArray(UpGetHeap(), void, argCount);\n\n ffi_type* retType = _CTypeToFFIType(def->returnType);\n ffi_type** argTypes = UpArenaNewArray(UpGetHeap(), ffi_type, argCount);\n\n int i = 0;\n for (UpCType* arg = def->argumentTypes; arg; arg = arg->next) {\n argTypes[i] = _CTypeToFFIType(arg);\n ffiPArgs[i] = ffiArgs+i;\n ++i;\n }\n\n if (ffi_prep_cif(&def->cif, FFI_DEFAULT_ABI, argCount, retType, argTypes) != FFI_OK) {\n UpSetError(\"Error initializing FFI types\");\n return UpFailure;\n }\n\n return UpSuccess;\n}\n\n// ************************************************************************************************\n\nUpCFunction* UpCFunctionCreate(UpCFunctionDef* def) {\n UpCFunction* self = (UpCFunction*)UpObjectCreateWithClass(UP_BUILTIN(cfunction));\n self->def = def;\n return self;\n}\n\nUpCType* UpCTypeCreate(UpCPrimitive primitive, const char* name,\n int pointers) {\n UpCType* type = (UpCType*)UpArenaNew(UpGetHeap(), UpCType);\n type->primitive = primitive;\n type->name = UpArenaCopyString(UpGetHeap(), 
name);\n type->pointers = pointers;\n return type;\n}\n\nUpStatus UpCFunctionDefCreate(const char* name, const char* library,\n UpCType* returnType, int argumentCount, UpCType* argumentTypes,\n UpCFunctionDef** out) {\n UpCFunctionDef* self = UpArenaNew(UpGetHeap(), UpCFunctionDef);\n self->library = UpGetCLibrary(library);\n self->name = UpArenaCopyString(UpGetHeap(), name);\n self->argumentCount = argumentCount;\n self->argumentTypes = argumentTypes;\n self->returnType = returnType;\n \n *out = self;\n return UpSuccess;\n}\n\nUpStatus UpCFunctionCall(UpCFunction* self, UpObject** args, UpObject** result) {\n UpCFunctionDef* def = self->def;\n\n if (!def->isInitialized) {\n if (!_InitCFunctionDef(def)) {\n return UpFailure;\n }\n }\n\n if (!def->func) {\n UpSetError(\"C function '%s' not found\", def->name);\n return UpFailure; \n }\n\n int i = 0;\n void** ffiArgs = def->argumentValues;\n for (UpCType* arg = def->argumentTypes; arg; arg = arg->next) {\n if (!_UpObjectToCValue(args[i], arg, ffiArgs+i)) {\n return UpFailure;\n }\n ++i;\n }\n\n ffi_arg ffiResult;\n ffi_call(&def->cif, FFI_FN(def->func), &ffiResult, def->argumentPValues);\n if (UpGetError()) {\n return UpFailure;\n }\n \n if (!_CValueToUpObject(&ffiResult, def->returnType, result)) {\n return UpFailure;\n }\n\n return UpSuccess;\n}\n\nchar* UpCFunctionGetName(UpCFunction* self) {\n return self->def->name;\n}\n\nbool UpCFunctionIsBound(UpCFunction* self) {\n if (self->def->func) {\n return true;\n } else {\n _Bind(self->def);\n return !!self->def->func;\n }\n}\n\n// ************************************************************************************************\n\nconst char* UpCFunctionDefGetName(UpCFunctionDef* self) {\n return self->name;\n}\n\nint UpCFunctionDefGetArgumentCount(UpCFunctionDef* self) {\n return self->argumentCount;\n}\n" }, { "alpha_fraction": 0.5641371011734009, "alphanum_fraction": 0.5723159909248352, "avg_line_length": 26.434659957885742, "blob_id": 
"0e3034b0b2885cc4b1ba2a1691e8c133e23d6aff", "content_id": "a977517d0de8ba6cc458401c3f106c3a980d2433", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9659, "license_type": "permissive", "max_line_length": 100, "num_lines": 352, "path": "/src/vm/UpString.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpArena.h\"\n#include \"lookup3.h\"\n\n// ************************************************************************************************\n\nstatic bool _EndsWith(const char* self, const char* suffix) {\n if (!self || !suffix) {\n return false;\n }\n\n size_t len = strlen(self);\n size_t lensuffix = strlen(suffix);\n if (lensuffix > len) {\n return false;\n }\n return strncmp(self + len - lensuffix, suffix, lensuffix) == 0;\n}\n\nstatic int _CountOccurances(const char* start, const char* find, int findLen) {\n int count = 0;\n while (1) {\n const char* c = strstr(start, find);\n if (c) {\n start = c+findLen;\n ++count;\n } else {\n break;\n }\n }\n return count;\n}\n\nstatic char* _ReverseStringFind(char* s1, char* s2, size_t len1, size_t len2) {\n size_t s1len = len1 == -1 ? strlen(s1) : len1;\n size_t s2len = len2 == -1 ? strlen(s2) : len2;\n\n if (s2len > s1len) {\n return NULL;\n }\n\n for (char* s = s1 + s1len - s2len; s >= s1; --s) {\n if (strncmp(s, s2, s2len) == 0) {\n return s;\n }\n }\n return NULL;\n}\n\n// ************************************************************************************************\n\nUpString* UpStringCreate(const char* value) {\n UpClass* cls = UpGetBuiltinClasses()->stringClass;\n UpString* self = (UpString*)(cls\n ? 
UpClassInstantiate(cls)\n : UpObjectCreateWithClass(UP_BUILTIN(string)));\n self->value = UpArenaCopyString(UpGetHeap(), value ? value : \"\");\n self->length = -1;\n COUNT_STRING(self->value);\n return self;\n}\n\nUpString* UpStringCreateWithLength(const char* value, int length) {\n UpClass* cls = UpGetBuiltinClasses()->stringClass;\n UpString* self = (UpString*)(cls\n ? UpClassInstantiate(cls)\n : UpObjectCreateWithClass(UP_BUILTIN(string)));\n self->value = UpArenaCopyStringN(UpGetHeap(), value ? value : \"\", length);\n self->length = length;\n COUNT_STRING(self->value);\n return self;\n}\n\nUpString* UpStringCreateWithChar(const char value) {\n UpClass* cls = UpGetBuiltinClasses()->stringClass;\n UpString* self = (UpString*)(cls\n ? UpClassInstantiate(cls)\n : UpObjectCreateWithClass(UP_BUILTIN(string)));\n self->value = UpArenaNewString(UpGetHeap(), 1);\n self->value[0] = value;\n self->value[1] = 0;\n self->length = 1;\n COUNT_STRING(self->value);\n return self;\n}\n\nUpString* UpStringFormat(const char* value, ...) {\n va_list args;\n\n va_start(args, value);\n int count = vsnprintf(NULL, 0, value, args);\n va_end(args);\n\n char* buf = UpArenaNewString(UpGetHeap(), count);\n\n va_start(args, value);\n vsnprintf(buf, count+1, value, args);\n va_end(args);\n\n return UpStringCreate(buf);\n}\n\nUpString* UpStringCreateWithConcatenation(const char* left, int len1, const char* right, int len2) {\n char* buf = UpArenaNewString(UpGetHeap(), len1+len2);\n memcpy(buf, left, sizeof(char)*len1);\n memcpy(buf + len1, right, sizeof(char)*len2);\n buf[len1+len2] = 0;\n\n UpClass* cls = UpGetBuiltinClasses()->stringClass;\n UpString* self = (UpString*)(cls\n ? 
UpClassInstantiate(cls)\n : UpObjectCreateWithClass(UP_BUILTIN(string)));\n self->value = buf;\n self->length = len1+len2;\n COUNT_STRING(self->value);\n return self;\n}\n\nUpString* UpStringCreateWithCode(char code) {\n return UpStringCreateWithChar(code);\n}\n\nvoid UpStringInit(UpString* self, const char* value) {\n self->value = UpArenaCopyString(UpGetHeap(), value);\n self->length = -1;\n COUNT_STRING(self->value);\n}\n\nUpObject* UpStringToNumber(UpString* self, bool validate) {\n if (strchr(self->value, '.')) {\n double num = atof(self->value);\n return (UpObject*)UpFloatCreate(num);\n } else if (_EndsWith(self->value, \"L\")) {\n long long num = atoll(self->value);\n return (UpObject*)UpLongCreate(num);\n } else {\n if (validate) {\n // XXXjoe Fail if string contains no numeric digits\n }\n\n int num = atoi(self->value);\n return (UpObject*)UpIntegerCreate(num);\n }\n}\n\nUpString* UpStringToLowercase(UpString* self) {\n UpString* copy = UpStringCreate(self->value);\n\n int length = UpStringGetLength(copy);\n int diff = 'A' - 'a';\n char* str = (char*)copy->value;\n\n int i;\n for (i = 0; i < length; ++i) {\n char c = str[i];\n if (c >= 'A' && c <= 'Z') {\n str[i] = c - diff;\n }\n }\n\n return copy;\n}\n\nUpString* UpStringToUppercase(UpString* self) {\n UpString* copy = UpStringCreate(self->value);\n\n int length = UpStringGetLength(copy);\n int diff = 'a' - 'A';\n char* str = (char*)copy->value;\n\n int i;\n for (i = 0; i < length; ++i) {\n char c = str[i];\n if (c >= 'a' && c <= 'z') {\n str[i] = c - diff;\n }\n }\n\n return copy;\n}\n\nunsigned char UpStringCodeAt(UpString* self, int index) {\n if (index < UpStringGetLength(self)) {\n return self->value[index];\n } else {\n UpSetError(\"String index of range\");\n return -1;\n }\n}\n\nint UpStringGetLength(UpString* self) {\n if (self->length == -1) {\n return self->length = strlen(self->value);\n } else {\n return self->length;\n }\n}\n\nUpHash UpStringHash(UpString* self) {\n if (self->hash) {\n 
return self->hash;\n } else {\n return self->hash = hashlittle((const void*)self->value, UpStringGetLength(self), 1);\n }\n}\n\nbool UpStringIsEmpty(UpString* self) {\n return self->length == -1 ? (!self->value || !*self->value) : self->length == 0;\n}\n\nUpObject* UpStringConcat(UpString* self, UpObject* other) {\n if (!UpIsTypeOf(other, UP_BUILTIN(string))) {\n UpSetError(\"Illegal concatenation on non-string\");\n return NULL;\n }\n\n UpString* otherStr = (UpString*)other;\n if (UpStringIsEmpty(otherStr)) {\n return (UpObject*)self;\n } else if (UpStringIsEmpty(self)) {\n return (UpObject*)otherStr;\n } else {\n int l1 = UpStringGetLength(self);\n int l2 = UpStringGetLength(otherStr);\n return (UpObject*)UpStringCreateWithConcatenation(self->value, l1, otherStr->value, l2);\n }\n}\n\nbool UpStringEquals(UpString* self, UpObject* other) {\n if (UpIsTypeOf(other, UP_BUILTIN(string))) {\n UpString* otherString = (UpString*)other;\n return !strcmp(self->value, otherString->value);\n } else {\n return false;\n }\n}\n\nUpString* UpStringIndex(UpString* self, int index) {\n int length = UpStringGetLength(self);\n if (index < -length || index >= length) {\n UpSetError(\"String index out of range\");\n return NULL;\n }\n\n if (index < 0) {\n index = length-index;\n }\n\n return UpStringCreateWithLength(self->value+index, 1);\n}\n\nUpString* UpStringSubstring(UpString* self, int begin, int end) {\n int originalLength = UpStringGetLength(self);\n\n const char* substr = self->value;\n int len = originalLength;\n\n if (begin < 0) {\n substr += (len - end);\n }\n if (end < 0) {\n len += end+1;\n } else if (end < begin) {\n len = 0;\n } else if (end < len) {\n len -= (len - end) - 1;\n } else if (end >= originalLength) {\n end = originalLength-1;\n } else {\n len = 0;\n }\n\n if (begin < originalLength && len > 0) {\n substr += begin;\n len -= begin;\n } else {\n len = 0;\n }\n\n if (len == originalLength) {\n return self;\n } else {\n return UpStringCreateWithLength(substr, 
len);\n }\n}\n\nint UpStringFind(UpString* self, char* findString, int startIndex) {\n char* str = self->value + startIndex;\n char* found = strstr(str, findString);\n return found ? found - self->value : -1;\n}\n\nint UpStringFindLast(UpString* self, char* findString, int startIndex) {\n size_t len1 = UpStringGetLength(self) - startIndex;\n size_t len2 = strlen(findString);\n char* found = _ReverseStringFind(self->value, findString, len1, len2);\n return found ? found - self->value : -1;\n}\n\nUpString* UpStringReplace(UpString* self, char* findString, char* replaceString, bool global) {\n UpArena* heap = UpGetHeap();\n\n int findLen = strlen(findString);\n int replLen = strlen(replaceString);\n\n // Figure out the new size of the new string if the replace string is smaller or larger\n int lenDiff = 0;\n if (replLen != findLen) {\n int occurances = _CountOccurances(self->value, findString, findLen);\n if (occurances) {\n lenDiff = (replLen-findLen) * occurances;\n } else {\n return self;\n }\n }\n\n char* newString = NULL;\n\n const char* start = self->value;\n char* insert = NULL;\n while (1) {\n const char* c = strstr(start, findString);\n if (c) {\n if (!insert) {\n insert = newString = UpArenaNewString(heap, UpStringGetLength(self)+lenDiff+1);\n }\n memcpy(insert, start, c - start);\n insert += c - start;\n memcpy(insert, replaceString, replLen);\n insert += replLen;\n start = c + findLen;\n if (!global) {\n memcpy(insert, start, strlen(start));\n break;\n }\n } else {\n if (insert) {\n memcpy(insert, start, strlen(start));\n }\n break;\n }\n }\n\n return UpStringCreate(newString);\n}\n\n" }, { "alpha_fraction": 0.6791443824768066, "alphanum_fraction": 0.6791443824768066, "avg_line_length": 13.384614944458008, "blob_id": "94d3c205286df702552fc8139112c04886a52351", "content_id": "6eb583aebf68ba8c9e68e36f134b0d5df9407c54", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 187, "license_type": 
"permissive", "max_line_length": 40, "num_lines": 13, "path": "/src/vm/include/UpType.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPTYPE_H\n#define UP_UPTYPE_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpType {\n UP_PROBE_ID;\n UpSymbol name;\n};\n\nconst char* UpTypeGetName(UpType* self);\n\n#endif // UP_UPTYPE_H\n" }, { "alpha_fraction": 0.7482331991195679, "alphanum_fraction": 0.7482331991195679, "avg_line_length": 29.594594955444336, "blob_id": "ccad42392e2a99e14173d74a73cfdc585a5dcf23", "content_id": "4593371ea78d6fc6b6c4d3b99366e947df5e5711", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1132, "license_type": "permissive", "max_line_length": 61, "num_lines": 37, "path": "/src/vm/UpBuffer.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPBUFFER_H\n#define UP_UPBUFFER_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpBuffer {\n UpArena* heap;\n char* buf;\n char* cursor;\n int length;\n int indentLevel;\n};\n\nUpBuffer* UpBufferCreate(UpArena* heap);\nUpBuffer* UpBufferCreateWithSize(UpArena* heap, size_t size);\n\nint UpBufferCount(UpBuffer* self);\nchar* UpBufferString(UpBuffer* self);\nint UpBufferCursor(UpBuffer* self);\n\nvoid UpBufferSeek(UpBuffer* self, int cursor);\nvoid UpBufferSeekChar(UpBuffer* self, char c);\nvoid UpBufferRSeekChar(UpBuffer* self, char c);\n\nvoid UpBufferWrite(UpBuffer* self, void* value, size_t size);\nvoid UpBufferWriteChar(UpBuffer* self, char value);\nvoid UpBufferWriteInt(UpBuffer* self, int value);\nvoid UpBufferWriteLongLong(UpBuffer* self, long long value);\nvoid UpBufferWriteDouble(UpBuffer* self, double value);\nvoid UpBufferWritePointer(UpBuffer* self, void* value);\n\nvoid UpBufferPrint(UpBuffer* self, const char* str, ...);\nvoid UpBufferOpen(UpBuffer* self, const char* str, ...);\nvoid UpBufferClose(UpBuffer* self, const char* str, ...);\nvoid UpBufferLine(UpBuffer* self, const char* str, 
...);\n\n#endif // UP_UPBUFFER_H\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 12.399999618530273, "blob_id": "6a8427d915f48ef68099767d336c5aa9f64ad531", "content_id": "b681424baf000a5139cbb0867016de3f997d80e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 403, "license_type": "permissive", "max_line_length": 42, "num_lines": 30, "path": "/Makefile", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nall: build\n\nbuild:\n\t@python make.py build\n\nclean:\n\t@python make.py clean\n\ntest:\nifeq ($(test), )\n\t@python make.py test\nelse\n\t@python make.py test --test $(test)\nendif\t\n\ntestCatalog:\n\t@python make.py testCatalog\n\nmetric:\nifeq ($(metric), )\n\t@python make.py metric\nelse\n\t@python make.py metric --metric $(metric)\nendif\t\n\nmetricsCatalog:\n\t@python make.py metricsCatalog\n\narchive:\n\t@python make.py archive\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 61, "blob_id": "b8a307ccf7f9b5cfbb66d65ec219123423b1fc03", "content_id": "6ce75b527ade610cec3da0f2e863919df9182b59", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "permissive", "max_line_length": 80, "num_lines": 2, "path": "/make/metric/__init__.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom .Analyst import probe, table, column, metric, sortAscending, sortDescending\nfrom .Analyst import Analyst, ProbeAnalyst\n\n" }, { "alpha_fraction": 0.5399311780929565, "alphanum_fraction": 0.5432973504066467, "avg_line_length": 38.42216491699219, "blob_id": "1614f737b3ace2bb32a570514dad4dd5c4b509e5", "content_id": "b855c25136159d744741101385bad32e0c92e36f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 
108136, "license_type": "permissive", "max_line_length": 103, "num_lines": 2743, "path": "/src/vm/UpCompiler.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpCompiler.h\"\n#include \"Up/Up.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpFunction.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpException.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpDebug.h\"\n#include \"UpSyntax.h\"\n#include \"UpArray.h\"\n#include \"UpStrTable.h\"\n#include \"UpIntTable.h\"\n#include \"UpBuffer.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic void _DeclareExpansion(UpCompiler* self, UpClassDef* classDef, const char*, UpSyntax*,\n int*, int*);\nstatic UpStatus _CompileSyntax(UpCompiler* self, UpSyntax* node, bool statement);\nstatic UpStatus _NameForClass(UpCompiler* self, UpSyntax* sig, const char** out);\nstatic UpStatus _NameForFunction(UpCompiler* self, UpSyntax* sig, const char** out);\nstatic UpStatus _GetName(UpCompiler* self, UpSyntax* node, const char* name);\nstatic UpStatus _CompileFunctionHead(UpCompiler* self, const char* name, UpSyntax* sig,\n UpSyntax* body, bool isAnon, bool declareThis);\nstatic UpStatus _CompileFunctionBody(UpCompiler* self, const char* name, UpSyntax* body,\n bool isGenerator, bool isExpression, bool isImmediate);\n\n// ************************************************************************************************\n\ntypedef struct UpLoopFrame UpLoopFrame;\n\nstruct UpLoopFrame {\n UpArray* continues;\n UpArray* breaks;\n UpLoopFrame* previous;\n};\n\nstruct UpCompilerFrame {\n UpFunctionDef* funcDef;\n UpScope* scope;\n UpLoopFrame* loop;\n UpClassDef* classDef;\n int line;\n int lineOp;\n int didReturn;\n bool isGenerator:1;\n 
bool isContextual:1;\n UpCompilerFrame* previous;\n};\n\n// ************************************************************************************************\n\nstatic UpStatus _PushFunction(UpCompiler*, const char*, UpSyntax*, UpSyntax*, bool, bool, bool, bool);\nstatic UpStatus _CompileFunctionDef(UpCompiler*, const char*, UpSyntax*, UpSyntax*, bool, bool, bool,\n bool, UpFunctionDef**);\nstatic UpStatus _CompileCFunctionDef(UpCompiler* self, const char* name, const char* library,\n UpSyntax* returns, UpSetSyntax* args, UpCFunctionDef** outDef);\nstatic UpStatus _CompileClassDef(UpCompiler*, const char*, UpSyntax*, UpSyntax*, bool, UpClassDef**,\n int*, int*);\n\nstatic void _NewOp(UpCompiler* self, UpSyntax* node, UpInstruction op) ;\nstatic void _NewOp1(UpCompiler* self, UpSyntax* node, UpInstruction op, int arg1);\nstatic void _NewOp1v(UpCompiler* self, UpSyntax* node, UpInstruction op, void* arg1);\nstatic void _NewOp2(UpCompiler* self, UpSyntax* node, UpInstruction op, int arg1, int arg2);\nstatic void _NewOp2v(UpCompiler* self, UpSyntax* node, UpInstruction op, void* arg1, int arg2);\n\nstatic int TEMPchannelCounter = 0;\n\n// ************************************************************************************************\n\n#define SAFECALL(_CALL) \\\n if (!(_CALL)) { return UpFailure; }\n\n#define NYI \\\n _SetError(self, \"NYI\", (UpSyntax*)node); \\\n return UpFailure; \\\n\n#ifdef UP_ENABLE_PROBES\n\n#define PUSH_DEBUG_ID(_FORMAT, _NAME) \\\n UpBufferPrint(self->probeId, _FORMAT, _NAME);\n\n#define POP_DEBUG_ID() { \\\n UpBufferRSeekChar(self->probeId, '|'); \\\n UpBufferPrint(self->probeId, \"\\0\"); \\\n}\n\n#else\n\n#define PUSH_DEBUG_ID(_FORMAT, _NAME)\n#define POP_DEBUG_ID()\n\n#endif\n\n// ************************************************************************************************\n\nstatic bool _IsStringSyntaxType(UpSyntax* node) {\n if (node->type == UpStringSyntaxType) {\n return true;\n }\n\n return false;\n}\n\nstatic bool 
_IsChannelOp(UpOperator op) {\n return op == UpReadOp\n || op == UpWriteOp;\n}\n\nstatic UpStatus _CompileCloseGenerator(UpCompiler* self) {\n SAFECALL(_GetName(self, NULL, \"__GENERATOR__\"));\n UpSymbol symbol = UpGetSymbol(\"close\");\n _NewOp2(self, NULL, UpInstructionCallProperty, 0, symbol);\n return UpSuccess;\n}\n\nstatic void _PushFrame(UpCompiler* self, const char* name, UpSyntax* ast) {\n PUSH_DEBUG_ID(\"|%s\", name);\n\n UpFunctionDef* def = UpArenaNew(UpGetHeap(), UpFunctionDef);\n def->name = UpArenaCopyString(UpGetHeap(), name ? name : \"\");\n if (self->probeId) {\n UP_SET_PROBE_ID(def, UpGetSymbol(UpBufferString(self->probeId)));\n }\n def->ast = ast;\n def->sourcePath = self->sourcePath;\n def->moduleName = self->moduleName;\n def->lines = UpArrayCreate(UpGetHeap(), sizeof(UpLineItem));\n def->ops = UpBufferCreate(UpGetHeap());\n\n UpCompilerFrame* newFrame = UpArenaNew(UpGetHeap(), UpCompilerFrame);\n\n newFrame->funcDef = def;\n newFrame->didReturn = 0;\n newFrame->previous = self->frame;\n\n UpScope* scope = UpArenaNew(UpGetHeap(), UpScope);\n scope->locals = UpArrayCreate(UpGetHeap(), sizeof(UpVariable*));\n scope->previous = self->frame ? 
self->frame->scope : NULL;\n newFrame->scope = scope;\n def->scope = scope;\n\n self->frame = newFrame;\n BEGIN_COMPILER_FRAME(def->probeId);\n}\n\nstatic UpCompilerFrame* _PopFrame(UpCompiler* self, UpFunctionDef** outDef) {\n POP_DEBUG_ID();\n END_COMPILER_FRAME(self->frame->funcDef->probeId);\n\n UpCompilerFrame* frame = self->frame;\n\n // Return undefined if it's possible to make it to the end without an explicit return\n if (!frame->didReturn) {\n if (frame->isGenerator) {\n SAFECALL(_CompileCloseGenerator(self));\n }\n _NewOp1v(self, NULL, UpInstructionLoadObject, UpUndefined());\n _NewOp(self, NULL, UpInstructionReturn);\n }\n\n self->frame = frame->previous;\n frame->previous = NULL;\n\n *outDef = frame->funcDef;\n\n // printf(\"\\n\\n***** FUNCTION %s\\n\", frame->funcDef->name);\n // printf(\"%s\\n\", UpBytecodeToString(frame->funcDef)); fflush(stdout);\n return frame;\n}\n\nstatic UpLoopFrame* _PushLoop(UpCompiler* self) {\n UpLoopFrame* frame = UpArenaNew(UpGetHeap(), UpLoopFrame);\n frame->previous = self->frame->loop;\n self->frame->loop = frame;\n return frame;\n}\n\nstatic void _PopLoop(UpCompiler* self, int startCursor, int endCursor, bool isIterator) {\n UpArray* continues = self->frame->loop->continues;\n if (continues) {\n for (int i = 0; i < UpArrayCount(continues); ++i) {\n int jumpCursor;\n UpArrayGet(continues, i, &jumpCursor);\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor);\n _NewOp1(self, NULL, UpInstructionJump, startCursor);\n }\n }\n\n UpArray* breaks = self->frame->loop->breaks;\n if (breaks) {\n for (int i = 0; i < UpArrayCount(breaks); ++i) {\n int jumpCursor;\n UpArrayGet(breaks, i, &jumpCursor);\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor);\n _NewOp1(self, NULL, UpInstructionJump, endCursor);\n }\n }\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n self->frame->loop = self->frame->loop->previous;\n}\n\nstatic UpClass* _GetBuiltinClass(UpCompiler* self, const char* name) {\n if (!strcmp(name, \"Object\")) 
{\n return UpGetBuiltinClasses()->objectClass;\n } else if (!strcmp(name, \"Class\")) {\n return UpGetBuiltinClasses()->classClass;\n } else if (!strcmp(name, \"Exception\")) {\n return UpGetBuiltinClasses()->exceptionClass;\n } else if (!strcmp(name, \"Channel\")) {\n return UpGetBuiltinClasses()->channelClass;\n } else if (!strcmp(name, \"Function\")) {\n return UpGetBuiltinClasses()->functionClass;\n } else if (!strcmp(name, \"CFunction\")) {\n return UpGetBuiltinClasses()->cfunctionClass;\n } else if (!strcmp(name, \"CPointer\")) {\n return UpGetBuiltinClasses()->cpointerClass;\n } else if (!strcmp(name, \"Null\")) {\n return UpGetBuiltinClasses()->nullClass;\n } else if (!strcmp(name, \"Bool\")) {\n return UpGetBuiltinClasses()->boolClass;\n } else if (!strcmp(name, \"Integer\")) {\n return UpGetBuiltinClasses()->integerClass;\n } else if (!strcmp(name, \"Long\")) {\n return UpGetBuiltinClasses()->longClass;\n } else if (!strcmp(name, \"Float\")) {\n return UpGetBuiltinClasses()->floatClass;\n } else if (!strcmp(name, \"String\")) {\n return UpGetBuiltinClasses()->stringClass;\n } else if (!strcmp(name, \"List\")) {\n return UpGetBuiltinClasses()->listClass;\n } else if (!strcmp(name, \"Map\")) {\n return UpGetBuiltinClasses()->mapClass;\n } else {\n return false;\n }\n}\n\nstatic bool _IsKeyword(const char* name) {\n if (!strcmp(name, \"this\")) {\n return true;\n } else {\n return false;\n }\n}\n\nstatic void _SetError(UpCompiler* self, const char* description, UpSyntax* node) {\n UpSetError(description);\n UpSetCompileLocation(node->line, node->col);\n}\n\nstatic int _CountList(UpSetSyntax* list) {\n int count = 0;\n for (UpSyntaxItem* item = list->first; item; item = item->next) {\n ++count;\n }\n return count;\n}\n\nstatic UpStatus _FailIfKeyword(UpCompiler* self, UpSyntax* node, const char* name) {\n if (_IsKeyword(name)) {\n UpSetError(\"'%s' can't be modified\", name);\n UpSetCompileLocation(node->line, node->col);\n return UpFailure;\n } else {\n 
return UpSuccess;\n }\n}\n\nstatic void _DeclareBuiltin(UpCompiler* self) {\n UpObject* builtin = UpGetBuiltinModule();\n if (builtin) {\n int frameIndex, localIndex;\n UpClassDef* builtinDef = builtin->cls->def;\n _DeclareExpansion(self, builtinDef, NULL, NULL, &frameIndex, &localIndex);\n _NewOp1v(self, NULL, UpInstructionLoadObject, builtin);\n _NewOp2(self, NULL, UpInstructionSetLocal, frameIndex, localIndex);\n }\n}\n\nstatic bool _ClassLookup(UpCompiler* self, UpClassDef* classDef, UpSymbol symbol) {\n UpGetterDef* getter;\n return UpIntTableGet(classDef->getters, symbol, &getter);\n}\n\nstatic bool _Lookup(UpCompiler* self, const char* name) {\n if (!name) {\n return false;\n }\n\n if (!strcmp(name, \"this\")) {\n self->frame->isContextual = true;\n }\n\n UpSymbol symbol = UpGetSymbol(name);\n\n UpScope* scope = self->frame->scope;\n while (scope) {\n UpArray* locals = scope->locals;\n int localsCount = UpArrayCount(locals);\n for (int i = 0; i < localsCount; ++i) {\n UpVariable* var;\n UpArrayGet(locals, i, &var);\n if (var->classDef && _ClassLookup(self, var->classDef, symbol)) {\n if (scope == self->frame->scope && i == 0) {\n self->frame->isContextual = true;\n }\n\n return true;\n } else if (var->name == symbol) {\n return true;\n }\n }\n\n scope = scope->previous;\n }\n\n return false;\n}\n\nstatic void _DeclareLocal(UpCompiler* self, const char* name, int* frameIndex, int* localIndex,\n bool private) {\n if (!self->frame->scope->locals) {\n self->frame->scope->locals = UpArrayCreate(UpGetHeap(), sizeof(UpVariable*));\n }\n\n // UP_LOG((\"DECLARE %s\", name));\n\n int index = UpArrayCount(self->frame->scope->locals);\n\n UpVariable* var = UpArenaNew(UpGetHeap(), UpVariable);\n if (name) {\n var->name = UpGetSymbol(name);\n }\n UpArrayAppend(self->frame->scope->locals, &var);\n\n if (!private) {\n // XXXjoe ???\n }\n\n if (frameIndex) {\n *frameIndex = 0;\n }\n if (localIndex) {\n *localIndex = index;\n }\n}\n\nstatic void _DeclareExpansion(UpCompiler* 
self, UpClassDef* classDef, const char* name,\n UpSyntax* import, int* frameIndex, int* localIndex) {\n if (!self->frame->scope->locals) {\n self->frame->scope->locals = UpArrayCreate(UpGetHeap(), sizeof(UpVariable*));\n }\n\n // UP_LOG((\"EXPAND %s\", name));\n\n int index = UpArrayCount(self->frame->scope->locals);\n\n UpVariable* var = UpArenaNew(UpGetHeap(), UpVariable);\n var->classDef = classDef;\n var->import = import;\n if (name) {\n var->name = UpGetSymbol(name);\n }\n\n UpArrayAppend(self->frame->scope->locals, &var);\n\n if (frameIndex) {\n *frameIndex = 0;\n }\n if (localIndex) {\n *localIndex = index;\n }\n}\n\nstatic void _Declare(UpCompiler* self, const char* name, bool private, bool override) {\n if (override || !_Lookup(self, name)) {\n if (self->frame->classDef && !private) {\n UpGetterDef* getterDef = UpArenaNew(UpGetHeap(), UpGetterDef);\n getterDef->name = UpArenaCopyString(UpGetHeap(), name);\n getterDef->funcDef = NULL;\n\n UpSymbol nameSymbol = UpGetSymbol(name);\n UpIntTableSet(self->frame->classDef->getters, nameSymbol, &getterDef);\n } else {\n _DeclareLocal(self, name, NULL, NULL, private);\n }\n }\n}\n\nstatic UpStatus _GetName(UpCompiler* self, UpSyntax* node, const char* name) {\n if (!strcmp(name, \"this\")) {\n self->frame->isContextual = true;\n }\n\n UpSymbol symbol = UpGetSymbol(name);\n\n UpScope* scope = self->frame->scope;\n int frameCount = 0;\n while (scope) {\n UpArray* locals = scope->locals;\n int localsCount = UpArrayCount(locals);\n for (int i = 0; i < localsCount; ++i) {\n UpVariable* var;\n UpArrayGet(locals, i, &var);\n if (var->classDef && _ClassLookup(self, var->classDef, symbol)) {\n if (scope == self->frame->scope && i == 0) {\n self->frame->isContextual = true;\n }\n\n _NewOp2(self, node, UpInstructionGetLocal, frameCount, i);\n _NewOp1(self, node, UpInstructionGetProperty, symbol);\n\n return UpSuccess;\n } else if (var->name == symbol) {\n _NewOp2(self, node, UpInstructionGetLocal, frameCount, i);\n return 
UpSuccess;\n }\n }\n\n ++frameCount;\n scope = scope->previous;\n }\n\n UpSetError(\"'%s' is not defined\", name);\n UpSetCompileLocation(node->line, node->col);\n return UpFailure;\n}\n\nstatic UpStatus _SetName(UpCompiler* self, UpSyntax* node, const char* name) {\n SAFECALL(_FailIfKeyword(self, node, name));\n\n UpSymbol symbol = UpGetSymbol(name);\n\n UpScope* scope = self->frame->scope;\n int frameCount = 0;\n while (scope) {\n UpArray* locals = scope->locals;\n int localsCount = UpArrayCount(locals);\n for (int i = 0; i < localsCount; ++i) {\n UpVariable* var;\n\n UpArrayGet(locals, i, &var);\n if (var->classDef && _ClassLookup(self, var->classDef, symbol)) {\n if (scope == self->frame->scope && i == 0) {\n self->frame->isContextual = true;\n }\n\n _NewOp2(self, node, UpInstructionGetLocal, frameCount, i);\n _NewOp1(self, node, UpInstructionSetProperty, symbol);\n\n return UpSuccess;\n } else if (var->name == symbol) {\n _NewOp2(self, node, UpInstructionSetLocal, frameCount, i);\n return UpSuccess;\n }\n }\n\n ++frameCount;\n scope = scope->previous;\n }\n\n // This should really never happen if _DeclareNames did its job\n UpSetError(\"'%s' is not defined\", name);\n UpSetCompileLocation(node->line, node->col);\n return UpFailure;\n}\n\nstatic UpStatus _DeleteName(UpCompiler* self, UpSyntax* node, const char* name) {\n SAFECALL(_FailIfKeyword(self, node, name));\n\n UpSymbol symbol = UpGetSymbol(name);\n\n UpScope* scope = self->frame->scope;\n int frameCount = 0;\n while (scope) {\n UpArray* locals = scope->locals;\n int localsCount = UpArrayCount(locals);\n for (int i = 0; i < localsCount; ++i) {\n UpVariable* var;\n UpArrayGet(locals, i, &var);\n if (var->classDef && _ClassLookup(self, var->classDef, symbol)) {\n if (scope == self->frame->scope && i == 0) {\n self->frame->isContextual = true;\n }\n\n _NewOp2(self, node, UpInstructionGetLocal, frameCount, i);\n _NewOp1(self, node, UpInstructionDeleteProperty, symbol);\n\n return UpSuccess;\n } else if 
(var->name == symbol) {\n _NewOp2(self, node, UpInstructionDeleteLocal, frameCount, i);\n return UpSuccess;\n }\n }\n\n ++frameCount;\n scope = scope->previous;\n }\n\n UpSetError(\"'%s' is not defined\", name);\n UpSetCompileLocation(node->line, node->col);\n return UpFailure;\n}\n\nstatic UpStatus _SetExpansion(UpCompiler* self, UpSyntax* node) {\n UpScope* scope = self->frame->scope;\n int frameCount = 0;\n while (scope) {\n UpArray* locals = scope->locals;\n int localsCount = UpArrayCount(locals);\n for (int i = 0; i < localsCount; ++i) {\n UpVariable* var;\n UpArrayGet(locals, i, &var);\n if (var->import == node) {\n _NewOp2(self, (UpSyntax*)node, UpInstructionSetLocal, frameCount, i);\n return UpSuccess;\n }\n }\n\n ++frameCount;\n scope = scope->previous;\n }\n\n // This should really never happen if _DeclareNames did its job\n _SetError(self, \"import not found\", node);\n return UpFailure;\n}\n\nstatic void _TrackLine(UpCompiler* self, UpSyntax* node) {\n if (node && self->frame->line != node->line) {\n self->frame->line = node->line;\n UpBufferCount(self->frame->funcDef->ops);\n UpLineItem item = { node->line, UpBufferCount(self->frame->funcDef->ops) };\n UpArrayAppend(self->frame->funcDef->lines, &item);\n }\n}\n\nstatic void _NewOp(UpCompiler* self, UpSyntax* node, UpInstruction op) {\n COUNT_OP(UpBufferCursor(self->frame->funcDef->ops), op);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n}\n\nstatic void _NewOp1(UpCompiler* self, UpSyntax* node, UpInstruction op, int arg1) {\n COUNT_OP1(UpBufferCursor(self->frame->funcDef->ops), op, arg1);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n UpBufferWriteInt(self->frame->funcDef->ops, arg1);\n}\n\nstatic void _NewOp1ll(UpCompiler* self, UpSyntax* node, UpInstruction op, long long arg1) {\n COUNT_OP1LL(UpBufferCursor(self->frame->funcDef->ops), op, arg1);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n 
UpBufferWriteLongLong(self->frame->funcDef->ops, arg1);\n}\n\nstatic void _NewOp1f(UpCompiler* self, UpSyntax* node, UpInstruction op, double arg1) {\n COUNT_OP1F(UpBufferCursor(self->frame->funcDef->ops), op, arg1);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n UpBufferWriteDouble(self->frame->funcDef->ops, arg1);\n}\n\nstatic void _NewOp1v(UpCompiler* self, UpSyntax* node, UpInstruction op, void* arg1) {\n COUNT_OP1V(UpBufferCursor(self->frame->funcDef->ops), op, arg1);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n UpBufferWritePointer(self->frame->funcDef->ops, arg1);\n}\n\nstatic void _NewOp2(UpCompiler* self, UpSyntax* node, UpInstruction op, int arg1, int arg2) {\n COUNT_OP2(UpBufferCursor(self->frame->funcDef->ops), op, arg1, arg2);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n UpBufferWriteInt(self->frame->funcDef->ops, arg1);\n UpBufferWriteInt(self->frame->funcDef->ops, arg2);\n}\n\nstatic void _NewOp2v(UpCompiler* self, UpSyntax* node, UpInstruction op, void* arg1, int arg2) {\n COUNT_OP2(UpBufferCursor(self->frame->funcDef->ops), op, arg1, arg2);\n _TrackLine(self, node);\n UpBufferWriteInt(self->frame->funcDef->ops, op);\n UpBufferWritePointer(self->frame->funcDef->ops, arg1);\n UpBufferWriteInt(self->frame->funcDef->ops, arg2);\n}\n\nstatic const char* _GetImportName(UpCompiler* self, UpSyntaxItem* first, UpSyntaxItem* last) {\n UpBuffer* buf = UpBufferCreate(UpGetHeap());\n for (UpSyntaxItem* item = first; item; item = item->next) {\n UpIdSyntax* id = (UpIdSyntax*)item->value;\n UpBufferPrint(buf, id->name);\n if (last && item == last) {\n break;\n }\n if (item->next) {\n UpBufferPrint(buf, \".\");\n }\n }\n return UpBufferString(buf);\n}\n\nstatic UpStatus _CompileMapItem(UpCompiler* self, UpSyntax* node, int frameIndex, int localIndex) {\n if (node->type != UpBinarySyntaxType) {\n _SetError(self, \"Map literals may contain assignments only\", node);\n 
return UpFailure;\n }\n\n UpBinarySyntax* pair = (UpBinarySyntax*)node;\n SAFECALL(_CompileSyntax(self, pair->right, false));\n SAFECALL(_CompileSyntax(self, pair->left, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:[]=\");\n _NewOp2(self, node, UpInstructionCallOperator2, 2, symbol);\n\n _NewOp(self, node, UpInstructionPop);\n _NewOp2(self, node, UpInstructionGetLocal, frameIndex, localIndex);\n\n return UpSuccess;\n}\n\nstatic UpStatus _CompileSubscriber(UpCompiler* self, UpIteratorSyntax* iterator) {\n SAFECALL(_CompileFunctionHead(self, NULL, iterator->left, iterator->body, true, false));\n SAFECALL(_CompileFunctionBody(self, NULL, iterator->body, false, false, false));\n UpFunctionDef* funcDef;\n _PopFrame(self, &funcDef);\n\n _NewOp1v(self, iterator->body, UpInstructionNewFunction, funcDef);\n SAFECALL(_CompileSyntax(self, iterator->iterable, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:in\");\n _NewOp2(self, NULL, UpInstructionCallProperty, 0, symbol);\n\n _NewOp(self, iterator->iterable, UpInstructionSubscribe);\n return UpSuccess;\n}\n\nstatic UpStatus _CompileIterator(UpCompiler* self, UpIteratorSyntax* iterator, bool statement) {\n _PushLoop(self);\n\n if (!iterator->iterable && statement) {\n SAFECALL(_GetName(self, iterator->body, \"__GENERATOR__\"));\n } else {\n SAFECALL(_CompileSyntax(self, iterator->iterable, false));\n }\n\n UpSymbol symbol = UpGetSymbol(\"op:in\");\n _NewOp2(self, NULL, UpInstructionCallProperty, 0, symbol);\n\n _SetName(self, iterator->iterable, iterator->TEMPchannelName);\n _NewOp(self, iterator->iterable, UpInstructionPop);\n\n int readCursor = UpBufferCursor(self->frame->funcDef->ops);\n _GetName(self, iterator->iterable, iterator->TEMPchannelName);\n _NewOp(self, iterator->iterable, UpInstructionRead);\n _GetName(self, iterator->iterable, iterator->TEMPchannelName);\n int jumpIfCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, iterator->iterable, UpInstructionJumpIfChannelClosed, -1);\n\n if 
(iterator->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)iterator->left;\n SAFECALL(_FailIfKeyword(self, iterator->left, left->name));\n\n SAFECALL(_SetName(self, iterator->left, left->name));\n _NewOp(self, iterator->iterable, UpInstructionPop);\n } else {\n _SetError(self, \"Illegal loop assignment\", iterator->left);\n return UpFailure;\n }\n\n if (iterator->clause) {\n SAFECALL(_CompileSyntax(self, iterator->clause, false));\n\n int jumpToElseCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, iterator->clause, UpInstructionJumpIfNot, -1);\n _NewOp(self, iterator->clause, UpInstructionPop);\n\n if (!statement) {\n SAFECALL(_GetName(self, iterator->body, \"__GENERATOR__\"));\n }\n\n SAFECALL(_CompileSyntax(self, iterator->body, statement));\n if (!statement) {\n _NewOp(self, iterator->iterable, UpInstructionWriteAwaitRead);\n }\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n UpBufferSeek(self->frame->funcDef->ops, jumpToElseCursor);\n _NewOp1(self, iterator->clause, UpInstructionJumpIfNot, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n } else {\n if (!statement) {\n SAFECALL(_GetName(self, iterator->body, \"__GENERATOR__\"));\n }\n SAFECALL(_CompileSyntax(self, iterator->body, statement));\n if (!statement) {\n _NewOp(self, iterator->iterable, UpInstructionWriteAwaitRead);\n }\n }\n\n int resumeCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, iterator->iterable, UpInstructionJump, readCursor);\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n UpBufferSeek(self->frame->funcDef->ops, jumpIfCursor);\n _NewOp1(self, iterator->iterable, UpInstructionJumpIfChannelClosed, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n _PopLoop(self, resumeCursor, endCursor, true);\n\n return UpSuccess;\n}\n\nstatic UpStatus _DeclareNames(UpCompiler* self, UpSyntax* node, bool statement) {\n if (!node) {\n return UpSuccess;\n }\n\n 
BEGIN_COMPILER_NODE(node->type);\n\n switch (node->type) {\n case UpSetSyntaxType: {\n for (UpSyntaxItem* item = ((UpSetSyntax*)node)->first; item; item = item->next) {\n SAFECALL(_DeclareNames(self, item->value, statement));\n }\n break;\n }\n case UpGroupSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_DeclareNames(self, n->value, false));\n break;\n }\n case UpDeclarationSyntaxType: {\n UpDeclarationSyntax* decl = (UpDeclarationSyntax*)node;\n SAFECALL(_DeclareNames(self, decl->head, false));\n break;\n }\n case UpPrintSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_DeclareNames(self, n->value, false));\n break;\n }\n case UpAssignmentSyntaxType: {\n UpAssignmentSyntax* n = (UpAssignmentSyntax*)node;\n SAFECALL(_DeclareNames(self, n->right, false));\n\n if (n->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)n->left;\n _Declare(self, left->name, false, false);\n } else {\n // Check for property and index expressions\n }\n break;\n }\n // case UpClassSyntaxType: {\n // UpClassSyntax* cls = (UpClassSyntax*)node;\n // const char* name = NULL;\n // SAFECALL(_NameForClass(self, cls->name, &name));\n // _Declare(self, name, false, false);\n // break;\n // }\n case UpCallSyntaxType: {\n UpCallSyntax* fn = (UpCallSyntax *)node;\n if (statement) {\n const char* name = NULL;\n SAFECALL(_NameForFunction(self, fn->callable, &name));\n if (name) {\n _Declare(self, name, false, false);\n } else {\n _SetError(self, \"Illegal function declaration\", fn->callable);\n return UpFailure;\n }\n }\n break;\n }\n case UpBinarySyntaxType: {\n UpBinarySyntax* n = (UpBinarySyntax*)node;\n SAFECALL(_DeclareNames(self, n->left, false));\n SAFECALL(_DeclareNames(self, n->right, false));\n break;\n }\n case UpUnarySyntaxType: {\n UpUnarySyntax* n = (UpUnarySyntax*)node;\n SAFECALL(_DeclareNames(self, n->operand, false));\n break;\n }\n case UpImportSyntaxType: {\n UpImportSyntax* n = (UpImportSyntax*)node;\n\n UpObject* module;\n UpFunctionDef* 
def;\n UpClassDef* classDef;\n UpSetSyntax* names = (UpSetSyntax*)n->names;\n for (UpSyntaxItem* item = names->first; item; item = item->next) {\n if (!item->next) {\n const char* importName = _GetImportName(self, names->first, item);\n UpSymbol importSymbol = UpGetSymbol(importName);\n if (!UpImport(importSymbol, &module, &def, &classDef)) {\n UpSetCompileLocation(node->line, node->col);\n return UpFailure;\n }\n }\n }\n\n if (n->wildcard) {\n _DeclareExpansion(self, classDef, NULL, node, NULL, NULL);\n } else {\n UpIdSyntax* firstName = (UpIdSyntax*)names->first->value;\n _Declare(self, firstName->name, true, true);\n }\n break;\n }\n // case UpCallSyntaxType: {\n // UpCallSyntax* n = (UpCallSyntax*)node;\n // SAFECALL(_DeclareNames(self, n->callable, false));\n // SAFECALL(_DeclareNames(self, n->args, false));\n // break;\n // }\n case UpPropertySyntaxType: {\n UpPropertySyntax* n = (UpPropertySyntax*)node;\n SAFECALL(_DeclareNames(self, n->left, false));\n break;\n }\n case UpReturnSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_DeclareNames(self, n->value, false));\n break;\n }\n case UpThrowSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_DeclareNames(self, n->value, false));\n break;\n }\n case UpCFunctionSyntaxType: {\n UpCFunctionSyntax* n = (UpCFunctionSyntax*)node;\n _Declare(self, n->name, true, true);\n break;\n }\n case UpIfSyntaxType: {\n UpIfSyntax* n = (UpIfSyntax*)node;\n SAFECALL(_DeclareNames(self, n->condition, false));\n SAFECALL(_DeclareNames(self, n->transforms, statement));\n SAFECALL(_DeclareNames(self, n->elsex, statement));\n break;\n }\n case UpWhileSyntaxType: {\n UpWhileSyntax* n = (UpWhileSyntax*)node;\n SAFECALL(_DeclareNames(self, n->condition, false));\n SAFECALL(_DeclareNames(self, n->body, statement));\n break;\n }\n case UpIteratorSyntaxType: {\n UpIteratorSyntax* n = (UpIteratorSyntax*)node;\n if (statement) {\n SAFECALL(_DeclareNames(self, n->iterable, false));\n\n n->TEMPchannelName = 
UpArenaFormatString(UpGetHeap(), \"__CHANNEL%d__\",\n ++TEMPchannelCounter);\n _Declare(self, n->TEMPchannelName, false, false);\n\n if (n->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)n->left;\n _Declare(self, left->name, false, false);\n } else {\n // Check for property and index expressions\n }\n\n SAFECALL(_DeclareNames(self, n->body, statement));\n }\n break;\n }\n case UpTrySyntaxType: {\n UpTrySyntax* n = (UpTrySyntax*)node;\n SAFECALL(_DeclareNames(self, n->tryBlock, statement));\n\n if (n->catchBlocks) {\n UpSetSyntax* catchBlocks = (UpSetSyntax*)n->catchBlocks;\n for (UpSyntaxItem* item = catchBlocks->first; item; item = item->next) {\n UpCatchSyntax* catchBlock = (UpCatchSyntax*)item->value;\n // if (catchBlock->excName) {\n // _Declare(self, catchBlock->excName, false, false);\n // }\n\n SAFECALL(_DeclareNames(self, catchBlock->statements, statement));\n }\n }\n\n break;\n }\n case UpFinallySyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_DeclareNames(self, n->value, statement));\n break;\n }\n default:\n break;\n }\n\n END_COMPILER_NODE(node->type);\n return UpSuccess;\n}\n\nstatic UpStatus _CompileSyntax(UpCompiler* self, UpSyntax* node, bool statement) {\n if (!node) {\n return UpSuccess;\n }\n\n switch (node->type) {\n case UpSetSyntaxType: {\n for (UpSyntaxItem* item = ((UpSetSyntax*)node)->first; item; item = item->next) {\n SAFECALL(_CompileSyntax(self, item->value, statement));\n }\n break;\n }\n case UpGroupSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_CompileSyntax(self, n->value, false));\n break;\n }\n case UpPrintSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_CompileSyntax(self, n->value, false));\n\n SAFECALL(_GetName(self, node, \"print\"));\n _NewOp1(self, node, UpInstructionCall, 1);\n // if (statement) {\n // _NewOp(self, node, UpInstructionPop);\n // }\n break;\n }\n case UpAssignmentSyntaxType: {\n UpAssignmentSyntax* n = (UpAssignmentSyntax*)node;\n 
SAFECALL(_CompileSyntax(self, n->right, false));\n if (n->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)n->left;\n SAFECALL(_FailIfKeyword(self, n->left, left->name));\n\n SAFECALL(_SetName(self, n->left, left->name));\n } else if (n->left->type == UpPropertySyntaxType) {\n UpPropertySyntax* prop = (UpPropertySyntax*)n->left;\n SAFECALL(_CompileSyntax(self, prop->left, false));\n UpSymbol symbol = UpGetSymbol(prop->right);\n _NewOp1(self, node, UpInstructionSetProperty, symbol);\n } else if (n->left->type == UpBinarySyntaxType) {\n UpBinarySyntax* binary = (UpBinarySyntax*)n->left;\n if (binary->op == UpLookupOp) {\n SAFECALL(_CompileSyntax(self, binary->right, false));\n SAFECALL(_CompileSyntax(self, binary->left, false));\n UpSymbol symbol = UpGetSymbol(\"op:.[]=\");\n _NewOp2(self, node, UpInstructionCallProperty, 2, symbol);\n } else if (binary->op == UpIndexOp) {\n SAFECALL(_CompileSyntax(self, binary->right, false));\n SAFECALL(_CompileSyntax(self, binary->left, false));\n UpSymbol symbol = UpGetSymbol(\"op:[]=\");\n _NewOp2(self, node, UpInstructionCallProperty, 2, symbol);\n } else if (binary->op == UpSliceOp) {\n UpRangeSyntax* range = (UpRangeSyntax*)binary->right;\n SAFECALL(_CompileSyntax(self, range->from, false));\n SAFECALL(_CompileSyntax(self, range->to, false));\n SAFECALL(_CompileSyntax(self, range->by, false));\n SAFECALL(_CompileSyntax(self, binary->left, false));\n UpSymbol symbol = UpGetSymbol(\"op:[to]=\");\n _NewOp2(self, node, UpInstructionCallProperty, 4, symbol);\n } else {\n _SetError(self, \"Illegal assignment\", n->left);\n }\n } else if (n->left->type == UpCastSyntaxType) {\n UpCastSyntax* cast = (UpCastSyntax*)n->left;\n return _CompileSyntax(self, cast->expr, false);\n } else {\n _SetError(self, \"Illegal assignment\", n->left);\n }\n break;\n }\n case UpBinarySyntaxType: {\n UpBinarySyntax* n = (UpBinarySyntax*)node;\n\n if (n->op == UpAndOp) {\n SAFECALL(_CompileSyntax(self, n->left, false));\n int jumpCursor1 
= UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJumpIfNot, -1);\n _NewOp(self, node, UpInstructionPop);\n\n SAFECALL(_CompileSyntax(self, n->right, false));\n int jumpCursor2 = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJumpIf, -1);\n\n _NewOp(self, node, UpInstructionPop);\n _NewOp1v(self, node, UpInstructionLoadObject, UpFalse());\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor1);\n _NewOp1(self, node, UpInstructionJumpIfNot, endCursor);\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor2);\n _NewOp1(self, node, UpInstructionJumpIf, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n } else if (n->op == UpOrOp) {\n SAFECALL(_CompileSyntax(self, n->left, false));\n int jumpCursor1 = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJumpIf, -1);\n _NewOp(self, node, UpInstructionPop);\n\n SAFECALL(_CompileSyntax(self, n->right, false));\n int jumpCursor2 = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJumpIf, -1);\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor1);\n _NewOp1(self, node, UpInstructionJumpIf, endCursor);\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor2);\n _NewOp1(self, node, UpInstructionJumpIf, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n } else if (n->op == UpLookupOp) {\n if (n->right->type == UpDefaultSyntaxType) {\n UpSyntax2* defaults = (UpSyntax2*)n->right;\n SAFECALL(_CompileSyntax(self, defaults->value, false));\n SAFECALL(_CompileSyntax(self, defaults->value2, false));\n } else {\n SAFECALL(_CompileSyntax(self, n->right, false));\n _NewOp1v(self, node, UpInstructionLoadObject, UpUndefined());\n }\n SAFECALL(_CompileSyntax(self, n->left, false));\n _NewOp(self, node, UpInstructionLookupProperty);\n } else if (n->op == UpIndexOp) 
{\n int argCount;\n if (n->right->type == UpDefaultSyntaxType) {\n UpSyntax2* defaults = (UpSyntax2*)n->right;\n SAFECALL(_CompileSyntax(self, defaults->value, false));\n SAFECALL(_CompileSyntax(self, defaults->value2, false));\n argCount = 2;\n } else {\n SAFECALL(_CompileSyntax(self, n->right, false));\n argCount = 1;\n }\n SAFECALL(_CompileSyntax(self, n->left, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:[]\");\n _NewOp2(self, node, UpInstructionCallProperty, argCount, symbol);\n break;\n } else if (n->op == UpSliceOp) {\n UpRangeSyntax* range = (UpRangeSyntax*)n->right;\n SAFECALL(_CompileSyntax(self, range->from, false));\n SAFECALL(_CompileSyntax(self, range->to, false));\n SAFECALL(_CompileSyntax(self, range->by, false));\n SAFECALL(_CompileSyntax(self, n->left, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:[to]\");\n _NewOp2(self, node, UpInstructionCallProperty, 3, symbol);\n break;\n } else if (n->op == UpConcatStringOp) {\n SAFECALL(_CompileSyntax(self, n->left, false));\n if (!_IsStringSyntaxType(n->left)) {\n UpSymbol symbol = UpGetSymbol(\"toString\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n }\n SAFECALL(_CompileSyntax(self, n->right, false));\n if (!_IsStringSyntaxType(n->right)) {\n UpSymbol symbol = UpGetSymbol(\"toString\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n }\n UpSymbol symbol = UpGetSymbol(\"op:++\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } else {\n if (!n->left && _IsChannelOp(n->op)) {\n SAFECALL(_GetName(self, node, \"__GENERATOR__\"));\n } else {\n SAFECALL(_CompileSyntax(self, n->left, false));\n }\n\n SAFECALL(_CompileSyntax(self, n->right, false));\n\n switch (n->op) {\n case UpEqualsOp: {\n UpSymbol symbol = UpGetSymbol(\"op:==\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpNotEqualsOp: {\n UpSymbol symbol = UpGetSymbol(\"op:==\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n symbol = 
UpGetSymbol(\"op:not\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n } case UpGreaterThanOp: {\n UpSymbol symbol = UpGetSymbol(\"op:>\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpGreaterThanEqualsOp: {\n UpSymbol symbol = UpGetSymbol(\"op:>=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpLessThanOp: {\n UpSymbol symbol = UpGetSymbol(\"op:<\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpLessThanEqualsOp: {\n UpSymbol symbol = UpGetSymbol(\"op:<=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpIsOp: {\n _NewOp(self, node, UpInstructionIs);\n break;\n } case UpIsNotOp: {\n _NewOp(self, node, UpInstructionIs);\n UpSymbol symbol = UpGetSymbol(\"op:not\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n } case UpIsInOp: {\n UpSymbol symbol = UpGetSymbol(\"op:isin\");\n _NewOp2(self, node, UpInstructionCallProperty, 1, symbol);\n break;\n } case UpNotInOp: {\n UpSymbol symbol = UpGetSymbol(\"op:isin\");\n _NewOp2(self, node, UpInstructionCallProperty, 1, symbol);\n symbol = UpGetSymbol(\"op:not\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n } case UpAddOp: {\n UpSymbol symbol = UpGetSymbol(\"op:+\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpSubtractOp: {\n UpSymbol symbol = UpGetSymbol(\"op:-\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpMultiplyOp: {\n UpSymbol symbol = UpGetSymbol(\"op:*\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpDivideOp: {\n UpSymbol symbol = UpGetSymbol(\"op:/\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpModOp: {\n UpSymbol symbol = UpGetSymbol(\"op:%\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpPowOp: {\n UpSymbol symbol = 
UpGetSymbol(\"op:**\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpConcatOp: {\n UpSymbol symbol = UpGetSymbol(\"op:++\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpAddEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:+=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpSubtractEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:-=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpMultiplyEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:*=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpDivideEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:/=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpModEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:%=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpPowEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:**=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpConcatEqOp: {\n UpSymbol symbol = UpGetSymbol(\"op:++=\");\n _NewOp2(self, node, UpInstructionCallOperator, 1, symbol);\n break;\n } case UpWriteOp: {\n _NewOp(self, node, UpInstructionWrite);\n break;\n } default: {\n _SetError(self, \"Illegal operator\", node);\n return UpFailure;\n }\n }\n }\n break;\n }\n case UpUnarySyntaxType: {\n UpUnarySyntax* n = (UpUnarySyntax*)node;\n if (n->op == UpDeleteOp) {\n if (n->operand->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)n->operand;\n SAFECALL(_DeleteName(self, node, id->name));\n } else if (n->operand->type == UpPropertySyntaxType) {\n UpPropertySyntax* prop = (UpPropertySyntax*)n->operand;\n SAFECALL(_CompileSyntax(self, prop->left, false));\n UpSymbol symbol = UpGetSymbol(prop->right);\n _NewOp1(self, node, UpInstructionDeleteProperty, symbol);\n } else if (n->operand->type == UpBinarySyntaxType) {\n UpBinarySyntax* binary = 
(UpBinarySyntax*)n->operand;\n if (binary->op == UpIndexOp) {\n SAFECALL(_CompileSyntax(self, binary->right, false));\n SAFECALL(_CompileSyntax(self, binary->left, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:-=[]\");\n _NewOp2(self, node, UpInstructionCallProperty, 1, symbol);\n } else if (binary->op == UpSliceOp) {\n UpRangeSyntax* range = (UpRangeSyntax*)binary->right;\n SAFECALL(_CompileSyntax(self, range->from, false));\n SAFECALL(_CompileSyntax(self, range->to, false));\n SAFECALL(_CompileSyntax(self, range->by, false));\n SAFECALL(_CompileSyntax(self, binary->left, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:-=[to]\");\n _NewOp2(self, node, UpInstructionCallProperty, 3, symbol);\n } else if (binary->op == UpLookupOp) {\n SAFECALL(_CompileSyntax(self, binary->right, false));\n SAFECALL(_CompileSyntax(self, binary->left, false));\n\n UpSymbol symbol = UpGetSymbol(\"op:-=.[]\");\n _NewOp2(self, node, UpInstructionCallProperty, 1, symbol);\n } else {\n _SetError(self, \"Illegal delete\", node);\n }\n } else {\n _SetError(self, \"Illegal delete\", node);\n return UpFailure;\n }\n } else {\n if (!n->operand && _IsChannelOp(n->op)) {\n SAFECALL(_GetName(self, node, \"__GENERATOR__\"));\n } else {\n SAFECALL(_CompileSyntax(self, n->operand, false));\n }\n\n switch (n->op) {\n case UpPositiveOp: {\n UpSymbol symbol = UpGetSymbol(\"op:positive\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n } case UpNegativeOp: {\n UpSymbol symbol = UpGetSymbol(\"op:negative\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n } case UpInOp: {\n UpSymbol symbol = UpGetSymbol(\"op:in\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n } case UpNotOp: {\n UpSymbol symbol = UpGetSymbol(\"op:not\");\n _NewOp2(self, node, UpInstructionCallProperty, 0, symbol);\n break;\n }\n case UpReadOp: {\n _NewOp(self, node, UpInstructionRead);\n break;\n }\n case UpWriteOp: {\n _NewOp(self, node, UpInstructionPop);\n 
_NewOp1v(self, node, UpInstructionLoadObject, UpUndefined());\n break;\n } default: {\n _SetError(self, \"Illegal operator\", node);\n return UpFailure;\n }\n }\n }\n break;\n }\n case UpImportSyntaxType: {\n UpImportSyntax* n = (UpImportSyntax*)node;\n\n UpSetSyntax* names = (UpSetSyntax*)n->names;\n\n for (UpSyntaxItem* item = names->first; item; item = item->next) {\n const char* importName = _GetImportName(self, names->first, item);\n UpSymbol fullName = UpGetSymbol(importName);\n _NewOp1(self, node, UpInstructionImport, fullName);\n if (n->wildcard && item->next) {\n _NewOp(self, node, UpInstructionPop);\n } else if (!n->wildcard && item != names->first) {\n _NewOp(self, node, UpInstructionPop);\n }\n }\n\n if (n->wildcard) {\n SAFECALL(_SetExpansion(self, node));\n } else {\n UpIdSyntax* firstName = (UpIdSyntax*)names->first->value;\n SAFECALL(_SetName(self, names->first->value, firstName->name));\n }\n break;\n }\n case UpCallSyntaxType: {\n UpCallSyntax* n = (UpCallSyntax*)node;\n int argsCount = _CountList((UpSetSyntax*)n->args);\n // if (n->schedule == UpCallConcurrent) {\n // SAFECALL(_CompileSyntax(self, n->args, false));\n // SAFECALL(_CompileSyntax(self, n->callable, false));\n // _NewOp1(self, node, UpInstructionSchedule, argsCount);\n // } else if (n->schedule == UpCallParallel) {\n // _SetError(self, \"Illegal operator\", node);\n // return UpFailure;\n // } else {\n if (n->callable->type == UpPropertySyntaxType) {\n UpPropertySyntax* prop = (UpPropertySyntax*)n->callable;\n\n SAFECALL(_CompileSyntax(self, n->args, false));\n SAFECALL(_CompileSyntax(self, prop->left, false));\n UpSymbol symbol = UpGetSymbol(prop->right);\n _NewOp2(self, node, UpInstructionCallProperty, argsCount, symbol);\n } else {\n SAFECALL(_CompileSyntax(self, n->args, false));\n SAFECALL(_CompileSyntax(self, n->callable, false));\n _NewOp1(self, node, UpInstructionCall, argsCount);\n }\n // }\n // if (statement) {\n // _NewOp(self, node, UpInstructionPop);\n // }\n break;\n 
}\n case UpPropertySyntaxType: {\n UpPropertySyntax* n = (UpPropertySyntax*)node;\n SAFECALL(_CompileSyntax(self, n->left, false));\n UpSymbol symbol = UpGetSymbol(n->right);\n _NewOp1(self, node, UpInstructionGetProperty, symbol);\n break;\n }\n case UpIdSyntaxType: {\n UpIdSyntax* n = (UpIdSyntax*)node;\n SAFECALL(_GetName(self, node, n->name));\n // if (statement) {\n // _NewOp(self, node, UpInstructionPop);\n // }\n break;\n }\n case UpRangeSyntaxType: {\n UpRangeSyntax* n = (UpRangeSyntax*)node;\n SAFECALL(_CompileSyntax(self, n->from, false));\n SAFECALL(_CompileSyntax(self, n->to, false));\n SAFECALL(_CompileSyntax(self, n->by, false));\n\n SAFECALL(_GetName(self, node, \"range\"));\n _NewOp1(self, node, UpInstructionCall, 3);\n break;\n }\n case UpUndefinedSyntaxType: {\n _NewOp1v(self, node, UpInstructionLoadObject, UpUndefined());\n break;\n }\n case UpIntSyntaxType: {\n UpNumberSyntax* num = (UpNumberSyntax*)node;\n\n if (num->unit || (num->value.i >= -3 && num->value.i <= 100)) {\n UpSymbol unit = UpGetSymbol(num->unit);\n UpInteger* n = UpIntegerCreateWithUnit(num->value.i, unit);\n _NewOp1v(self, node, UpInstructionLoadObject, n);\n } else {\n _NewOp1(self, node, UpInstructionLoadInteger, num->value.i);\n UpBufferWriteInt(self->frame->funcDef->ops, 0);\n }\n break;\n }\n case UpLongSyntaxType: {\n UpNumberSyntax* num = (UpNumberSyntax*)node;\n\n if (num->unit || (num->value.l >= -3 && num->value.l <= 100)) {\n UpSymbol unit = UpGetSymbol(num->unit);\n UpLong* n = UpLongCreateWithUnit(num->value.l, unit);\n _NewOp1v(self, node, UpInstructionLoadObject, n);\n } else {\n _NewOp1ll(self, node, UpInstructionLoadLong, num->value.l);\n }\n break;\n }\n case UpFloatSyntaxType: {\n UpNumberSyntax* num = (UpNumberSyntax*)node;\n\n if (num->unit || (num->value.f == 0 || num->value.f == 1 || num->value.f == -0)) {\n UpSymbol unit = UpGetSymbol(num->unit);\n UpFloat* n = UpFloatCreateWithUnit(num->value.f, unit);\n _NewOp1v(self, node, UpInstructionLoadObject, 
n);\n } else {\n _NewOp1f(self, node, UpInstructionLoadFloat, num->value.f);\n }\n break;\n }\n case UpStringSyntaxType: {\n UpStringSyntax* str = (UpStringSyntax*)node;\n UpString* n = UpStringCreate(str->value);\n _NewOp1v(self, node, UpInstructionLoadObject, n);\n break;\n }\n case UpListSyntaxType: {\n UpSyntax1* list = (UpSyntax1*)node;\n int itemCount = 0;\n if (!list->value) {\n\n } else if (list->value->type != UpSetSyntaxType) {\n ++itemCount;\n SAFECALL(_CompileSyntax(self, list->value, false));\n } else {\n UpSetSyntax* set = (UpSetSyntax*)list->value;\n for (UpSyntaxItem* item = set->first; item; item = item->next) {\n SAFECALL(_CompileSyntax(self, item->value, false));\n ++itemCount;\n }\n }\n _NewOp1(self, node, UpInstructionNewList, itemCount);\n break;\n }\n case UpMapSyntaxType: {\n UpSyntax1* map = (UpSyntax1*)node;\n _NewOp(self, node, UpInstructionNewMap);\n\n int frameIndex, localIndex;\n _DeclareLocal(self, NULL, &frameIndex, &localIndex, true);\n _NewOp2(self, node, UpInstructionSetLocal, frameIndex, localIndex);\n\n if (!map->value) {\n\n } else if (map->value->type != UpSetSyntaxType) {\n SAFECALL(_CompileMapItem(self, map->value, frameIndex, localIndex));\n } else {\n UpSetSyntax* set = (UpSetSyntax*)map->value;\n for (UpSyntaxItem* item = set->first; item; item = item->next) {\n SAFECALL(_CompileMapItem(self, item->value, frameIndex, localIndex));\n }\n }\n break;\n }\n case UpChannelSyntaxType: {\n // UpSyntax1* channel = (UpSyntax1*)node;\n\n SAFECALL(_GetName(self, node, \"Channel\"));\n _NewOp1(self, node, UpInstructionCall, 0);\n break;\n }\n case UpReturnSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n if (self->frame->isGenerator) {\n SAFECALL(_CompileCloseGenerator(self));\n }\n SAFECALL(_CompileSyntax(self, n->value, false));\n _NewOp(self, node, UpInstructionReturn);\n ++self->frame->didReturn;\n break;\n }\n case UpBreakSyntaxType: {\n if (!self->frame->loop) {\n _SetError(self, \"break without a loop\", node);\n return 
UpFailure;\n }\n int jumpCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJump, -1);\n\n UpArray* breaks = self->frame->loop->breaks;\n if (!breaks) {\n breaks = UpArrayCreate(UpGetHeap(), sizeof(int));\n self->frame->loop->breaks = breaks;\n }\n UpArrayAppend(breaks, &jumpCursor);\n break;\n }\n case UpContinueSyntaxType: {\n if (!self->frame->loop) {\n _SetError(self, \"continue without a loop\", node);\n return UpFailure;\n }\n\n int jumpCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJump, -1);\n\n UpArray* continues = self->frame->loop->continues;\n if (!continues) {\n continues = UpArrayCreate(UpGetHeap(), sizeof(int));\n self->frame->loop->continues = continues;\n }\n UpArrayAppend(continues, &jumpCursor);\n break;\n }\n case UpThrowSyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_CompileSyntax(self, n->value, false));\n _NewOp(self, node, UpInstructionThrow);\n ++self->frame->didReturn;\n break;\n }\n // case UpClassSyntaxType: {\n // UpClassSyntax* n = (UpClassSyntax*)node;\n //\n // const char* className = NULL;\n // SAFECALL(_NameForClass(self, n->name, &className));\n //\n // UpClassDef* classDef;\n // SAFECALL(_CompileClassDef(self, className, n->sub, n->body, false, &classDef, NULL,\n // NULL));\n //\n // UpScope* scope = self->frame->scope;\n // if (!scope->classDefs) {\n // scope->classDefs = UpArrayCreate(UpGetHeap(), sizeof(UpClassDef*));\n // }\n // UpArrayAppend(scope->classDefs, &classDef);\n //\n // if (self->isCompilingUpDotUp) {\n // UpClass* builtinClass = _GetBuiltinClass(self, className);\n // if (builtinClass) {\n // builtinClass->def = classDef;\n // _NewOp1v(self, node, UpInstructionBindClass, builtinClass);\n // } else {\n // _NewOp1v(self, node, UpInstructionNewClass, classDef);\n // }\n // } else {\n // _NewOp1v(self, node, UpInstructionNewClass, classDef);\n // }\n //\n // SAFECALL(_SetName(self, node, className));\n // break;\n // }\n case 
UpFunctionSyntaxType: {\n UpFunctionSyntax* n = (UpFunctionSyntax*)node;\n\n const char* funcName = NULL;\n if (statement) {\n SAFECALL(_NameForFunction(self, n->head, &funcName));\n }\n\n UpFunctionDef* def;\n SAFECALL(_CompileFunctionDef(self, funcName, n->head, n->body, !statement,\n n->isGenerator, n->isExpression, false, &def));\n\n _NewOp1v(self, node, UpInstructionNewFunction, def);\n\n if (funcName) {\n SAFECALL(_SetName(self, node, funcName));\n }\n\n break;\n }\n case UpCFunctionSyntaxType: {\n UpCFunctionSyntax* n = (UpCFunctionSyntax*)node;\n\n UpCFunctionDef* def;\n SAFECALL(_CompileCFunctionDef(self, n->name, n->library, n->returns,\n (UpSetSyntax*)n->args, &def));\n _NewOp1v(self, node, UpInstructionNewCFunction, def);\n\n if (statement) {\n SAFECALL(_SetName(self, node, n->name));\n }\n break;\n }\n case UpIfSyntaxType: {\n UpIfSyntax* n = (UpIfSyntax*)node;\n int didReturn = self->frame->didReturn;\n\n SAFECALL(_CompileSyntax(self, n->condition, false));\n int jumpToElseCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJumpIfNot, -1);\n _NewOp(self, node, UpInstructionPop);\n\n SAFECALL(_CompileSyntax(self, n->transforms, statement));\n int jumpAfterThenCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, node, UpInstructionJump, -1);\n\n int elseCursor = UpBufferCursor(self->frame->funcDef->ops);\n SAFECALL(_CompileSyntax(self, n->elsex, statement));\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n UpBufferSeek(self->frame->funcDef->ops, jumpToElseCursor);\n _NewOp1(self, node, UpInstructionJumpIfNot, elseCursor);\n UpBufferSeek(self->frame->funcDef->ops, jumpAfterThenCursor);\n _NewOp1(self, node, UpInstructionJump, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n\n if (self->frame->didReturn - didReturn > 1) {\n self->frame->didReturn = didReturn+1;\n } else {\n self->frame->didReturn = didReturn;\n }\n break;\n }\n case UpWhileSyntaxType: {\n 
UpWhileSyntax* n = (UpWhileSyntax*)node;\n _PushLoop(self);\n\n int startCursor = UpBufferCursor(self->frame->funcDef->ops);\n SAFECALL(_CompileSyntax(self, n->condition, false));\n\n int jumpCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, n->condition, UpInstructionJumpIfNot, -1);\n\n SAFECALL(_CompileSyntax(self, n->body, statement));\n _NewOp1(self, n->condition, UpInstructionJump, startCursor);\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor);\n _NewOp1(self, n->condition, UpInstructionJumpIfNot, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n _PopLoop(self, startCursor, endCursor, false);\n break;\n }\n case UpIteratorSyntaxType: {\n UpIteratorSyntax* n = (UpIteratorSyntax*)node;\n if (n->isMapper) {\n _SetError(self, \"Map shorthand NYI\", node);\n return UpFailure;\n } else if (statement) {\n if (n->isOn) {\n SAFECALL(_CompileSubscriber(self, n));\n } else {\n SAFECALL(_CompileIterator(self, n, true));\n }\n } else {\n if (!n->iterable) {\n _SetError(self, \"Iterable is required for generator expressions.\", node);\n return UpFailure;\n } else if (n->isOn) {\n SAFECALL(_CompileFunctionBody(self, \"generator\", node, true, true, true));\n } else {\n SAFECALL(_CompileFunctionBody(self, \"generator\", node, true, true, false));\n }\n }\n break;\n }\n case UpTrySyntaxType: {\n UpTrySyntax* n = (UpTrySyntax*)node;\n int didReturn = self->frame->didReturn;\n\n int tryCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp2(self, node, UpInstructionBeginTry, -1, -1);\n SAFECALL(_CompileSyntax(self, n->tryBlock, statement));\n _NewOp(self, node, UpInstructionEndTry);\n\n int catchCursor = -1;\n UpArray* catchJumpCursors = NULL;\n if (n->catchBlocks) {\n catchCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n UpSetSyntax* catchBlocks = (UpSetSyntax*)n->catchBlocks;\n catchJumpCursors = UpArrayCreate(UpGetHeap(), sizeof(int));\n\n for 
(UpSyntaxItem* item = catchBlocks->first; item; item = item->next) {\n UpCatchSyntax* catchBlock = (UpCatchSyntax*)item->value;\n\n // if (catchBlock->excType) {\n // NYI\n // }\n\n _NewOp(self, (UpSyntax*)catchBlock, UpInstructionCatch);\n\n // if (catchBlock->excName) {\n // SAFECALL(_SetName(self, item->value, catchBlock->excName));\n // }\n SAFECALL(_CompileSyntax(self, catchBlock->statements, statement));\n\n if (item->next) {\n int catchJumpCursor = UpBufferCursor(self->frame->funcDef->ops);\n UpArrayAppend(catchJumpCursors, &catchJumpCursor);\n _NewOp1(self, node, UpInstructionJump, -1);\n }\n }\n }\n\n int finallyCursor = UpBufferCursor(self->frame->funcDef->ops);\n SAFECALL(_CompileSyntax(self, n->finallyBlock, statement));\n _NewOp(self, node, UpInstructionFinally);\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n\n if (catchJumpCursors) {\n for (int i = 0; i < UpArrayCount(catchJumpCursors); ++i) {\n int catchJumpCursor;\n UpArrayGet(catchJumpCursors, i, &catchJumpCursor);\n UpBufferSeek(self->frame->funcDef->ops, catchJumpCursor);\n _NewOp1(self, node, UpInstructionJump, finallyCursor);\n }\n }\n\n UpBufferSeek(self->frame->funcDef->ops, tryCursor);\n _NewOp2(self, node, UpInstructionBeginTry, catchCursor, finallyCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n\n if (self->frame->didReturn - didReturn > 1) {\n self->frame->didReturn = didReturn+1;\n } else {\n self->frame->didReturn = didReturn;\n }\n\n break;\n }\n case UpFinallySyntaxType: {\n UpSyntax1* n = (UpSyntax1*)node;\n SAFECALL(_CompileSyntax(self, n->value, statement));\n break;\n }\n default:\n break;\n }\n\n return UpSuccess;\n}\n\nstatic UpStatus _CompileType(UpCompiler* self, UpSyntax* typeSig, UpType** outType) {\n return UpSuccess;\n}\n\nstatic UpStatus _NameForClass(UpCompiler* self, UpSyntax* sig, const char** out) {\n switch (sig->type) {\n case UpIdSyntaxType: {\n UpIdSyntax* id = (UpIdSyntax*)sig;\n if (strcmp(id->name, \"class\") == 0 || 
strcmp(id->name, \"trait\") == 0) {\n _SetError(self, \"Illegal class name\", sig);\n return UpFailure;\n }\n *out = id->name;\n return UpSuccess;\n }\n case UpTypeSyntaxType: {\n // UpTypeSyntax* type = (UpTypeSyntax*)sig;\n // if (strcmp(type->name, \"class\") == 0 || strcmp(type->name, \"trait\") == 0) {\n // _SetError(self, \"Illegal class name\", sig);\n // return UpFailure;\n // }\n // *out = type->name;\n return UpSuccess;\n }\n default: {\n _SetError(self, \"Illegal class declaration\", sig);\n return UpFailure;\n }\n }\n}\n\nstatic UpStatus _NameForFunction(UpCompiler* self, UpSyntax* sig, const char** out) {\n if (!sig) {\n *out = NULL;\n return UpSuccess;\n }\n switch (sig->type) {\n case UpIdSyntaxType: {\n UpIdSyntax* id = (UpIdSyntax*)sig;\n *out = UpArenaFormatString(UpGetHeap(), \"%s\", id->name);\n return UpSuccess;\n }\n case UpCastSyntaxType: {\n UpCastSyntax* cast = (UpCastSyntax*)sig;\n return _NameForFunction(self, cast->expr, out);\n }\n case UpAssignmentSyntaxType: {\n UpAssignmentSyntax* assign = (UpAssignmentSyntax*)sig;\n if (assign->left->type == UpIdSyntaxType) {\n if (assign->right->type == UpBinarySyntaxType) {\n UpBinarySyntax* binary = (UpBinarySyntax*)assign->right;\n if (binary->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)binary->left;\n if (strcmp(left->name, \"this\") == 0) {\n return _NameForFunction(self, (UpSyntax*)binary, out);\n } else {\n _SetError(self, \"Operator overrides must have 'this' on the left\", sig);\n return UpFailure;\n }\n }\n }\n\n UpIdSyntax* left = (UpIdSyntax*)assign->left;\n *out = UpArenaFormatString(UpGetHeap(), \"set:%s\", left->name);\n return UpSuccess;\n } else if (assign->left->type == UpBinarySyntaxType) {\n UpBinarySyntax* binary = (UpBinarySyntax*)assign->left;\n if (binary->left->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)binary->left;\n if (strcmp(id->name, \"this\") == 0) {\n if (binary->op == UpIndexOp) {\n *out = \"op:[]=\";\n return UpSuccess;\n } 
else if (binary->op == UpSliceOp) {\n *out = \"op:[to]=\";\n return UpSuccess;\n } else if (binary->op == UpLookupOp) {\n *out = \"op:.[]=\";\n return UpSuccess;\n }\n } else {\n _SetError(self, \"Operator overrides must have 'this' on the left\", sig);\n return UpFailure;\n }\n }\n }\n }\n case UpCallSyntaxType: {\n UpCallSyntax* call = (UpCallSyntax*)sig;\n if (call->callable->type == UpIdSyntaxType) {\n UpIdSyntax* callable = (UpIdSyntax*)call->callable;\n if (strcmp(callable->name, \"this\") == 0) {\n *out = \"op:call\";\n } else {\n *out = callable->name;\n }\n return UpSuccess;\n }\n }\n case UpUnarySyntaxType: {\n UpUnarySyntax* unary = (UpUnarySyntax*)sig;\n if (unary->operand->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)unary->operand;\n if (strcmp(id->name, \"this\") == 0) {\n const char* opName = UpGetOperatorOverrideName(unary->op);\n if (opName) {\n *out = opName;\n return UpSuccess;\n } else {\n _SetError(self, \"Can't override operator\", sig);\n return UpFailure;\n }\n } else {\n _SetError(self, \"Operator overrides must have 'this' on the left\", sig);\n return UpFailure;\n }\n } else if (unary->operand->type == UpBinarySyntaxType) {\n if (unary->op == UpDeleteOp) {\n UpBinarySyntax* binary = (UpBinarySyntax*)unary->operand;\n if (binary->op == UpIndexOp) {\n *out = \"op:-=[]\";\n return UpSuccess;\n } else if (binary->op == UpSliceOp) {\n *out = \"op:-=[to]\";\n return UpSuccess;\n } else if (binary->op == UpLookupOp) {\n *out = \"op:-=.[]\";\n return UpSuccess;\n } else {\n _SetError(self, \"Can't override operator\", sig);\n return UpFailure;\n }\n }\n }\n }\n case UpBinarySyntaxType: {\n UpBinarySyntax* binary = (UpBinarySyntax*)sig;\n UpSyntax* thisOp = binary->op == UpIsInOp ? 
binary->right : binary->left;\n if (thisOp->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)thisOp;\n if (strcmp(id->name, \"this\") == 0) {\n const char* opName = UpGetOperatorOverrideName(binary->op);\n if (opName) {\n *out = opName;\n return UpSuccess;\n } else {\n _SetError(self, \"Can't override operator\", sig);\n return UpFailure;\n }\n } else {\n _SetError(self, \"Operator overrides must have 'this' on the left\", sig);\n return UpFailure;\n }\n }\n }\n default: {\n _SetError(self, \"Illegal function declaration\", sig);\n return UpFailure;\n }\n }\n}\n\nstatic UpStatus _CompileArgumentOps(UpCompiler* self, UpSyntax* node, const char* name,\n UpSyntax* defaultValue) {\n int localIndex;\n _DeclareLocal(self, name, NULL, &localIndex, false);\n\n if (defaultValue) {\n int jumpCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp1(self, defaultValue, UpInstructionJumpIfDefined, -1);\n\n SAFECALL(_CompileSyntax(self, defaultValue, false));\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor);\n _NewOp1(self, defaultValue, UpInstructionJumpIfDefined, endCursor);\n\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n }\n\n _NewOp1(self, node, UpInstructionSetArgument, localIndex);\n\n ++self->frame->funcDef->argsCount;\n\n return UpSuccess;\n}\n\nstatic UpStatus _CompileArgument(UpCompiler* self, UpSyntax* argSyntax, UpArray* argDefs,\n UpSyntax* defaultValue) {\n switch (argSyntax->type) {\n case UpIdSyntaxType: {\n UpIdSyntax* id = (UpIdSyntax*)argSyntax;\n\n UpArgumentDef* def = UpArenaNew(UpGetHeap(), UpArgumentDef);\n def->name = UpGetSymbol(id->name);\n UpArrayAppend(argDefs, &def);\n\n _CompileArgumentOps(self, argSyntax, id->name, defaultValue);\n break;\n }\n case UpAssignmentSyntaxType: {\n UpAssignmentSyntax* assign = (UpAssignmentSyntax*)argSyntax;\n return _CompileArgument(self, assign->left, argDefs, assign->right);\n }\n case UpCastSyntaxType: {\n UpCastSyntax* cast 
= (UpCastSyntax*)argSyntax;\n if (cast->expr->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)cast->expr;\n\n UpArgumentDef* def = UpArenaNew(UpGetHeap(), UpArgumentDef);\n def->name = UpGetSymbol(id->name);\n UpArrayAppend(argDefs, &def);\n\n _CompileArgumentOps(self, cast->expr, id->name, defaultValue);\n break;\n }\n }\n case UpDefaultSyntaxType: {\n UpSyntax2* defaults = (UpSyntax2*)argSyntax;\n SAFECALL(_CompileArgument(self, defaults->value, argDefs, NULL));\n SAFECALL(_CompileArgument(self, defaults->value2, argDefs, NULL));\n break;\n }\n case UpRangeSyntaxType: {\n UpRangeSyntax* range = (UpRangeSyntax*)argSyntax;\n SAFECALL(_CompileArgument(self, range->from, argDefs, NULL));\n SAFECALL(_CompileArgument(self, range->to, argDefs, NULL));\n SAFECALL(_CompileArgument(self, range->by, argDefs, NULL));\n break;\n }\n case UpGroupSyntaxType: {\n UpSyntax1* group = (UpSyntax1*)argSyntax;\n SAFECALL(_CompileArgument(self, group->value, argDefs, NULL));\n break;\n }\n case UpUndefinedSyntaxType: {\n break;\n }\n default: {\n _SetError(self, \"Illegal argument declaration\", argSyntax);\n return UpFailure;\n }\n }\n\n return UpSuccess;\n}\n\nstatic UpStatus _CompileArguments(UpCompiler* self, UpSyntax* sig, UpCompilerFrame* outerFrame) {\n UpFunctionDef* funcDef = self->frame->funcDef;\n\n if (sig->type == UpCastSyntaxType) {\n UpCastSyntax* cast = (UpCastSyntax*)sig;\n sig = cast->expr;\n\n UpStatus result = _CompileType(self, cast->typeSig, &funcDef->returnType);\n if (!result) {\n return result;\n }\n }\n\n UpArray* argDefs = UpArrayCreate(UpGetHeap(), sizeof(UpArgumentDef*));\n funcDef->arguments = argDefs;\n\n UpSyntaxType type = sig ? 
sig->type : UpNoSyntaxType;\n switch (type) {\n case UpIdSyntaxType:\n case UpCastSyntaxType:\n case UpTypeSyntaxType: {\n if (!outerFrame->classDef) {\n _SetError(self, \"Illegal getter outside class\", sig);\n return UpFailure;\n }\n }\n case UpAssignmentSyntaxType: {\n if (!outerFrame->classDef) {\n _SetError(self, \"Illegal setter outside class\", sig);\n return UpFailure;\n }\n\n UpAssignmentSyntax* assign = (UpAssignmentSyntax*)sig;\n if (assign->left->type == UpIdSyntaxType) {\n if (assign->right->type == UpBinarySyntaxType) {\n UpBinarySyntax* binary = (UpBinarySyntax*)assign->right;\n if (binary->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)binary->left;\n if (strcmp(left->name, \"this\") == 0) {\n if (_CompileArgument(self, binary->right, argDefs, NULL)) {\n break;\n } else {\n return UpFailure;\n }\n }\n }\n } else if (_CompileArgument(self, assign->right, argDefs, NULL)) {\n break;\n } else {\n return UpFailure;\n }\n } else if (assign->left->type == UpBinarySyntaxType) {\n UpBinarySyntax* binary = (UpBinarySyntax*)assign->left;\n if (binary->left->type == UpIdSyntaxType) {\n UpIdSyntax* left = (UpIdSyntax*)binary->left;\n if (strcmp(left->name, \"this\") == 0) {\n if (binary->op == UpIndexOp) {\n SAFECALL(_CompileArgument(self, assign->right, argDefs, NULL));\n SAFECALL(_CompileArgument(self, binary->right, argDefs, NULL));\n break;\n } else if (binary->op == UpSliceOp) {\n SAFECALL(_CompileArgument(self, assign->right, argDefs, NULL));\n SAFECALL(_CompileArgument(self, binary->right, argDefs, NULL));\n break;\n } else if (binary->op == UpLookupOp) {\n SAFECALL(_CompileArgument(self, assign->right, argDefs, NULL));\n SAFECALL(_CompileArgument(self, binary->right, argDefs, NULL));\n break;\n }\n }\n }\n }\n }\n case UpCallSyntaxType: {\n UpCallSyntax* call = (UpCallSyntax*)sig;\n UpSetSyntax* args = (UpSetSyntax*)call->args;\n for (UpSyntaxItem* item = args->first; item; item = item->next) {\n if (!_CompileArgument(self, 
item->value, argDefs, NULL)) {\n return UpFailure;\n }\n }\n break;\n }\n case UpUnarySyntaxType: {\n UpUnarySyntax* unary = (UpUnarySyntax*)sig;\n if (unary->operand->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)unary->operand;\n if (strcmp(id->name, \"this\") == 0) {\n break;\n }\n } else if (unary->operand->type == UpBinarySyntaxType) {\n if (unary->op == UpDeleteOp) {\n UpBinarySyntax* binary = (UpBinarySyntax*)unary->operand;\n if (binary->op == UpIndexOp) {\n SAFECALL(_CompileArgument(self, binary->right, argDefs, NULL));\n break;\n } else if (binary->op == UpSliceOp) {\n SAFECALL(_CompileArgument(self, binary->right, argDefs, NULL));\n break;\n } else if (binary->op == UpLookupOp) {\n SAFECALL(_CompileArgument(self, binary->right, argDefs, NULL));\n break;\n }\n }\n }\n }\n case UpBinarySyntaxType: {\n UpBinarySyntax* binary = (UpBinarySyntax*)sig;\n UpSyntax* thisOp = binary->op == UpIsInOp ? binary->right : binary->left;\n UpSyntax* otherOp = binary->op == UpIsInOp ? 
binary->left : binary->right;\n if (thisOp->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)thisOp;\n if (strcmp(id->name, \"this\") == 0) {\n if (_CompileArgument(self, otherOp, argDefs, NULL)) {\n break;\n } else {\n return UpFailure;\n }\n }\n }\n }\n default: {\n _SetError(self, \"Illegal function declaration\", sig);\n return UpFailure;\n }\n }\n return UpSuccess;\n}\n\nstatic UpStatus _CompileLambdaArguments(UpCompiler* self, UpSyntax* args, UpArray* argDefs) {\n if (!args) return UpSuccess;\n\n switch (args->type) {\n case UpIdSyntaxType: {\n _CompileArgument(self, args, argDefs, NULL);\n break;\n }\n case UpSetSyntaxType: {\n UpSetSyntax* set = (UpSetSyntax*)args;\n for (UpSyntaxItem* item = set->first; item; item = item->next) {\n if (!_CompileArgument(self, item->value, argDefs, NULL)) {\n return UpFailure;\n }\n }\n break;\n }\n case UpGroupSyntaxType: {\n UpSyntax1* group = (UpSyntax1*)args;\n _CompileLambdaArguments(self, group->value, argDefs);\n break;\n }\n default: {\n _SetError(self, \"Illegal arguments to anonymous function\", args);\n return UpFailure;\n }\n }\n return UpSuccess;\n}\n\nstatic UpSymbol _GetClassProbeId(UpCompiler* self, const char* name, const char* moduleName) {\n const char* s = moduleName\n ? 
UpArenaFormatString(UpGetHeap(), \"%s:%s\", name, moduleName)\n : name;\n return UpGetSymbol(s);\n}\n\nstatic void _MakeClassDef(UpCompiler* self, const char* name, UpSyntax* ast, UpArray* members,\n UpIntTable* getters, UpIntTable* setters, UpFunctionDef* constructorDef,\n UpClassDef** ret) {\n UpClassDef* def = UpArenaNew(UpGetHeap(), UpClassDef);\n UP_SET_PROBE_ID(def, _GetClassProbeId(self, name, self->moduleName));\n def->name = UpArenaCopyString(UpGetHeap(), name);\n def->ast = ast;\n def->sourcePath = self->sourcePath;\n def->moduleName = self->moduleName;\n def->members = members;\n def->getters = getters;\n def->setters = setters;\n def->constructorDef = constructorDef;\n\n if (ret) {\n *ret = def;\n }\n}\n\nstatic UpStatus _CompileConstructor(UpCompiler* self, UpFunctionSyntax* constructor,\n const char* typeName, UpIntTable* getters, UpFunctionDef** outDef) {\n if (constructor) {\n SAFECALL(_PushFunction(self, typeName, constructor->head, constructor->body,\n false, false, constructor->isExpression, true));\n } else {\n SAFECALL(_PushFunction(self, typeName, NULL, NULL, false, false, false, true));\n }\n\n SAFECALL(_GetName(self, (UpSyntax*)constructor, \"this\"));\n _NewOp(self, NULL, UpInstructionReturn);\n ++self->frame->didReturn;\n\n UpFunctionDef* funcDef = NULL;\n _PopFrame(self, &funcDef);\n\n UpGetterDef* getterDef = UpArenaNew(UpGetHeap(), UpGetterDef);\n getterDef->name = UpArenaCopyString(UpGetHeap(), typeName);\n getterDef->funcDef = funcDef;\n getterDef->isContextual = true;\n getterDef->isMember = true;\n\n UpSymbol propSymbol = UpGetSymbol(typeName);\n UpIntTableSet(getters, propSymbol, &getterDef);\n\n if (outDef) {\n *outDef = funcDef;\n }\n return UpSuccess;\n}\n\nstatic UpStatus _CompileProperty(UpCompiler* self, const char* name, UpSyntax* node,\n UpIntTable* getters, bool isExpr, bool isCached,\n UpFunctionDef** outDef) {\n SAFECALL(_PushFunction(self, name, NULL, node, false, false, isExpr, true));\n bool isContextual = 
self->frame->isContextual;\n\n if (isCached) {\n // Validate and return the property at the end of evaluator function\n SAFECALL(_GetName(self, node, \"this\"));\n UpSymbol symbol = UpGetSymbol(name);\n _NewOp1(self, NULL, UpInstructionValidateProperty, symbol);\n _NewOp(self, NULL, UpInstructionReturn);\n ++self->frame->didReturn;\n }\n\n UpFunctionDef* evalDef = NULL;\n _PopFrame(self, &evalDef);\n\n UpGetterDef* getterDef = UpArenaNew(UpGetHeap(), UpGetterDef);\n getterDef->name = UpArenaCopyString(UpGetHeap(), name);\n getterDef->funcDef = evalDef;\n getterDef->isContextual = isContextual;\n getterDef->isCached = isCached;\n\n UpSymbol propSymbol = UpGetSymbol(name);\n UpIntTableSet(getters, propSymbol, &getterDef);\n\n if (outDef) {\n *outDef = evalDef;\n }\n return UpSuccess;\n}\n\nstatic UpStatus _CompileMemberFunction(UpCompiler* self, const char* name, UpSyntax* sig,\n UpSyntax* body, bool isGenerator, bool isExpr,\n UpIntTable* getters, UpFunctionDef** outDef) {\n UpFunctionDef* def;\n SAFECALL(_CompileFunctionDef(self, name, sig, body, false, isGenerator, isExpr, true, &def));\n\n UpGetterDef* getterDef = UpArenaNew(UpGetHeap(), UpGetterDef);\n getterDef->name = UpArenaCopyString(UpGetHeap(), def->name);\n getterDef->funcDef = def;\n getterDef->isContextual = true;\n getterDef->isMember = true;\n\n UpSymbol propSymbol = UpGetSymbol(def->name);\n UpIntTableSet(getters, propSymbol, &getterDef);\n\n *outDef = def;\n return UpSuccess;\n}\n\nstatic UpStatus _CompilePropertyAssignment(UpCompiler* self, UpAssignmentSyntax* node,\n UpIntTable* getters) {\n if (node->left->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)node->left;\n SAFECALL(_CompileProperty(self, id->name, node->right, getters, false, true, NULL));\n } else if (node->left->type == UpPropertySyntaxType) {\n // UpPropertySyntax* prop = (UpPropertySyntax*)node->left;\n NYI\n } else {\n _SetError(self, \"Illegal assignment\", node->left);\n return UpFailure;\n }\n return 
UpSuccess;\n}\n\nstatic UpStatus _CompileMember(UpCompiler* self, UpSyntax* node, const char* typeName,\n UpArray* members, UpIntTable* getters, UpIntTable* setters,\n UpFunctionSyntax** constructor) {\n if (!node) {\n return UpSuccess;\n }\n\n switch (node->type) {\n case UpSetSyntaxType: {\n for (UpSyntaxItem* item = ((UpSetSyntax*)node)->first; item; item = item->next) {\n SAFECALL(_CompileMember(self, item->value, typeName, members, getters, setters,\n constructor));\n }\n break;\n }\n case UpDeclarationSyntaxType: {\n UpDeclarationSyntax* decl = (UpDeclarationSyntax*)node;\n return _CompileMember(self, decl->head, typeName, members, getters, setters,\n constructor);\n break;\n }\n case UpIdSyntaxType: {\n // Only nulls are allowed here\n if (strcmp(((UpIdSyntax*)node)->name, \"null\")) {\n _SetError(self, \"Illegal statement in class\", node);\n return UpFailure;\n }\n break;\n }\n case UpAssignmentSyntaxType: {\n UpAssignmentSyntax* assign = (UpAssignmentSyntax*)node;\n SAFECALL(_CompilePropertyAssignment(self, assign, getters));\n break;\n }\n case UpCallSyntaxType: {\n break;\n }\n case UpFunctionSyntaxType: {\n UpFunctionSyntax* fn = (UpFunctionSyntax*)node;\n\n if (fn->head->type == UpIdSyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)fn->head;\n\n UpFunctionDef* def;\n SAFECALL(_CompileProperty(self, id->name, fn->body, getters, fn->isExpression,\n false, &def));\n UpArrayAppend(members, &def);\n } else {\n if (fn->head->type == UpAssignmentSyntaxType) {\n UpAssignmentSyntax* assign = (UpAssignmentSyntax*)fn->head;\n if (assign->left->type == UpIdSyntaxType\n && assign->right->type != UpBinarySyntaxType) {\n UpIdSyntax* id = (UpIdSyntax*)assign->left;\n UpFunctionDef* def;\n SAFECALL(_CompileFunctionDef(self, id->name, fn->head, fn->body, false,\n fn->isGenerator, fn->isExpression, true, &def));\n\n UpSymbol propSymbol = UpGetSymbol(id->name);\n UpIntTableSet(setters, propSymbol, &def);\n\n UpArrayAppend(members, &def);\n break;\n }\n }\n\n const char* 
funcName = NULL;\n SAFECALL(_NameForFunction(self, fn->head, &funcName));\n\n if (strcmp(funcName, typeName) == 0) {\n *constructor = fn;\n } else {\n UpFunctionDef* def;\n SAFECALL(_CompileMemberFunction(self, funcName, fn->head, fn->body,\n fn->isGenerator, fn->isExpression, getters,\n &def));\n UpArrayAppend(members, &def);\n }\n }\n break;\n }\n // case UpClassSyntaxType: {\n // UpClassSyntax* cls = (UpClassSyntax*)node;\n // const char* name = NULL;\n // SAFECALL(_NameForClass(self, cls->name, &name));\n //\n // SAFECALL(_CompileProperty(self, name, node, getters, false, true, NULL));\n // break;\n // }\n case UpCFunctionSyntaxType: {\n UpCFunctionSyntax* n = (UpCFunctionSyntax*)node;\n SAFECALL(_CompileProperty(self, n->name, node, getters, false, true, NULL));\n break;\n }\n\n case UpImportSyntaxType: {\n SAFECALL(_CompileSyntax(self, node, true));\n break;\n }\n default: {\n _SetError(self, \"Illegal statement in class\", node);\n return UpFailure;\n }\n }\n return UpSuccess;\n}\n\nstatic UpStatus _CompileClassDef(UpCompiler* self, const char* typeName, UpSyntax* base,\n UpSyntax* body, bool isModule, UpClassDef** outDef,\n int* outFrameIndex, int* outLocalIndex) {\n UpArena* heap = UpGetHeap();\n UpFunctionSyntax* constructor = NULL;\n UpArray* members = UpArrayCreate(heap, sizeof(UpFunctionDef*));\n UpIntTable* getters = UpIntTableCreate(heap, sizeof(UpGetterDef*));\n UpIntTable* setters = UpIntTableCreate(heap, sizeof(UpFunctionDef*));\n\n UpClassDef* classDef;\n _MakeClassDef(self, typeName, body, members, getters, setters, NULL, &classDef);\n self->frame->classDef = classDef;\n BEGIN_COMPILER_CLASS(classDef->probeId);\n\n if (isModule) {\n int frameIndex, localIndex;\n _DeclareExpansion(self, classDef, NULL, body, &frameIndex, &localIndex);\n\n _NewOp1v(self, NULL, UpInstructionNewObject, classDef);\n _NewOp2(self, NULL, UpInstructionSetLocal, frameIndex, localIndex);\n\n UpSymbol moduleSymbol = UpGetSymbol(typeName);\n _NewOp1(self, NULL, 
UpInstructionSetImport, moduleSymbol);\n\n if (outFrameIndex) {\n *outFrameIndex = frameIndex;\n }\n if (outLocalIndex) {\n *outLocalIndex = localIndex;\n }\n }\n\n // Before compiling, declare all names so that they can be referenced out of order\n SAFECALL(_DeclareNames(self, body, true));\n SAFECALL(_CompileMember(self, body, typeName, members, getters, setters, &constructor));\n\n self->frame->classDef = NULL;\n\n UpFunctionDef* constructorDef = NULL;\n SAFECALL(_CompileConstructor(self, constructor, typeName, getters, &constructorDef));\n classDef->constructorDef = constructorDef;\n\n // _SetName(self, body, typeName);\n\n if (base) {\n if (base->type == UpIdSyntaxType) {\n UpIdSyntax* baseType = (UpIdSyntax*)base;\n SAFECALL(_GetName(self, base, baseType->name));\n } else if (base->type == UpTypeSyntaxType) {\n // UpTypeSyntax* baseType = (UpTypeSyntax*)base;\n // SAFECALL(_GetName(self, base, baseType->name));\n } else {\n _SetError(self, \"Invalid base class declaration\", base);\n return UpFailure;\n }\n } else {\n _NewOp1v(self, body, UpInstructionLoadObject, UpUndefined());\n }\n\n *outDef = classDef;\n\n END_COMPILER_CLASS(classDef->probeId);\n return UpSuccess;\n}\n\nstatic UpStatus _CompileFunctionHead(UpCompiler* self, const char* name, UpSyntax* sig, UpSyntax* body,\n bool isAnon, bool declareThis) {\n UpScope* scope = self->frame && !self->frame->classDef ? 
self->frame->scope : NULL;\n if (scope) {\n if (!scope->funcDefs) {\n scope->funcDefs = UpArrayCreate(UpGetHeap(), sizeof(UpFunctionDef*));\n }\n }\n\n UpCompilerFrame* outerFrame = self->frame;\n _PushFrame(self, name, body);\n\n if (scope) {\n UpArrayAppend(scope->funcDefs, &self->frame->funcDef);\n }\n\n if (declareThis) {\n int localIndex;\n _DeclareExpansion(self, self->frame->classDef, \"this\", body, NULL, &localIndex);\n self->frame->funcDef->thisIndex = localIndex;\n }\n\n if (sig) {\n if (isAnon) {\n UpArray* argDefs = UpArrayCreate(UpGetHeap(), sizeof(UpArgumentDef*));\n self->frame->funcDef->arguments = argDefs;\n SAFECALL(_CompileLambdaArguments(self, sig, argDefs));\n } else {\n SAFECALL(_CompileArguments(self, sig, outerFrame));\n }\n }\n return UpSuccess;\n}\n\nstatic UpStatus _CompileFunctionBody(UpCompiler* self, const char* name, UpSyntax* body,\n bool isGenerator, bool isExpression, bool isImmediate) {\n // Before compiling the function, look for all assignments so we can\n // register variable names and imported names, giving them numeric addresses\n // that can be linked to properly.\n SAFECALL(_DeclareNames(self, body, true));\n\n if (isGenerator) {\n SAFECALL(_CompileFunctionHead(self, name, NULL, body, false, false));\n self->frame->isGenerator = true;\n SAFECALL(_CompileArgumentOps(self, body, \"__GENERATOR__\", NULL));\n if (body->type == UpIteratorSyntaxType) {\n SAFECALL(_CompileIterator(self, (UpIteratorSyntax*)body, false));\n } else {\n SAFECALL(_CompileFunctionBody(self, name, body, false, isExpression, false));\n }\n UpFunctionDef* funcDef;\n _PopFrame(self, &funcDef);\n\n _NewOp2v(self, NULL, UpInstructionNewGenerator, funcDef, isImmediate);\n if (!isExpression) {\n _NewOp(self, NULL, UpInstructionReturn);\n ++self->frame->didReturn;\n }\n\n return UpSuccess;\n } else {\n if (isExpression && body && body->type == UpSetSyntaxType) {\n UpSetSyntax* set = (UpSetSyntax*)body;\n UpSyntaxItem* item = set->first;\n if (item) {\n 
SAFECALL(_CompileSyntax(self, item->value, false));\n item = item->next;\n }\n for (; item; item = item->next) {\n SAFECALL(_CompileSyntax(self, item->value, false));\n _NewOp1(self, item->value, UpInstructionCall, 1);\n }\n } else {\n SAFECALL(_CompileSyntax(self, body, !isExpression));\n }\n\n if (isExpression) {\n _NewOp(self, NULL, UpInstructionReturn);\n ++self->frame->didReturn;\n }\n return UpSuccess;\n }\n}\n\nstatic UpStatus _PushFunction(UpCompiler* self, const char* name, UpSyntax* sig, UpSyntax* body,\n bool isAnon, bool isGenerator, bool isExpression, bool declareThis) {\n SAFECALL(_CompileFunctionHead(self, name, sig, body, isAnon, declareThis));\n SAFECALL(_CompileFunctionBody(self, name, body, isGenerator, isExpression, false));\n return UpSuccess;\n}\n\nstatic UpStatus _CompileFunctionDef(UpCompiler* self, const char* name, UpSyntax* sig,\n UpSyntax* body, bool isAnon, bool isGenerator, bool isExpression,\n bool declareThis, UpFunctionDef** outDef) {\n SAFECALL(_PushFunction(self, name, sig, body, isAnon, isGenerator, isExpression, declareThis));\n\n UpFunctionDef* funcDef;\n _PopFrame(self, &funcDef);\n\n if (outDef) {\n *outDef = funcDef;\n }\n return UpSuccess;\n}\n\nstatic UpCPrimitive _CompileCPrimitive(UpCompiler* self, const char* name) {\n if (!strcmp(name, \"void\")) {\n return UpCVoidType;\n } else if (!strcmp(name, \"UpObject\")) {\n return UpCUpObjectType;\n } else if (!strcmp(name, \"UpContext\")) {\n return UpCUpContextType;\n } else if (!strcmp(name, \"bool\")) {\n return UpCBoolType;\n } else if (!strcmp(name, \"char\")) {\n return UpCCharType;\n } else if (!strcmp(name, \"unsigned char\")) {\n return UpCUCharType;\n } else if (!strcmp(name, \"short\")) {\n return UpCShortType;\n } else if (!strcmp(name, \"unsigned short\")) {\n return UpCUShortType;\n } else if (!strcmp(name, \"int\")) {\n return UpCIntType;\n } else if (!strcmp(name, \"unsigned int\")) {\n return UpCUIntType;\n } else if (!strcmp(name, \"long\")) {\n return 
UpCLongType;\n } else if (!strcmp(name, \"unsigned long\")) {\n return UpCULongType;\n } else if (!strcmp(name, \"long long\")) {\n return UpCLongLongType;\n } else if (!strcmp(name, \"unsigned long long\")) {\n return UpCULongLongType;\n } else if (!strcmp(name, \"float\")) {\n return UpCFloatType;\n } else if (!strcmp(name, \"double\")) {\n return UpCDoubleType;\n } else if (!strcmp(name, \"long double\")) {\n return UpCLongDoubleType;\n } else if (!strcmp(name, \"size_t\")) {\n return UpCIntType;\n } else {\n return UpCStructType;\n }\n}\n\nstatic UpStatus _CompileCType(UpCompiler* self, UpCTypeSyntax* type, UpCType** out) {\n UpCType* ctype = UpArenaNew(UpGetHeap(), UpCType);\n ctype->pointers = type->pointerCount;\n ctype->primitive = _CompileCPrimitive(self, type->name);\n // if (ctype->primitive == UpCUpObjectType) {\n ctype->name = UpArenaCopyString(UpGetHeap(), type->name);\n // }\n\n *out = ctype;\n return UpSuccess;\n}\n\nstatic UpStatus _CompileCFunctionDef(UpCompiler* self, const char* name, const char* library,\n UpSyntax* returns, UpSetSyntax* args,\n UpCFunctionDef** outDef) {\n UpCType* returnType;\n SAFECALL(_CompileCType(self, (UpCTypeSyntax*)returns, &returnType));\n\n int argumentCount = 0;\n UpCType* argumentTypes = NULL;\n if (args) {\n UpCType* lastArgumentType = NULL;\n for (UpSyntaxItem* item = args->first; item; item = item->next) {\n UpCArgumentSyntax* arg = (UpCArgumentSyntax*)item->value;\n UpCType* argumentType;\n SAFECALL(_CompileCType(self, (UpCTypeSyntax*)arg->type, &argumentType));\n if (!argumentTypes) {\n argumentTypes = lastArgumentType = argumentType;\n } else {\n lastArgumentType->next = argumentType;\n lastArgumentType = argumentType;\n }\n ++argumentCount;\n }\n }\n\n UpCFunctionDef* funcDef;\n SAFECALL(UpCFunctionDefCreate(name, library, returnType, argumentCount,\n argumentTypes, &funcDef));\n\n if (outDef) {\n *outDef = funcDef;\n }\n return UpSuccess;\n}\n\n// 
************************************************************************************************\n\nUpCompiler* UpCompilerCreate() {\n UpCompiler* self = UpArenaNew(UpGetHeap(), UpCompiler);\n #ifdef UP_ENABLE_PROBES\n self->probeId = UpBufferCreateWithSize(UpGetHeap(), 100);\n #endif\n return self;\n}\n\nUpStatus UpCompilerCompileDeclarative(UpCompiler* self, UpSyntax* ast, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef,\n UpClassDef** outClassDef) {\n self->isCompilingUpDotUp = !strcmp(moduleName, \"up\");\n self->sourcePath = sourcePath ? UpArenaCopyString(UpGetHeap(), sourcePath) : NULL;\n self->moduleName = moduleName ? UpArenaCopyString(UpGetHeap(), moduleName) : NULL;\n\n char* headName = NULL;\n char* tailName = NULL;\n char* constructorName = NULL;\n UpSymbol headSymbol = 0;\n UpSymbol tailSymbol = 0;\n UpSymbol constructorSymbol = 0;\n char* dotTail = strrchr(moduleName, '.');\n if (!dotTail) {\n headName = UpArenaCopyString(UpGetHeap(), moduleName);\n headSymbol = UpGetSymbol(headName);\n constructorName = headName;\n constructorSymbol = headSymbol;\n } else {\n headName = UpArenaCopyString(UpGetHeap(), moduleName);\n headName[dotTail-moduleName] = 0;\n tailName = headName + (dotTail-moduleName)+1;\n headSymbol = UpGetSymbol(headName);\n tailSymbol = UpGetSymbol(tailName);\n constructorName = tailName;\n constructorSymbol = tailSymbol;\n }\n\n _PushFrame(self, constructorName, ast);\n _DeclareBuiltin(self);\n\n // printf(\"headName %s tailName %s\\n\", headName, tailName); fflush(stdout);\n UpClassDef* classDef;\n int moduleFrameIndex, moduleLocalIndex;\n SAFECALL(_CompileClassDef(self, constructorName, NULL, ast, true, &classDef, &moduleFrameIndex,\n &moduleLocalIndex));\n\n // If the module name is like \"foo.bar\", assign the module to \"bar\" property of \"foo\"\n if (tailName) {\n _NewOp2(self, NULL, UpInstructionGetLocal, moduleFrameIndex, moduleLocalIndex);\n _NewOp1(self, NULL, UpInstructionImport, headSymbol);\n 
_NewOp1(self, NULL, UpInstructionSetProperty, tailSymbol);\n }\n\n _NewOp1v(self, NULL, UpInstructionLoadObject, UpStringCreate(sourcePath));\n _NewOp2(self, NULL, UpInstructionGetLocal, moduleFrameIndex, moduleLocalIndex);\n _NewOp1(self, NULL, UpInstructionSetProperty, UpGetSymbol(\"path\"));\n\n _NewOp1v(self, NULL, UpInstructionLoadObject, UpStringCreate(moduleName));\n _NewOp2(self, NULL, UpInstructionGetLocal, moduleFrameIndex, moduleLocalIndex);\n _NewOp1(self, NULL, UpInstructionSetProperty, UpGetSymbol(\"name\"));\n\n _NewOp2(self, NULL, UpInstructionGetLocal, moduleFrameIndex, moduleLocalIndex);\n int jumpCursor = UpBufferCursor(self->frame->funcDef->ops);\n _NewOp2(self, NULL, UpInstructionJumpIfHasNot, -1, constructorSymbol);\n _NewOp2(self, NULL, UpInstructionCallProperty, 0, constructorSymbol);\n _NewOp(self, NULL, UpInstructionReturn);\n\n int endCursor = UpBufferCursor(self->frame->funcDef->ops);\n UpBufferSeek(self->frame->funcDef->ops, jumpCursor);\n _NewOp2(self, NULL, UpInstructionJumpIfHasNot, endCursor, constructorSymbol);\n UpBufferSeek(self->frame->funcDef->ops, endCursor);\n\n _NewOp2(self, NULL, UpInstructionGetLocal, moduleFrameIndex, moduleLocalIndex);\n _NewOp(self, NULL, UpInstructionReturn);\n ++self->frame->didReturn;\n\n _PopFrame(self, outDef);\n *outClassDef = classDef;\n\n // printf(\"%s\\n\", UpBytecodeToString(*outDef)); fflush(stdout);\n return UpSuccess;\n}\n\nUpStatus UpCompilerCompileImperative(UpCompiler* self, UpSyntax* ast, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef) {\n self->isCompilingUpDotUp = !strcmp(moduleName, \"up\");\n self->sourcePath = sourcePath ? UpArenaCopyString(UpGetHeap(), sourcePath) : NULL;\n self->moduleName = moduleName ? 
UpArenaCopyString(UpGetHeap(), moduleName) : NULL;\n\n _PushFrame(self, moduleName, ast);\n _DeclareBuiltin(self);\n SAFECALL(_DeclareNames(self, ast, true));\n SAFECALL(_CompileSyntax(self, ast, true));\n _PopFrame(self, outDef);\n\n // printf(\"%s\\n\", UpBytecodeToString(*outDef)); fflush(stdout);\n return UpSuccess;\n}\n\nUpStatus UpCompilerCompileEval(UpCompiler* self, UpSyntax* ast, UpFunctionDef* caller,\n UpFunctionDef** outDef) {\n self->sourcePath = \"<eval>\";\n self->moduleName = \"<eval>\";\n\n PUSH_DEBUG_ID(\"%s\", \"<eval>\");\n\n _PushFrame(self, caller->name, ast);\n\n self->frame->scope = caller->scope;\n self->frame->funcDef->scope = caller->scope;\n\n SAFECALL(_DeclareNames(self, ast, true));\n SAFECALL(_CompileSyntax(self, ast, true));\n\n // Return the module from ready function\n _NewOp(self, NULL, UpInstructionReturn);\n ++self->frame->didReturn;\n\n UpFunctionDef* evalDef;\n _PopFrame(self, &evalDef);\n\n *outDef = evalDef;\n\n // printf(\"%s\\n\", UpBytecodeToString(*outDef)); fflush(stdout);\n return UpSuccess;\n}\n" }, { "alpha_fraction": 0.6145063638687134, "alphanum_fraction": 0.6145063638687134, "avg_line_length": 24.672412872314453, "blob_id": "552898e272dbb83558e8082d682f9dadce547cf4", "content_id": "512241552be575b7aaf4126efeec409a07404cc4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1489, "license_type": "permissive", "max_line_length": 99, "num_lines": 58, "path": "/src/vm/include/UpCFunction.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPCFUNCTION_H\n#define UP_UPCFUNCTION_H\n\n#include \"Up/UpObject.h\"\n\ntypedef enum {\n UpCVoidType,\n UpCUpObjectType,\n UpCUpContextType,\n UpCStructType,\n UpCBoolType,\n UpCCharType,\n UpCUCharType,\n UpCShortType,\n UpCUShortType,\n UpCIntType,\n UpCUIntType,\n UpCLongType,\n UpCULongType,\n UpCLongLongType,\n UpCULongLongType,\n UpCFloatType,\n UpCDoubleType,\n UpCLongDoubleType,\n} 
UpCPrimitive;\n\nstruct UpCType {\n UpCPrimitive primitive;\n char* name;\n int pointers;\n UpCType* next;\n};\n\nstruct UpCFunction {\n UpObject __base;\n UpCFunctionDef* def;\n};\n\n// ************************************************************************************************\n\nUpCFunction* UpCFunctionCreate(UpCFunctionDef* def);\n\nUpCType* UpCTypeCreate(UpCPrimitive primitive, const char* name, int pointers);\nUpStatus UpCFunctionDefCreate(const char* name, const char* library,\n UpCType* returnType, int argumentCount, UpCType* argumentTypes,\n UpCFunctionDef** out);\n\nUpStatus UpCFunctionCall(UpCFunction* self, UpObject** args, UpObject** result);\n\nchar* UpCFunctionGetName(UpCFunction* self);\nbool UpCFunctionIsBound(UpCFunction* self);\n\n// ************************************************************************************************\n\nconst char* UpCFunctionDefGetName(UpCFunctionDef* self);\nint UpCFunctionDefGetArgumentCount(UpCFunctionDef* self);\n\n#endif // UP_UPCFUNCTION_H\n" }, { "alpha_fraction": 0.6964136958122253, "alphanum_fraction": 0.6964136958122253, "avg_line_length": 32.27777862548828, "blob_id": "f02044938123df83260a2d2e24d191ee6bae9e40", "content_id": "ff8f01d88e50cd9c48b8b167d38ea0364261806f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1199, "license_type": "permissive", "max_line_length": 100, "num_lines": 36, "path": "/src/vm/include/UpLong.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPLONG_H\n#define UP_UPLONG_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpLong {\n UpObject __base;\n long long value;\n UpSymbol unit;\n};\n\n// *************************************************************************************************\n\nUpLong* UpLongCreate(long long value);\nUpLong* UpLongCreateWithUnit(long long value, UpSymbol unit);\n\nvoid UpLongInit(UpLong* self, const char* value, int base, const char* unit);\n\nconst char* 
UpLongToCString(UpLong* self, int base);\nUpString* UpLongToString(UpLong* self, int base);\n\nbool UpLongEquals(UpLong* self, UpObject* other);\nbool UpLongGreaterThan(UpLong* self, UpObject* other);\nbool UpLongGreaterThanEquals(UpLong* self, UpObject* other);\nbool UpLongLessThan(UpLong* self, UpObject* other);\nbool UpLongLessThanEquals(UpLong* self, UpObject* other);\n\nUpObject* UpLongAdd(UpLong* self, UpObject* other);\nUpObject* UpLongSubtract(UpLong* self, UpObject* other);\nUpObject* UpLongMultiply(UpLong* self, UpObject* other);\nUpObject* UpLongDivide(UpLong* self, UpObject* other);\nUpObject* UpLongMod(UpLong* self, UpObject* other);\nUpObject* UpLongPow(UpLong* self, UpObject* other);\nUpObject* UpLongNegate(UpLong* self);\n\n#endif // UP_UPLONG_H\n" }, { "alpha_fraction": 0.5167597532272339, "alphanum_fraction": 0.517690896987915, "avg_line_length": 28.01801872253418, "blob_id": "73a88837a6848bee223054055e194947a81ea90e", "content_id": "4da0ef5462107d0469ecfc3e3210bc1247bc1de4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3222, "license_type": "permissive", "max_line_length": 100, "num_lines": 111, "path": "/make/build/Resource.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport os.path, mmap, re, stat\nfrom ..util import rootProjectPath, exports\n\n# **************************************************************************************************\n\nresources = {}\n\nreInclude = re.compile(r\"\"\"#(?:include|import) (?:\"|<)(.*?(?:\\.h|\\.hpp|))(?:\"|>)\"\"\")\n\n# **************************************************************************************************\n\ndef res(path, name=None, stats=None, project=None):\n resource = resources.get(path)\n if not resource:\n resource = resources[path] = Resource(path, name, stats, project)\n\n if project:\n resource.project = project\n return resource\n\n# 
**************************************************************************************************\n\nclass Resource(object):\n _includes = None\n _dependencies = None\n\n def __init__(self, path, name=None, stats=None, project=None):\n self.path = path\n self.name = name or os.path.basename(path)\n self._stats = stats\n self.project = project\n\n def __repr__(self):\n return self.path\n\n def needsUpdate(self, target):\n \"\"\" Determines if the file is older than another file.\"\"\"\n\n return not target.stats or not self.stats or self.stats.st_mtime > target.stats.st_mtime\n\n def resetStats(self):\n \"\"\" Deletes cached stats so they will be read from the file again next time.\"\"\"\n\n self._stats = None\n\n @property\n def isdir(self):\n stats = self.stats\n if stats:\n return stat.S_ISDIR(stats.st_mode)\n\n @property\n def exists(self):\n return self.stats != 0\n\n @property\n def stats(self):\n if self._stats:\n return self._stats\n \n try:\n self._stats = os.stat(self.path)\n return self._stats\n except:\n self._stats = 0\n return 0\n\n @property\n def includes(self):\n if self._includes:\n return self._includes\n\n fd = os.open(self.path, os.O_RDONLY)\n text = mmap.mmap(fd, self.stats.st_size, access=mmap.ACCESS_READ)\n matches = re.findall(reInclude, text)\n os.close(fd)\n\n self._includes = matches\n return matches\n\n @property\n def dependencies(self):\n if self._dependencies is not None:\n return self._dependencies\n\n # Prevent infinite loop for circular references\n self._dependencies = []\n\n deps = {}\n\n dirPath = os.path.dirname(self.path)\n for includePath in self.includes:\n headerPath = os.path.join(dirPath, includePath)\n header = None\n isWithinRoot = headerPath.startswith(rootProjectPath)\n if isWithinRoot and os.path.isfile(headerPath):\n header = res(headerPath)\n else:\n includeDir = os.path.dirname(includePath)\n project = exports.get(includeDir)\n if project:\n header = project.getExport(includePath)\n\n if header:\n 
deps[header.path] = header\n\n for dep in header.dependencies:\n deps[dep.path] = dep\n\n self._dependencies = deps.values()\n return self._dependencies\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 22, "blob_id": "42a1fdc0ac3c720cbc51d3e9a98307527bc573d5", "content_id": "68fa8932472f3b6976645e8f16f0ecbbf226fffe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 207, "license_type": "permissive", "max_line_length": 55, "num_lines": 9, "path": "/src/vm/UpParser.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPPARSER_H\n#define UP_UPPARSER_H\n\n#include \"Up/UpGlobal.h\"\n\nUpStatus UpParse(const char* source, UpSyntax** node);\nUpStatus UpParseC(const char* source, UpSyntax** node);\n\n#endif // UP_UPPARSER_H\n" }, { "alpha_fraction": 0.5774545669555664, "alphanum_fraction": 0.5792727470397949, "avg_line_length": 27.340206146240234, "blob_id": "ed68808bd9917e066c8ac9e1c8f9052b84e95696", "content_id": "70f454744ba35824502a417cd79cc6db0ed22ffc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2750, "license_type": "permissive", "max_line_length": 83, "num_lines": 97, "path": "/make/test/pages.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport os.path, markdown, cgi\nfrom .run import getTestRunners\n\ndef renderTestPages(testModule, outDirPath):\n if not os.path.isdir(outDirPath):\n raise \"Directory doesn't exist at %s\" % outDirPath\n \n links = []\n for runner in getTestRunners(testModule):\n for testName, x, y in runner.getTestNames():\n fixture = runner.fixtureClass(testName, None)\n testCase = getattr(fixture, testName)\n if 1 or testCase.testFilePath.find('parseStringWhitespace') >= 0:\n groups, hasFocus = testCase.walkTextGroups()\n \n fileName = os.path.basename(testCase.testFilePath)\n fileName,ext = 
os.path.splitext(fileName)\n\n content = renderGroups(groups, fileName)\n \n pagePath = os.path.join(outDirPath, fileName+'.html')\n f = file(pagePath, 'w')\n f.write(content)\n f.close()\n \n link = linkTemplate % (fileName+'.html', fileName)\n links.append(link)\n\n content = \"\\n\".join(links)\n indexContent = indexTemplate % {\"content\": content}\n indexPath = os.path.join(outDirPath, 'index.html')\n f = file(indexPath, 'w')\n f.write(indexContent)\n f.close()\n \n\ndef renderGroups(groups, fileName):\n sections = []\n for group in groups:\n comment = group.comment\n comment = comment.decode('utf8').encode('ascii', 'xmlcharrefreplace')\n comment = markdown.markdown(comment)\n sections.append(comment)\n \n for test in group.tests:\n source = cgi.escape(test.source)\n source = source.decode('utf8').encode('ascii', 'xmlcharrefreplace')\n \n expected = cgi.escape(test.expected)\n expected = expected.decode('utf8').encode('ascii', 'xmlcharrefreplace')\n \n testContent = testTemplate % {\"source\": source, \"expected\": expected}\n sections.append(testContent)\n \n pageContent = \"\\n\".join(sections)\n \n return pageTemplate % {\"title\": fileName, \"content\": pageContent}\n\n\nindexTemplate = \"\"\"\n<html>\n<head>\n<title>Up Tests</title>\n<style type=\"text/css\">@import \"tests.css\"</style>\n</head>\n<body>\n%(content)s\n</body>\n</html>\n\"\"\"\n \npageTemplate = \"\"\"\n<html>\n<head>\n<title>%(title)s</title>\n<style type=\"text/css\">@import \"tests.css\"</style>\n</head>\n<body>\n<div class=\"content\">\n%(content)s\n</div>\n</body>\n</html>\n\"\"\"\n\ntestTemplate = \"\"\"\n<table class=\"test-table\">\n<tr>\n<td class=\"test-source\"><code>%(source)s</code></td>\n<td class=\"test-expected\"><code>%(expected)s</code></td>\n</tr>\n</table>\n\"\"\"\n\nlinkTemplate = \"\"\"\n<div><a href=\"%s\">%s</a></div>\n\"\"\"\n" }, { "alpha_fraction": 0.7370589971542358, "alphanum_fraction": 0.7370589971542358, "avg_line_length": 28.86153793334961, "blob_id": 
"335b27d8dec3647d3e99921171a8c44ea32a5185", "content_id": "b4208280ec6048387a5da319d8c0e77b254da26b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3883, "license_type": "permissive", "max_line_length": 91, "num_lines": 130, "path": "/src/vm/include/UpContext.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPCONTEXT_H\n#define UP_UPCONTEXT_H\n\n#include \"Up/UpGlobal.h\"\n\ntypedef struct {\n UpClass* objectClass;\n UpClass* classClass;\n UpClass* functionClass;\n UpClass* cfunctionClass;\n UpClass* cpointerClass;\n UpClass* exceptionClass;\n UpClass* channelClass;\n UpClass* nullClass;\n UpClass* boolClass;\n UpClass* integerClass;\n UpClass* longClass;\n UpClass* floatClass;\n UpClass* stringClass;\n UpClass* listClass;\n UpClass* mapClass;\n} UpBuiltinClasses;\n\nextern UpContext* UpGlobalContext;\n\nUpContext* UpContextCreate();\nvoid UpContextShutdown(UpContext* self, UpStatus status);\n\nUpContext* UpSwitchContext(UpContext* context);\n\nUpObject* UpFalse();\nUpObject* UpTrue();\nUpObject* UpNull();\nUpObject* UpUndefined();\nUpObject* UpClosed();\nUpObject* UpEval();\n\nUpObject* UpTrueOrFalse(bool truth);\nUpString* UpEmptyString();\n\nchar* UpFormatString(const char* str, ...);\n\nUpArena* UpGetHeap();\n\nchar* UpGetVersion();\n\nUpSymbol UpGetSymbol(const char* name);\nconst char* UpGetSymbolName(UpSymbol symbol);\nvoid UpReleaseSymbols();\n\nint UpGetRecursionLimit();\nvoid UpSetRecursionLimit(int limit);\n\nUpArray* UpGetSearchPaths();\n\nUpBuiltinClasses* UpGetBuiltinClasses();\n\nUpInteger** UpGetSharedIntegers();\nUpLong** UpGetSharedLongs();\nUpFloat** UpGetSharedFloats();\n\nvoid UpDisableEventLoop(bool disabled);\n\n/**\n * stream is a FILE*, but here it's void* to avoid including <stdio.h>\n */\nvoid* UpGetLogStream();\nvoid UpSetLogStream(void* stream);\n\nvoid UpLog(const char* text);\nvoid UpPrintf(const char* fmt, ...);\nvoid UpWarn(const 
char* fmt, ...);\n\nvoid UpEnableProbe(const char* probeName, bool enabled);\nvoid UpProbe(void* probe, size_t size);\nvoid UpSetProbeDumpPath(const char* path);\n\nchar* UpGetFileConstructorName(const char* path);\n\nUpCompileFrame* UpGetCompileFrame();\nvoid UpPushCompilation(const char* path, const char* moduleName);\nvoid UpPopCompilation(UpContext* self);\nvoid UpSetCompileLocation(int line, int column);\n\nUpException* UpGetError();\nUpException* UpClaimError();\nvoid UpSetException(UpException* exception);\nUpException* UpSetError(const char* description, ...);\nvoid UpSetErrorFrame(UpCallFrame* frame);\n\nUpIntTable* UpGetModuleMap();\nUpObject* UpGetBuiltinModule();\nvoid UpSetBuiltinModule(UpObject* module);\n\n/**\n * Name must be the fully-normalized name including dots.\n */\nconst char* UpFindModulePath(const char* moduleName);\n\n/**\n * Loads, compiles, and caches a module but does not execute it.\n */\nUpStatus UpImport(UpSymbol name, UpObject** outModule, UpFunctionDef** outDef,\n UpClassDef** outClassDef);\nUpObject* UpGetImport(UpSymbol name);\nvoid UpSetImport(UpSymbol name, UpObject* module);\n\nUpCLibrary* UpGetCLibrary(const char* name);\n\nUpStatus UpExecuteModule(const char* moduleName, UpObject** outModule);\nUpStatus UpExecuteFile(const char* sourcePath, const char* moduleName,\n char writeToDisk, UpObject** outModule);\nUpStatus UpExecuteSource(const char* source, const char* sourcePath,\n const char* moduleName, UpObject** outModule);\nUpStatus UpContextRun(UpObject** outModule);\n\nUpStatus UpCompileModule(const char* moduleName, UpFunctionDef** outDef,\n UpClassDef** outClassDef);\nUpStatus UpCompileFile(const char* sourcePath, const char* moduleName,\n char writeToDisk, UpFunctionDef** outDef, UpClassDef** outClassDef);\nUpStatus UpCompileSource(const char* source, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef);\nUpStatus UpCompileEval(const char* source, UpFunctionDef* caller,\n UpFunctionDef** 
outDef);\n\nUpStatus UpParseModule(const char* moduleName, UpSyntax** out);\nUpStatus UpParseFile(const char* sourcePath, UpSyntax** out);\nUpStatus UpParseSource(const char* source, UpSyntax** out);\n\n#endif // UP_UPCONTEXT_H\n" }, { "alpha_fraction": 0.6901840567588806, "alphanum_fraction": 0.6901840567588806, "avg_line_length": 14.476190567016602, "blob_id": "d7dc32f2b4e2d2bd957287301d88ebf041441a47", "content_id": "a0e5e9c62cc0d5dae4bbf9ebb8a145dc42bdc50c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 326, "license_type": "permissive", "max_line_length": 27, "num_lines": 21, "path": "/src/vm/include/Up.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UP_H\n#define UP_UP_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include \"Up/UpGlobal.h\"\n#include \"Up/UpDebug.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpFunction.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpException.h\"\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // UP_UP_H\n" }, { "alpha_fraction": 0.6533464193344116, "alphanum_fraction": 0.6538923382759094, "avg_line_length": 35.20158004760742, "blob_id": "09689f1fc2b022afe7a1e26e54cb8ec5b5f649e5", "content_id": "a81b9e76e885383d013d1cac3815bf22ed59b724", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9159, "license_type": "permissive", "max_line_length": 99, "num_lines": 253, "path": "/src/vm/UpParsing.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPPARSING_H\n#define UP_UPPARSING_H\n\n#include \"Up/UpGlobal.h\"\n#include \"Up/UpContext.h\"\n#include \"UpSyntax.h\"\n#include \"UpScanner.h\"\n\n// ************************************************************************************************\n// Overridding definitions in the Flex scanner\n\n#define YY_DECL \\\n int UpScan(YYSTYPE* 
yylval, YYLTYPE* yylloc, UpScanner* scanner, yyscan_t yyscanner)\n\n#define YY_INPUT(buf, result, max_size) \\\n { \\\n result = UpScannerRead(Upget_extra(yyscanner), buf, max_size); \\\n }\n\n#define YY_EXTRA_TYPE UpScanner*\n\n// ************************************************************************************************\n// Flex states\n\nint UpParseState;\nint CParseState;\nint TextParseState;\nint SelectorParseState;\n\nvoid UpPushState(int new_state, void* yyscanner);\nvoid UpPopState(void* yyscanner);\n\n// ************************************************************************************************\n// Callbacks from Bison\n\nint Uplex(YYSTYPE* yylval, YYLTYPE* yylloc, UpScanner* scanner);\nvoid Uperror(YYLTYPE* yylloc, UpScanner* scanner, const char *str);\n\n// ************************************************************************************************\n\nchar* UpStripPrefix(char* text);\nchar* UpParseUnit(char* text);\nchar* UpParseString(char* text, char quote, bool trailing, char** outSpecialty);\nUpStatus UpUnescapeString(UpScanner* scanner, char* text);\nchar* UpRemoveTrailingWhitespace(char* text);\n \nbool UpToBool(char* str);\nint UpToInt(char* str);\nlong long UpToLongLong(char* str);\ndouble UpToDouble(char* str);\nint UpHexToInt(char* str);\nlong long UpHexToLong(char* str);\n\n// ************************************************************************************************\n\n#define UPSCANNER (scanner)\n#define UPSCANNERHEAP (UpGetHeap())\n\n#define SYNTAX_ERROR \\\n UpScannerSetError(UPSCANNER, \"Invalid syntax\"); \\\n CONSUME(ERROR_TOKEN);\n\n#define LOCATE() \\\n UpScannerLocate(UPSCANNER, yytext, (yyltype*)yylloc);\n\n#define CONSUME(_TOKEN) \\\n if (_TOKEN) return _TOKEN;\n\n#define TOKEN(_TOKEN) \\\n LOCATE(); CONSUME(_TOKEN); \\\n\n#define ID_TOKEN(_TOKEN) \\\n LOCATE(); \\\n yylval->stringValue = UpArenaCopyString(UPSCANNERHEAP, yytext); \\\n CONSUME(_TOKEN);\n\n#define BID_TOKEN(_TOKEN) \\\n LOCATE(); \\\n 
yylval->stringValue = UpArenaCopyString(UPSCANNERHEAP, UpStripPrefix(yytext)); \\\n CONSUME(_TOKEN);\n\n#define ERROR_TOKEN -1\n\n// ************************************************************************************************\n\n#define NODE(_TYPE, _LOC) \\\n UpCreateSyntaxWithType(UPSCANNERHEAP, _TYPE, (_LOC).first_line, (_LOC).first_column)\n\n#define PARSE_1(_TYPE, _LOC, _A) \\\n UpParse1(UPSCANNERHEAP, NODE(_TYPE, _LOC), _A)\n\n#define PARSE_2(_TYPE, _LOC, _A, _B) \\\n UpParse2(UPSCANNERHEAP, NODE(_TYPE, _LOC), _A, _B)\n\n#define PARSE_SET(_LOC) \\\n NODE(UpSetSyntaxType, _LOC)\n\n#define ENSURE_SET(_SET) \\\n UpSyntaxSetEnsure(UPSCANNERHEAP, _SET)\n\n#define APPEND(_SET, _ITEM) \\\n UpSyntaxSetAppend(UPSCANNERHEAP, _SET, _ITEM)\n\n#define PARSE_GROUP(_LOC, _EXPR) \\\n UpParseGroup(UPSCANNERHEAP, _EXPR, (_LOC).first_line, (_LOC).first_column)\n\n#define PARSE_DECLARATION(_LOC, _ACCESS, _HEAD, _BODY, _WHERE) \\\n UpParseDeclaration(UPSCANNERHEAP, NODE(UpDeclarationSyntaxType, _LOC), _ACCESS, _HEAD, _BODY, \\\n _WHERE)\n\n#define PARSE_FUNCTION(_LOC, _HEAD, _BODY, _IMP) \\\n UpParseFunction(UPSCANNERHEAP, UPSCANNER, NODE(UpFunctionSyntaxType, _LOC), _HEAD, _BODY, \\\n _IMP)\n\n#define PARSE_ASSIGNMENT(_LOC, _OP, _LEFT, _RIGHT) \\\n UpParseAssignment(UPSCANNERHEAP, UPSCANNER, NODE(UpAssignmentSyntaxType, _LOC), \\\n _OP, _LEFT, _RIGHT)\n\n#define PARSE_ITERATOR(_LOC, _LEFT, _ITERABLE, _CLAUSE, _BODY, _ISON, _ISWHILE) \\\n UpParseIterator(UPSCANNERHEAP, UPSCANNER, NODE(UpIteratorSyntaxType, _LOC), _LEFT, \\\n _ITERABLE, _CLAUSE, _BODY, _ISON, _ISWHILE, false)\n\n#define PARSE_MAPPER(_LOC, _LEFT, _CLAUSE, _BODY, _ISON, _ISWHILE) \\\n UpParseIterator(UPSCANNERHEAP, UPSCANNER, NODE(UpIteratorSyntaxType, _LOC), _LEFT, \\\n NULL, _CLAUSE, _BODY, _ISON, _ISWHILE, true)\n\n#define PARSE_BINARY(_LOC, _OP, _LEFT, _RIGHT) \\\n UpParseBinary(UPSCANNERHEAP, UPSCANNER, NODE(UpBinarySyntaxType, _LOC), _OP, _LEFT, _RIGHT)\n\n#define PARSE_UNARY(_LOC, _OP, _OPERAND) \\\n 
UpParseUnary(UPSCANNERHEAP, UPSCANNER, NODE(UpUnarySyntaxType, _LOC), _OP, _OPERAND)\n\n#define PARSE_IMPORT(_LOC, _NAMES) \\\n UpParseImport(UPSCANNERHEAP, NODE(UpImportSyntaxType, _LOC), _NAMES)\n\n#define PARSE_WILDCARD(_IMPORT) \\\n UpParseWildcard(UPSCANNERHEAP, _IMPORT)\n\n#define PARSE_CALL(_LOC, _CALLABLE, _ARGS) \\\n UpParseCall(UPSCANNERHEAP, NODE(UpCallSyntaxType, _LOC), _CALLABLE, _ARGS, false, NULL)\n\n#define PARSE_CALL_TASK(_LOC, _CALLABLE, _ARGS, _IMP, _SCHEDULE) \\\n UpParseCall(UPSCANNERHEAP, NODE(UpCallSyntaxType, _LOC), _CALLABLE, _ARGS, _IMP, _SCHEDULE)\n\n#define PARSE_PROPERTY(_LOC, _LEFT, _RIGHT) \\\n UpParseProperty(UPSCANNERHEAP, NODE(UpPropertySyntaxType, _LOC), _LEFT, _RIGHT)\n\n#define PARSE_RANGE(_LOC, _FROM, _TO, _BY, _ISTHROUGH) \\\n UpParseRange(UPSCANNERHEAP, NODE(UpRangeSyntaxType, _LOC), _FROM, _TO, _BY, _ISTHROUGH)\n\n#define PARSE_DEFAULT(_LOC, _VALUE, _DEFAULT) \\\n UpParseDefault(UPSCANNERHEAP, NODE(UpDefaultSyntaxType, _LOC), _VALUE, _DEFAULT)\n\n#define PARSE_ID(_LOC, _NAME) \\\n UpParseId(UPSCANNERHEAP, NODE(UpIdSyntaxType, _LOC), _NAME)\n\n#define PARSE_TYPEID(_LOC, _NAME) \\\n UpParseId(UPSCANNERHEAP, NODE(UpTypeIdSyntaxType, _LOC), _NAME)\n\n#define PARSE_UNDEFINED(_LOC) \\\n NODE(UpUndefinedSyntaxType, _LOC)\n\n#define PARSE_INT(_LOC, _VALUE, _UNIT) \\\n UpParseInt(UPSCANNERHEAP, NODE(UpIntSyntaxType, _LOC), _VALUE, _UNIT)\n\n#define PARSE_LONG(_LOC, _VALUE, _UNIT) \\\n UpParseLong(UPSCANNERHEAP, NODE(UpLongSyntaxType, _LOC), _VALUE, _UNIT)\n\n#define PARSE_ULONG(_LOC, _VALUE, _UNIT) \\\n UpParseULong(UPSCANNERHEAP, NODE(UpLongSyntaxType, _LOC), _VALUE, _UNIT)\n\n#define PARSE_FLOAT(_LOC, _VALUE, _UNIT) \\\n UpParseFloat(UPSCANNERHEAP, NODE(UpFloatSyntaxType, _LOC), _VALUE, _UNIT)\n\n#define PARSE_STR(_LOC, _VALUE, _SPECIALTY) \\\n UpParseStr(UPSCANNERHEAP, _VALUE, _SPECIALTY, (_LOC).first_line, (_LOC).first_column)\n\n#define PARSE_LIST(_LOC, _EXPR) \\\n UpParseList(UPSCANNERHEAP, NODE(UpListSyntaxType, _LOC), 
_EXPR)\n\n#define PARSE_MAP(_LOC, _EXPR) \\\n UpParseMap(UPSCANNERHEAP, NODE(UpMapSyntaxType, _LOC), _EXPR)\n\n#define PARSE_CHANNEL(_LOC, _TYPESIG) \\\n UpParseChannel(UPSCANNERHEAP, NODE(UpChannelSyntaxType, _LOC), _TYPESIG)\n\n#define PARSE_CFUNCTION(_LOC, _NAME, _RET, _ARGS) \\\n UpParseCFunction(UPSCANNERHEAP, NODE(UpCFunctionSyntaxType, _LOC), _NAME, _RET, _ARGS)\n\n#define PARSE_CTYPE(_LOC, _NAME) \\\n UpParseCType(UPSCANNERHEAP, NODE(UpCTypeSyntaxType, _LOC), _NAME)\n\n#define ADD_CTYPE_POINTER(_LOC, _CTYPE) \\\n UpAddCTypePointer(_CTYPE)\n\n#define PARSE_CARGUMENT(_LOC, _TYPE, _NAME) \\\n UpParseCArgument(UPSCANNERHEAP, NODE(UpCArgumentSyntaxType, _LOC), _TYPE, _NAME)\n\n#define PARSE_IF(_LOC, _TRANSFORMS, _ELSE) \\\n UpParseIf(UPSCANNERHEAP, NODE(UpIfSyntaxType, _LOC), _TRANSFORMS, _ELSE)\n\n#define PARSE_IS(_LOC, _SUBJECT, _TRANSFORMS) \\\n UpParseIs(UPSCANNERHEAP, NODE(UpIsSyntaxType, _LOC), _SUBJECT, _TRANSFORMS)\n\n#define PARSE_HAS(_LOC, _SUBJECT, _TRANSFORMS) \\\n UpParseHas(UPSCANNERHEAP, NODE(UpHasSyntaxType, _LOC), _SUBJECT, _TRANSFORMS)\n\n#define PARSE_TRANSFORM(_LOC, _CONDITION, _BODY) \\\n UpParseTransform(UPSCANNERHEAP, NODE(UpTransformSyntaxType, _LOC), _CONDITION, _BODY)\n\n#define APPEND_TRANSFORM(_LEFT, _RIGHT) \\\n UpAppendTransform(_LEFT, _RIGHT)\n\n#define PARSE_WHILE(_LOC, _CONDITION, _BODY) \\\n UpParseWhile(UPSCANNERHEAP, NODE(UpWhileSyntaxType, _LOC), _CONDITION, _BODY)\n\n#define PARSE_FOR(_LOC, _LEFT, _RIGHT, _BODY) \\\n UpParseFor(UPSCANNERHEAP, NODE(UpForSyntaxType, _LOC), _LEFT, _RIGHT, _BODY)\n\n#define APPEND_ARGS(_HEAD, _ARGS) \\\n UpAppendArgs(UPSCANNERHEAP, UPSCANNER, _HEAD, _ARGS)\n\n#define APPEND_ARG(_ARG, _ARGS) \\\n UpAppendArg(UPSCANNERHEAP, _ARG, _ARGS)\n \n#define PARSE_ARG(_LOC, _NAME, _EXPR, _NEXT) \\\n UpParseArgument(UPSCANNERHEAP, NODE(UpArgumentSyntaxType, _LOC), _NAME, _EXPR, false, _NEXT)\n\n#define PARSE_VARG(_LOC, _EXPR) \\\n UpParseArgument(UPSCANNERHEAP, NODE(UpArgumentSyntaxType, _LOC), NULL, _EXPR, 
true, NULL)\n\n#define PARSE_WHERE(_LOC, _BODY, _ASSIGNMENTS) \\\n UpParseWhere(UPSCANNERHEAP, NODE(UpWhereSyntaxType, _LOC), _BODY, _ASSIGNMENTS)\n\n#define PARSE_TRY(_LOC, _TRY, _CATCH, _FINALLY) \\\n UpParseTry(UPSCANNERHEAP, NODE(UpTrySyntaxType, _LOC), _TRY, _CATCH, _FINALLY)\n\n#define PARSE_CATCH(_LOC, _BINDING, _STATEMENTS) \\\n UpParseCatch(UPSCANNERHEAP, NODE(UpCatchSyntaxType, _LOC), _BINDING, _STATEMENTS)\n\n#define PARSE_CAST(_LOC, _EXPR, _TYPE) \\\n UpParseCast(UPSCANNERHEAP, NODE(UpCastSyntaxType, _LOC), _EXPR, _TYPE)\n\n#define APPEND_TYPE(_LEFT, _RIGHT) \\\n UpAppendType(UPSCANNERHEAP, _LEFT, _RIGHT)\n\n#define PARSE_SUBTYPE(_LOC, _LEFT, _RIGHT) \\\n UpParseSubtype(UPSCANNERHEAP, NODE(UpSubtypeSyntaxType, _LOC), _LEFT, _RIGHT)\n\n#define PARSE_PRIVATE_CFUNCTION(_CFUNC) \\\n UpMakeCFunctionsPrivate(_CFUNC)\n\n#endif // UP_UPPARSING_H\n" }, { "alpha_fraction": 0.5676621794700623, "alphanum_fraction": 0.567868173122406, "avg_line_length": 34.9555549621582, "blob_id": "51c6d378a9b4c641047d7022c2acf141c74ca217", "content_id": "4f5551c88cc8870bd6b18ae451b4c9ec4760252f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4855, "license_type": "permissive", "max_line_length": 102, "num_lines": 135, "path": "/make/test/TestFixture.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport inspect, types, sys, os.path, re, glob\nfrom .TestFileFunc import TestFileFunc\nfrom utils import testOutputPath, findModule, testFunctionPrefix, testFileFunctionHiddenName\nfrom utils import functionNameToTestFilePattern, testFileFunctionName\n\n# **************************************************************************************************\n\nclass TestFixture:\n metadata = None\n\n class __metaclass__(type):\n def __new__(cls, *args):\n newClass = super(cls, cls).__new__(cls, *args)\n\n for name in dir(newClass):\n attr = getattr(newClass, name)\n if type(attr) == 
types.UnboundMethodType:\n args, varags, varkwds, defaults = inspect.getargspec(attr)\n if len(args) < 2:\n continue\n\n functionName = testFunctionPrefix(name)\n if not functionName:\n continue\n\n delattr(newClass, name)\n hiddenName = testFileFunctionHiddenName(functionName)\n setattr(newClass, hiddenName, attr)\n\n # Create a new function for every test file whose name matches the class name\n pattern = functionNameToTestFilePattern(newClass, functionName)\n for filePath in glob.glob(pattern):\n fnName = testFileFunctionName(filePath)\n filePath = os.path.abspath(filePath)\n setattr(newClass, fnName, TestFileFunc(newClass, filePath, hiddenName))\n\n return newClass\n\n def __new__(cls, *args, **kwds):\n obj = object.__new__(cls, *args)\n for name in dir(obj):\n value = getattr(obj, name)\n if isinstance(value, TestFileFunc):\n setattr(obj, name, TestFileFunc(fixture=obj, filePath=value.testFilePath,\n methodName=value.methodName))\n return obj\n\n def __init__(self, testName=None, writer=None, *args, **kwds):\n self.testName = testName\n self.writer = writer\n for name,value in kwds.iteritems():\n setattr(self, name, value)\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def assert_(self, truth):\n if not truth:\n raise TestException(\"Assertion\")\n\n def assertEqual(self, actual, expected):\n if not actual == expected:\n raise TestException(\"Equality Assertion\", Actual=str(actual), Expected=str(expected))\n\n def assertEqualString(self, actual, expected):\n a = str(actual).strip()\n e = str(expected).strip()\n if not a == e:\n raise TestException(\"Equality Assertion\", Actual=a, Expected=e)\n\n def assertNotEqual(self, actual, expected):\n if actual == expected:\n raise TestException(\"Not Equal Assertion\", Actual=str(actual), Expected=str(expected))\n\n def assertIn(self, item, container):\n if not item in container:\n raise TestException(\"Contained Assertion\", item=str(item), container=str(container))\n\n def assertNotIn(self, item, 
container):\n if item in container:\n raise TestException(\"Not Contained Assertion\",\n item=str(item), container=str(container))\n\n def assertType(self, actual, expected):\n if not isinstance(actual, expected):\n raise TestException(\"Type Assertion\", Actual=str(actual), Expected=str(expected))\n\n def assertException(self, expected):\n exceptionPrefix = \"Exception:\"\n if expected.startswith(exceptionPrefix):\n actual = \"%s %s\" % (exceptionPrefix, sys.exc_value)\n if not actual == expected:\n # logException(log)\n raise TestException(\"Exception Assertion\", Actual=str(actual), Expected=str(expected))\n else:\n raise# sys.exc_value\n\n def logAttributes(self, object, *names):\n lines = []\n for name in names:\n value = getattr(object, name)\n line = \"%s: %s\" % (name, value)\n lines.append(line)\n\n result = \"\\n\".join(lines)\n print result, \"\\n\"\n\n def warn(self, message):\n print message\n\n def fail(self):\n raise TestException(\"Failure\")\n\n# **************************************************************************************************\n\nclass TestException(Exception):\n def __init__(self, title, **vars):\n self.title = title\n self.vars = vars\n\nclass TestAbortException(Exception):\n def __init__(self, message=\"\"):\n self.message = message\n\nclass RunException(Exception):\n def __init__(self, exePath, code, errors, out, args=None, source=None):\n self.exePath = exePath\n self.code = code\n self.errors = errors\n self.out = out\n self.args = args\n self.source = source\n" }, { "alpha_fraction": 0.508484959602356, "alphanum_fraction": 0.5120970606803894, "avg_line_length": 30.28358268737793, "blob_id": "7f3ca91db5a97db2392a1b3b18fa32d788836126", "content_id": "9fdb1c40c788892ebc81b34d72ec610798f2728c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14673, "license_type": "permissive", "max_line_length": 105, "num_lines": 469, "path": "/make/build/Project.py", 
"repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport sys, os.path, traceback, stat, re\nfrom ..util import rootProjectPath, projects, projectPaths, exports\nfrom ..util import sourceLink, getSystemAbbreviation, BuildError\nfrom ..Message import opener, closer\nfrom .Resource import res\n\n# **************************************************************************************************\n\nrePch = re.compile(\"(^pch\\.h|\\.pch)$\")\n\n# **************************************************************************************************\n\nclass Project(object):\n sources = None\n\n made = False\n external = False\n\n path = None\n build = None\n version = None\n description = None\n url = None\n author = None\n authorEmail = None\n\n buildPre = None\n build = None\n buildPost = None\n\n install = None\n\n enableProbes = False\n showCommands = False\n formatOutput = True\n installLink = False\n\n platform = None\n device = None\n sdk = None\n arch = None\n\n buildPath = None\n installPath = None\n\n compilerPath = None\n linkerPath = None\n ranlibPath = None\n\n optimize = \"debug\"\n warningLevel = 4\n pedantic = False\n linkerFlags = \"\"\n compilerFlags = \"\"\n gitVersion = \"\"\n\n defines = {}\n\n _dependencies = None\n\n def __init__(self, name, fn):\n self.name = name\n self.fn = fn\n\n self.deps = {}\n self.defines = dict(self.defines)\n\n self.exclude = []\n self.debugSources = []\n\n self.alwaysBuild = [] # Source files to rebuild every time\n self.neverLink = [] # Target files to never link\n\n self.includes = []\n self.libs = []\n self.frameworks = []\n self.ignoreFrameworks = []\n self.ignoreLibs = []\n\n self.frameworkPaths = []\n self.exports = {}\n\n def __repr__(self):\n return '<project %s>' % self.path\n\n @classmethod\n def initWithConfig(self):\n if not sys.platform == \"darwin\" and not sys.platform == \"win32\":\n self.defines[\"_REENTRANT\"] = True\n\n if sys.platform == \"darwin\":\n self.defines[\"DARWIN\"] = True\n\n if 
self.platform == 'ios':\n developerPath = self.osxDeveloper\n if not developerPath:\n raise Exception, \"OSX_DEVELOPER environment variable not specified.\"\n\n if \"Simulator\" in self.sdk:\n self.platformPath = \"%s/Platforms/iPhoneSimulator.platform\" % (developerPath)\n else:\n self.platformPath = \"%s/Platforms/iPhoneOS.platform\" % (developerPath)\n\n self.sdkPath = '%s/Developer/SDKs/%s.sdk' % (self.platformPath, self.sdk)\n self.compilerPath = \"%s/Developer/usr/bin/gcc-4.2\" % self.platformPath\n self.linkerPath = \"ar\"\n self.ranlibPath = \"ranlib\"\n\n self.compilerFlags = ' '.join([\n ('-arch %s' % self.arch) if self.arch else '',\n '-isysroot %s' % self.sdkPath,\n '-fobjc-abi-version=2',\n '-fobjc-legacy-dispatch',\n '-pipe',\n '-std=c99',\n '-fmessage-length=0',\n '-fpascal-strings',\n # '-fasm-blocks',\n '-Wno-trigraphs',\n '-Wreturn-type',\n '-Wunused-variable',\n ])\n\n self.defines[\"__IPHONE_OS_VERSION_MIN_REQUIRED\"] = 3020\n self.defines[\"IOS\"] = 1\n\n elif self.platform == 'mac':\n self.compilerFlags = ' '.join([\n ('-arch %s' % self.arch) if self.arch else '',\n '-fobjc-abi-version=2',\n # '-fobjc-legacy-dispatch',\n '-pipe',\n '-fmessage-length=0',\n '-fpascal-strings',\n '-fasm-blocks',\n '-Wno-trigraphs',\n '-Wreturn-type',\n '-Wunused-variable',\n '-DSK_RELEASE',\n '-DMAC',\n ])\n\n elif self.platform == 'android':\n ndkPath = self.androidNDK\n if not ndkPath:\n raise Exception, \"ANDROID_NDK environment variable not specified.\"\n sourcePath = self.androidSource\n if not sourcePath:\n raise Exception, \"ANDROID_SOURCE environment variable not specified.\"\n\n self.compilerPath = \"%s/build/prebuilt/darwin-x86/arm-eabi-4.4.0/bin/arm-eabi-gcc\" % ndkPath\n self.linkerPath = \"%s/build/prebuilt/darwin-x86/arm-eabi-4.4.0/bin/arm-eabi-ar\" % ndkPath\n self.ranlibPath = \"%s/build/prebuilt/darwin-x86/arm-eabi-4.4.0/bin/arm-eabi-ranlib\" % ndkPath\n\n self.includePaths = [\n '-I%s/frameworks/base/core/jni/android/graphics' % sourcePath,\n 
'-I%s/frameworks/base/include' % sourcePath,\n '-I%s/system/core/include' % sourcePath,\n '-I%s/external/skia/include/core' % sourcePath\n ]\n\n self.libPaths = [\n '-L%s/out/target/product/generic/system/lib' % sourcePath\n ]\n\n self.compilerFlags = ' '.join([\n ('-march=%s' % self.arch) if self.arch else '',\n '-I%s/build/platforms/%s/arch-arm/usr/include' % (ndkPath, self.sdk),\n '-mtune=xscale',\n # '-msoft-float',\n '-mfloat-abi=softfp',\n '-mfpu=neon',\n '-mthumb-interwork',\n '-mthumb',\n '-fpic',\n '-fno-rtti',\n '-std=gnu99',\n '-ffunction-sections',\n '-funwind-tables',\n '-fstack-protector',\n '-fno-short-enums',\n '-fomit-frame-pointer',\n '-fno-strict-aliasing',\n '-finline-limit=64',\n '-fno-exceptions',\n '-D__ARM_ARCH_5__',\n '-D__ARM_ARCH_5T__',\n '-D__ARM_ARCH_5E__',\n '-D__ARM_ARCH_5TE__',\n '-DANDROID',\n '-DSK_RELEASE',\n ] + self.includePaths)\n\n self.linkerFlags = ' '.join([\n '-nostdlib',\n '-lc',\n '-lm',\n '-lstdc++',\n '-llog',\n '-landroid_runtime',\n '-lskia',\n '-L%s/build/platforms/%s/arch-arm/usr/lib' \\\n % (ndkPath, self.sdk),\n '-Wl,--no-whole-archive',\n '-Wl,-rpath=%s/build/platforms/%s/arch-arm/usr/lib' \\\n % (ndkPath, self.sdk),\n '-Wl,--no-undefined',\n '-Wl,-rpath-link=%s/build/platforms/%s/arch-arm/usr/lib' \\\n % (ndkPath, self.sdk),\n '%s/build/prebuilt/darwin-x86/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.2.1/interwork/libgcc.a' \\\n % ndkPath,\n ] + self.libPaths)\n\n if self.buildPath:\n buildPath = os.path.abspath(self.buildPath)\n self.buildRootPath = os.path.join(buildPath, self.getPlatformName(), self.getBuildName())\n\n def normalize(self):\n if self.path:\n self.path = os.path.abspath(self.path)\n projectPaths[self.path] = self\n\n self.buildPath = self.getBuildPath()\n\n for includePath,shorthand in self.exports.iteritems():\n includePath = os.path.join(self.path, includePath)\n if not self.external:\n symlinkPath = os.path.join(self.path, shorthand)\n else:\n symlinkPath = shorthand\n\n symlinkPath = 
self.getBuildPath(symlinkPath)\n symlinkDir = os.path.dirname(symlinkPath)\n if not os.path.exists(symlinkDir):\n os.makedirs(symlinkDir)\n if not os.path.islink(symlinkPath):\n os.symlink(includePath, symlinkPath)\n\n exports[shorthand] = self\n\n @classmethod\n def getPlatformName(self):\n names = []\n if self.platform:\n names.append(self.platform)\n if self.sdk:\n names.append(self.sdk)\n if names:\n return \"_\".join(names)\n else:\n return getSystemAbbreviation()\n\n @classmethod\n def getBuildName(self):\n if not self.optimize or self.optimize == \"debug\":\n return \"debug\"\n else:\n return \"release\"\n\n def getBuildPath(self, targetPath=None):\n if self.external:\n rootPath = os.path.join(self.buildRootPath, \"external\", self.name)\n if targetPath:\n return os.path.join(rootPath, targetPath)\n else:\n return rootPath\n else:\n if not targetPath:\n targetPath = self.path\n\n if targetPath.startswith(rootProjectPath):\n relPath = targetPath[len(rootProjectPath)+1:] # +1 for the trailing slash\n if relPath:\n return os.path.join(self.buildRootPath, relPath)\n else:\n return self.buildPath\n else:\n return targetPath\n\n def getSources(self):\n if self.sources:\n return self.sources\n\n sources = self._readSources()\n self.sources = self._sortSources(sources)\n return self.sources\n\n def getDependencies(self):\n if self._dependencies is not None:\n return self._dependencies\n\n # Store this here to prevent infinite loop if there is a circular reference\n self._dependencies = []\n\n deps = {}\n\n for source in self.getSources():\n for header in source.dependencies:\n if header.project and not deps.get(header.project.name):\n deps[header.project.name] = header.project\n\n self._dependencies = deps.values()\n return self._dependencies\n\n def getExport(self, includePath):\n includeDir = os.path.dirname(includePath)\n includeName = os.path.basename(includePath)\n for includePath,shorthand in self.exports.iteritems():\n if includeDir == shorthand:\n 
absoluteDir = os.path.join(self.path, includePath, includeName)\n return res(absoluteDir, project=self)\n\n def getIncludes(self):\n includes = []\n includes.extend(self.includes)\n\n for dep in self.getDependencies():\n if dep != self:\n includes.extend([name for name in dep.getIncludes() if name not in includes])\n\n return includes\n\n def getLibs(self):\n libs = []\n libs.extend(self.libs)\n\n for dep in self.getDependencies():\n if dep != self and dep not in self.ignoreLibs:\n libs.extend([name for name in dep.getLibs() if name not in libs])\n\n if dep.build:\n target = dep.build.getTarget(dep)\n if target and target not in libs:\n libs.append(target)\n\n return libs\n\n def getFrameworks(self):\n names = []\n names.extend(self.frameworks)\n\n for dep in self.getDependencies():\n for name in dep.frameworks:\n if name not in names and name not in self.ignoreFrameworks:\n names.append(name)\n\n return names\n\n def make(self, action, out):\n if self.external or self.made:\n return 0\n\n if not os.path.isdir(self.path):\n raise Exception, \"%s not found\" % self.path\n\n out << opener(\"Project\", self.path)\n\n self.made = True\n\n result = 0\n\n # Make sure the working directory is the project path so that all relative paths work\n cwd = os.getcwd()\n os.chdir(self.path)\n\n try:\n if action == \"build\":\n self.doBuild(out)\n elif action == \"clean\":\n self.doClean(out)\n elif action == \"install\":\n self.doBuild(out)\n self.doInstall(out)\n else:\n raise Exception(\"The action '%s' is not recognized\" % action)\n\n except BuildError,exc:\n result = exc.code\n\n except:\n exc = sys.exc_info()\n traceback.print_exception(*exc)\n result = 1\n\n os.chdir(cwd)\n\n out << closer()\n\n return result\n\n def doBuild(self, out):\n sources = self.getSources()\n\n if self.buildPre:\n self.buildPre.build(self, out, sources)\n if self.build:\n self.build.build(self, out, sources)\n if self.buildPost:\n self.buildPost.build(self, out, sources)\n\n def doClean(self, 
out):\n sources = self.getSources()\n\n if self.buildPre:\n self.buildPre.clean(self, out, sources)\n if self.build:\n self.build.clean(self, out, sources)\n if self.buildPost:\n self.buildPost.clean(self, out, sources)\n\n def doInstall(self, out):\n if self.buildPre:\n self.buildPre.install(self, out)\n if self.build:\n self.build.install(self, out)\n if self.buildPost:\n self.buildPost.install(self, out)\n\n def _readSources(self):\n sources = []\n\n def searchDir(dirPath):\n for name in os.listdir(dirPath):\n if name.startswith(\".\"):\n continue\n\n path = os.path.join(dirPath, name)\n stats = os.stat(path)\n if stat.S_ISDIR(stats.st_mode):\n if path not in projectPaths:\n searchDir(path)\n elif stat.S_ISREG(stats.st_mode):\n if name not in self.exclude:\n source = res(path, name, stats, project=self)\n sources.append(source)\n\n if not self.external:\n searchDir(self.path)\n\n return sources\n\n def _sortSources(self, sources):\n \"\"\"Sort the sources by last modification time so we first build the files you touched most\n recently, with the precompiled header first in the list.\"\"\"\n\n sorts = []\n pchFound = None\n\n for source in sources:\n if rePch.search(source.name):\n pchFound = source\n else:\n sorts.append(source)\n\n sorts = sorted(sorts, cmp=lambda a,b: cmp(a.stats.st_mtime, b.stats.st_mtime), reverse=True)\n\n if pchFound:\n sorts.insert(0, pchFound);\n\n return sorts\n\n# **************************************************************************************************\n\ndef project(fn):\n \"\"\" Decorator for project declarations.\"\"\"\n\n p = Project(fn.__name__, fn)\n projects.append(p)\n return p\n" }, { "alpha_fraction": 0.6546546816825867, "alphanum_fraction": 0.7207207083702087, "avg_line_length": 21.200000762939453, "blob_id": "1ad8af60ce51ddb91c4cd8c9209af58acc15a1b4", "content_id": "fa9e5a9c03c1d4238803c9c894acef23b4870af8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 333, "license_type": "permissive", "max_line_length": 151, "num_lines": 15, "path": "/README.md", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "Up\n====\n\nUp is a programming language that is being actively developed. Instructions to compile and use Up will come in the future when the language stabilizes.\n\nFor more information visit my blog at https://medium.com/@joehewitt\n\nRequirements\n------------\n\n1. Python 2.6\n2. bison 1.875\n3. flex 2.5.35\n4. libffi 3.0.11\n5. pcre 8.3.0\n" }, { "alpha_fraction": 0.35116949677467346, "alphanum_fraction": 0.3702338933944702, "avg_line_length": 25.670940399169922, "blob_id": "3c5210c4395c6a5bf7a2872951132cfaeefc3a9e", "content_id": "91fbbd6aa88c80ddb7ea7963f48e1d58ca562762", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6242, "license_type": "permissive", "max_line_length": 99, "num_lines": 234, "path": "/src/vm/UpParsing.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpParsing.h\"\n#include \"Up/UpCFunction.h\"\n#include \"UpParser.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic bool _IsIndentChar(char c) {\n return c == ' ' || c == '\\t';\n}\n\nstatic const char* _EncodeBinary(uint32_t x) {\n static char buf[sizeof(uint32_t)+1];\n\n char *s = buf + sizeof(uint32_t)+1;\n *--s = 0;\n\n if (!x) {\n *--s = '0';\n }\n for(; x; x/=2) {\n *--s = '0' + x%2;\n }\n\n return s;\n}\n\nstatic void _ShiftCopy(char* str, unsigned int shiftBy) {\n unsigned int i = 0;\n while (true) {\n char c = str[i+shiftBy];\n str[i] = c;\n if (c == 0) break;\n ++i;\n }\n}\n\n// ************************************************************************************************\n\nextern int UpScan(YYSTYPE* yylval, YYLTYPE* yylloc, UpScanner* scanner, void* yyscanner);\n\nint 
Uplex(YYSTYPE* yylval, YYLTYPE* yylloc, UpScanner* scanner) {\n int token = UpScannerNext(scanner, yylval, yylloc);\n // printf(\"token %d\\n\", token); fflush(stdout);\n return token;\n}\n\nvoid Uperror(YYLTYPE* yylloc, UpScanner* scanner, const char *str) {\n UpScannerSetError(scanner, str);\n}\n\n// ************************************************************************************************\n\nchar* UpStripPrefix(char* text) {\n return text+3;\n}\n\nchar* UpParseUnit(char* text) {\n for (; (*text >= '0' && *text <= '9') || *text == '-' || *text == '.'; ++text);\n return *text ? text : NULL;\n}\n\nchar* UpRemoveTrailingWhitespace(char* text) {\n int len = strlen(text);\n char* last = text + (len-1);\n for (char* c = last; c >= text; --c) {\n if (_IsIndentChar(*c)) {\n *c = 0;\n } else {\n break;\n }\n }\n return text;\n}\n\nchar* UpParseString(char* text, char quote, bool trailing, char** outSpecialty) {\n if (*text != quote) {\n *outSpecialty = text;\n\n while (*text != quote) {\n ++text;\n }\n *text = 0;\n } else {\n *outSpecialty = NULL;\n }\n\n // Skip past the leading quote\n ++text;\n\n int len = strlen(text);\n\n // Remove trailing quote\n text[len-1] = 0;\n\n if (!trailing) {\n for (char* c = text; *c; ++c) {\n if (!_IsIndentChar(*c)) {\n return text;\n }\n }\n\n return NULL;\n } else {\n return text;\n }\n}\n\nUpStatus UpUnescapeString(UpScanner* scanner, char* text) {\n if (!text) {\n return UpSuccess;\n }\n for (char* c = text; *c; ++c) {\n // Shift string left by one character for every backslash\n if (*c == '\\\\') {\n char cc = *(c+1);\n _ShiftCopy(c, 1);\n\n if (cc == '0') {\n *c = 0;\n } else if (cc == 'a') {\n *c = 0x07;\n } else if (cc == 'b') {\n *c = 0x08;\n } else if (cc == 'f') {\n *c = 0x08;\n } else if (cc == 'n') {\n *c = '\\n';\n } else if (cc == 'r') {\n *c = '\\r';\n } else if (cc == 't') {\n *c = '\\t';\n } else if (cc == 'v') {\n *c = '\\v';\n } else if (cc == '\\\\') {\n *c = '\\\\';\n } else if (cc == '\"') {\n *c = '\"';\n } 
else if (cc == '\\'') {\n *c = '\\'';\n } else if (cc == '?') {\n *c = '?';\n } else if (cc == '%') {\n *c = '%';\n } else if (cc == 'x') {\n if (*(c+1) && *(c+2)) {\n char code[3];\n code[0] = *(c+1);\n code[1] = *(c+2);\n code[2] = 0;\n\n _ShiftCopy(c, 2);\n *c = strtol(code, NULL, 16);\n } else {\n UpScannerSetError(scanner, \"Illegal escape sequence\");\n return UpFailure;\n }\n } else if (cc == 'u') {\n if (*(c+1) && *(c+2) && *(c+3) && *(c+4)) {\n char code[5];\n code[0] = *(c+1);\n code[1] = *(c+2);\n code[2] = *(c+3);\n code[3] = *(c+4);\n code[4] = 0;\n\n _ShiftCopy(c, 3);\n uint16_t value = strtol(code, NULL, 16);\n\n *c = (char)(value);\n *(c+1) = (char)(value >> 8);\n } else {\n UpScannerSetError(scanner, \"Illegal escape sequence\");\n return UpFailure;\n }\n } else if (cc == 'U') {\n if (*(c+1) && *(c+2) && *(c+3) && *(c+4)\n && *(c+5) && *(c+6) && *(c+7) && *(c+8)) {\n char code[9];\n code[0] = *(c+1);\n code[1] = *(c+2);\n code[2] = *(c+3);\n code[3] = *(c+4);\n code[4] = *(c+5);\n code[5] = *(c+6);\n code[6] = *(c+7);\n code[7] = *(c+8);\n code[8] = 0;\n\n _ShiftCopy(c, 5);\n uint32_t value = strtol(code, NULL, 16);\n\n *c = (char)(value);\n *(c+1) = (char)(value >> 8);\n *(c+2) = (char)(value >> 16);\n *(c+3) = (char)(value >> 24);\n } else {\n UpScannerSetError(scanner, \"Illegal escape sequence\");\n return UpFailure;\n }\n } else {\n UpScannerSetError(scanner, \"Illegal escape sequence\");\n return UpFailure;\n }\n }\n }\n return UpSuccess;\n}\n\nbool UpToBool(char* str) {\n return UpToInt(str) != 0;\n}\n\nint UpToInt(char* str) {\n return strtol(str, NULL, 10);\n}\n\nlong long UpToLongLong(char* str) {\n return strtoll(str, NULL, 10);\n}\n\ndouble UpToDouble(char* str) {\n return atof(str);\n}\n\nint UpHexToInt(char* str) {\n return strtol(str, NULL, 16);\n}\n\nlong long UpHexToLong(char* str) {\n return strtoll(str, NULL, 16);\n}\n" }, { "alpha_fraction": 0.5817626118659973, "alphanum_fraction": 0.5851587653160095, "avg_line_length": 
27.582523345947266, "blob_id": "319929f017dba33ab2da6ed8b2c351599c5f920d", "content_id": "a8d15e71d23a8715acece8eedd4a5b0c09cccb26", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5889, "license_type": "permissive", "max_line_length": 100, "num_lines": 206, "path": "/src/vm/UpList.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpArena.h\"\n\n// *************************************************************************************************\n\nstatic unsigned int _UpListItemsAllocationSize = 30;\n\n// *************************************************************************************************\n\nstatic void _UpListStore(UpList* self, UpIndex index, UpObject* value) {\n self->items[index] = value;\n}\n\nstatic void _UpListShiftItems(UpList* self, UpIndex index, bool reverse) {\n if (reverse) {\n for (UpIndex i = index; i < self->itemCount-1; ++i) {\n self->items[i] = self->items[i+1];\n }\n } else {\n for (UpIndex i = self->itemCount; i >= index && i != UpNotFound; --i) {\n self->items[i] = i > 0 ? 
self->items[i-1] : NULL;\n }\n }\n}\n\nstatic void _UpListResize(UpList* self, unsigned int newSize) {\n UpObject** oldItems = self->items;\n int oldSize = self->itemsAllocated;\n self->itemsAllocated = newSize;\n self->items = UpArenaResizeArray(UpGetHeap(), oldItems, oldSize, UpObject, newSize);\n}\n\nstatic void _UpListGrow(UpList* self) {\n _UpListResize(self, self->itemsAllocated + _UpListItemsAllocationSize);\n}\n\nstatic void _UpListShrink(UpList* self) {\n _UpListResize(self, self->itemsAllocated - _UpListItemsAllocationSize);\n}\n\nstatic int _NormalizeIndex(UpList* self, int index) {\n if (index < 0) {\n return self->itemCount + index;\n } else {\n return index;\n }\n}\n\n// ************************************************************************************************\n\nUpList* UpListCreate() {\n UpList* self = (UpList*)UpObjectCreateWithClass(UP_BUILTIN(list));\n self->itemsAllocated = _UpListItemsAllocationSize;\n self->items = UpArenaNewArray(UpGetHeap(), UpObject, self->itemsAllocated);\n return self;\n}\n\nUpList* UpListCreateWithList(UpList* self) {\n UpList* clone = (UpList*)UpObjectCreateWithClass(UP_BUILTIN(list));\n clone->itemsAllocated = self->itemsAllocated;\n clone->itemCount = self->itemCount;\n clone->items = UpArenaCopyArray(UpGetHeap(), self->items, self->itemsAllocated,\n UpObject, self->itemsAllocated);\n\n return clone;\n}\n\nunsigned int UpListCount(UpList* self) {\n return self->itemCount;\n}\n\nUpObject** UpListBuffer(UpList* self) {\n return self->items;\n}\n\nUpObject* UpListGet(UpList* self, UpIndex index, UpObject* defaultValue) {\n index = _NormalizeIndex(self, index);\n if (index < self->itemCount) {\n return self->items[index];\n } else if (defaultValue && defaultValue != UpUndefined()) {\n return defaultValue;\n } else {\n // return UpUndefined();//UpIntegerCreate(, 0);\n UpSetError(\"List index %d out of range\", index);\n return NULL;\n }\n}\n\nUpObject* UpListGetFront(UpList* self) {\n if (self->itemCount) {\n return 
self->items[0];\n } else {\n UpSetError(\"Empty list has no front\");\n return NULL;\n }\n}\n\nUpObject* UpListGetBack(UpList* self) {\n if (self->itemCount) {\n return self->items[self->itemCount-1];\n } else {\n UpSetError(\"Empty list has no back\");\n return NULL;\n }\n}\n\nUpIndex UpListFind(UpList* self, UpObject* value) {\n for (UpIndex i = 0; i < self->itemCount; ++i) {\n UpObject* item = self->items[i];\n if (item == value) {\n return i;\n }\n }\n return UpNotFound;\n}\n\nvoid UpListSet(UpList* self, UpIndex index, UpObject* value) {\n index = _NormalizeIndex(self, index);\n if (index < self->itemCount) {\n _UpListStore(self, index, value);\n } else {\n UpObject* undefined = UpUndefined();\n for (int i = self->itemCount; i < index; ++i) {\n UpListAppend(self, undefined);\n }\n UpListAppend(self, value);\n }\n}\n\nUpIndex UpListAppend(UpList* self, UpObject* value) {\n if (self->itemCount == self->itemsAllocated) {\n _UpListGrow(self);\n }\n\n _UpListStore(self, self->itemCount, value);\n return self->itemCount++;\n}\n\nvoid UpListInsert(UpList* self, UpIndex index, UpObject* value) {\n if (self->itemCount == self->itemsAllocated) {\n _UpListGrow(self);\n }\n index = _NormalizeIndex(self, index);\n if (index == self->itemCount) {\n _UpListStore(self, self->itemCount, value);\n ++self->itemCount;\n } else if (index < self->itemCount || index == UpNotFound) {\n _UpListShiftItems(self, index, false);\n self->items[index] = NULL;\n _UpListStore(self, index, value);\n ++self->itemCount;\n } else {\n UpListSet(self, index, value);\n }\n}\n\nint UpListRemove(UpList* self, UpObject* value) {\n UpIndex index = UpListFind(self, value);\n if (index != UpNotFound) {\n UpListRemoveAtIndex(self, index);\n return 1;\n }\n return 0;\n}\n\nint UpListRemoveAtIndex(UpList* self, UpIndex index) {\n if (index < self->itemCount) {\n self->items[index] = NULL;\n _UpListShiftItems(self, index, true);\n\n if (--self->itemCount - self->itemsAllocated == _UpListItemsAllocationSize) 
{\n _UpListShrink(self);\n }\n return 1;\n } else {\n UpSetError(\"List index %d out of range\", index);\n return 0;\n }\n}\n\nvoid UpListRemoveAll(UpList* self) {\n size_t size = sizeof(UpObject*) * self->itemsAllocated;\n memset(self->items, 0, size);\n\n self->itemCount = 0;\n}\n\nUpObject* UpListPop(UpList* self) {\n int count = UpListCount(self);\n if (count > 0) {\n UpObject* popped = UpListGet(self, count-1, NULL);\n UpListRemoveAtIndex(self, count-1);\n return popped;\n } else {\n UpSetError(\"Can't pop an empty list\");\n return NULL;\n }\n}\n" }, { "alpha_fraction": 0.5933653116226196, "alphanum_fraction": 0.5958690047264099, "avg_line_length": 28.04242515563965, "blob_id": "7aa3ea94c55469282bb91c2bc5a3c9c5c08f91aa", "content_id": "ed6a4e31bb913c5347228dd463aad7e66a3b6c09", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4793, "license_type": "permissive", "max_line_length": 100, "num_lines": 165, "path": "/src/vm/UpArray.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpArray.h\"\n#include \"Up/UpArena.h\"\n\n// *************************************************************************************************\n\nstatic unsigned int _UpArrayItemsAllocationSize = 32;\n\n// *************************************************************************************************\n\nstatic void* _UpArrayGet(UpArray* self, UpIndex index) {\n return self->items+(index*self->size);\n}\n\nstatic void _UpArrayStore(UpArray* self, UpIndex index, void* value) {\n // XXXjoe What happens if value is NULL?\n memcpy(self->items+(index*self->size), value, self->size);\n}\n\nstatic void _UpArrayShiftItems(UpArray* self, UpIndex index, bool reverse) {\n if (reverse) {\n int size = (self->itemCount-index)*self->size;\n memcpy(self->items+(index*self->size), self->items+((index+1)*self->size), size);\n } else {\n // XXXjoe Do this in one 
single memcpy!\n for (UpIndex i = self->itemCount; i >= index && i != UpNotFound; --i) {\n _UpArrayStore(self, i, i > 0 ? _UpArrayGet(self, i-1) : NULL);\n }\n }\n}\n\nstatic void _UpArrayResize(UpArray* self, unsigned int newCount) {\n char* oldItems = self->items;\n int oldCount = self->itemsAllocated;\n self->itemsAllocated = newCount;\n self->items = UpArenaReallocate(self->heap, self->size*newCount, oldItems, self->size*oldCount);\n}\n\nstatic void _UpArrayGrow(UpArray* self) {\n _UpArrayResize(self, self->itemsAllocated + _UpArrayItemsAllocationSize);\n}\n\nstatic void _UpArrayShrink(UpArray* self) {\n _UpArrayResize(self, self->itemsAllocated - _UpArrayItemsAllocationSize);\n}\n\n// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\nUpArray* UpArrayCreate(UpArena* heap, size_t size) {\n UpArray* self = UpArenaNew(heap, UpArray);\n self->heap = heap;\n self->size = size;\n self->itemCount = 0;\n self->itemsAllocated = _UpArrayItemsAllocationSize;\n self->items = UpArenaAllocate(heap, size*self->itemsAllocated, \"\");\n return self;\n}\n\nunsigned int UpArrayCount(UpArray* self) {\n return self->itemCount;\n}\n\nvoid* UpArrayBuffer(UpArray* self) {\n return self->items;\n}\n\nvoid UpArrayCopyBuffer(UpArray* self, void** buffer, unsigned int* outCount) {\n size_t size = self->size * self->itemCount;\n *buffer = UpArenaAllocateCopy(self->heap, size, self->items, size);\n *outCount = self->itemCount;\n}\n\nbool UpArrayGet(UpArray* self, UpIndex index, void* out) {\n if (index < self->itemCount) {\n void* item = _UpArrayGet(self, index);\n memcpy(out, item, self->size);\n return true;\n } else {\n // UpError(\"Out of range\");\n return false;\n }\n}\n\nvoid UpArrayPop(UpArray* self, void* out) {\n UpIndex lastIndex = self->itemCount-1;\n UpArrayGet(self, lastIndex, out);\n UpArrayRemoveAtIndex(self, lastIndex);\n}\n\nvoid UpArrayShift(UpArray* self, void* out) {\n UpArrayGet(self, 0, out);\n 
UpArrayRemoveAtIndex(self, 0);\n}\n\nUpIndex UpArrayFind(UpArray* self, void* value) {\n for (UpIndex i = 0; i < self->itemCount; ++i) {\n void* item = _UpArrayGet(self, i);\n if (item == value) {\n return i;\n }\n }\n return UpNotFound;\n}\n\nvoid UpArraySet(UpArray* self, UpIndex index, void* value) {\n if (index < self->itemCount) {\n _UpArrayStore(self, index, value);\n } else {\n // UpError(\"Out of range\");\n }\n}\n\nUpIndex UpArrayAppend(UpArray* self, void* value) {\n if (self->itemCount == self->itemsAllocated) {\n _UpArrayGrow(self);\n }\n\n _UpArrayStore(self, self->itemCount, value);\n return self->itemCount++;\n}\n\nvoid UpArrayInsert(UpArray* self, UpIndex index, void* value) {\n if (self->itemCount == self->itemsAllocated) {\n _UpArrayGrow(self);\n }\n\n if (index < self->itemCount || index == UpNotFound) {\n _UpArrayShiftItems(self, index, false);\n _UpArrayStore(self, index, value);\n } else {\n _UpArrayStore(self, self->itemCount, value);\n }\n ++self->itemCount;\n}\n\nbool UpArrayRemove(UpArray* self, void* value) {\n UpIndex index = UpArrayFind(self, value);\n if (index != UpNotFound) {\n UpArrayRemoveAtIndex(self, index);\n return true;\n }\n return false;\n}\n\nbool UpArrayRemoveAtIndex(UpArray* self, UpIndex index) {\n if (index < self->itemCount) {\n _UpArrayShiftItems(self, index, true);\n\n if (--self->itemCount - self->itemsAllocated == _UpArrayItemsAllocationSize) {\n _UpArrayShrink(self);\n }\n return true;\n } else {\n // UpError(\"Out of range\");\n return false;\n }\n}\n\nvoid UpArrayRemoveAll(UpArray* self) {\n size_t size = self->size * self->itemsAllocated;\n memset(self->items, 0, size);\n\n self->itemCount = 0;\n}\n" }, { "alpha_fraction": 0.5863798260688782, "alphanum_fraction": 0.5886444449424744, "avg_line_length": 30.535715103149414, "blob_id": "25dbd9a6023bf47f39f2a650378acd27436cc8dd", "content_id": "2703559eb2cc3462fcb75cdfcc6fc3e6c5805ce2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": 
false, "language": "C", "length_bytes": 6182, "license_type": "permissive", "max_line_length": 103, "num_lines": 196, "path": "/src/vm/UpStrTable.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpStrTable.h\"\n#include \"Up/UpContext.h\"\n#include \"UpIntTable.h\"\n#include \"Up/UpArena.h\"\n#include \"lookup3.h\"\n\n// *************************************************************************************************\n\nstatic unsigned int _UpStrTableBucketAllocationSize = 100;\n\nstruct UpStrTableItem {\n UpStrTableItem* next;\n unsigned int hash;\n char* key;\n void* value;\n};\n\nstatic unsigned int _UpStrTableHashForKey(const char* key) {\n return hashlittle((const void*)key, strlen(key), 1);\n}\n\nstatic unsigned int _UpStrTableBucketForHash(UpStrTable* self, unsigned int hash) {\n return hash & (self->bucketsAllocated-1);\n}\n\nstatic unsigned int _UpStrTableBucketForKey(UpStrTable* self, const char* key) {\n unsigned int hash = _UpStrTableHashForKey(key);\n return _UpStrTableBucketForHash(self, hash);\n}\n\nstatic UpStrTableItem* _UpStrTableCreateItem(UpStrTable* self, unsigned int hash, const char* key,\n void* value) {\n // XXXjoe It would be faster to allocate the buckets and item storage inline, instead\n // of allocating new items every time\n UpStrTableItem* item = UpArenaAllocate(self->heap, self->size+sizeof(UpStrTableItem), \"\");\n item->hash = hash;\n item->key = UpArenaCopyString(self->heap, key);\n item->value = (&item->value)+1;\n memcpy(item->value, value, self->size);\n return item;\n}\n\nstatic void _UpStrTableSetWithHash(UpStrTable* self, unsigned int hash, const char* key, void* value) {\n unsigned int bucket = _UpStrTableBucketForHash(self, hash);\n UpStrTableItem* item = self->buckets[bucket];\n if (!item) {\n self->buckets[bucket] = _UpStrTableCreateItem(self, hash, key, value);\n ++self->bucketsUsed;\n ++self->itemCount;\n } else {\n while (item) {\n if 
(!strcmp(item->key, key)) {\n memcpy(item->value, value, self->size);\n break;\n } else if (!item->next) {\n item->next = _UpStrTableCreateItem(self, hash, key, value);\n ++self->itemCount;\n break;\n }\n item = item->next;\n }\n }\n}\n\nstatic void _UpStrTableGrowBuckets(UpStrTable* self) {\n unsigned int oldBucketCount = self->bucketsAllocated;\n UpStrTableItem** oldBuckets = self->buckets;\n \n self->bucketsUsed = 0;\n self->bucketsAllocated = self->bucketsAllocated + _UpStrTableBucketAllocationSize;\n self->buckets = UpArenaNewArray(self->heap, UpStrTableItem, self->bucketsAllocated);\n \n for (unsigned int i = 0; i < oldBucketCount; ++i) {\n UpStrTableItem* item = oldBuckets[i];\n while (item) {\n UpStrTableItem* next = item->next;\n _UpStrTableSetWithHash(self, item->hash, item->key, item->value);\n item = next;\n }\n }\n}\n\n// *************************************************************************************************\n\nUpStrTable* UpStrTableCreate(UpArena* heap, size_t size) {\n UpStrTable* self = UpArenaNew(heap, UpStrTable);\n self->heap = heap;\n self->bucketsAllocated = _UpStrTableBucketAllocationSize;\n self->bucketsUsed = 0;\n self->itemCount = 0;\n self->size = size;\n self->buckets = UpArenaNewArray(heap, UpStrTableItem, self->bucketsAllocated);\n return self;\n}\n\nunsigned int UpStrTableCount(UpStrTable* self) {\n return self->itemCount;\n}\n\nbool UpStrTableGet(UpStrTable* self, const char* key, void* value) {\n unsigned int bucket = _UpStrTableBucketForKey(self, key);\n UpStrTableItem* item = self->buckets[bucket];\n\n while (item) {\n if (!strcmp(item->key, key)) {\n memcpy(value, item->value, self->size);\n return true;\n }\n item = item->next;\n }\n\n return false;\n}\n\nconst char* UpStrTableReverseGet(UpStrTable* self, void* value, UpStrTableCompareFunc compare) {\n for (unsigned int i = 0; i < self->bucketsAllocated; ++i) {\n UpStrTableItem* item = self->buckets[i];\n while (item && !compare(item->value, value)) {\n item = 
item->next;\n }\n if (item) {\n return item->key;\n }\n }\n return NULL;\n}\n\nvoid UpStrTableSet(UpStrTable* self, const char* key, void* value) {\n if (self->bucketsUsed == self->bucketsAllocated) {\n _UpStrTableGrowBuckets(self);\n }\n\n unsigned int hash = _UpStrTableHashForKey(key);\n _UpStrTableSetWithHash(self, hash, key, value);\n}\n\nvoid UpStrTableRemove(UpStrTable* self, const char* key) {\n unsigned int bucket = _UpStrTableBucketForKey(self, key);\n UpStrTableItem* item = self->buckets[bucket];\n UpStrTableItem* previousItem = NULL;\n while (item) {\n UpStrTableItem* next = item->next;\n if (!next || !strcmp(item->key, key)) {\n if (previousItem) {\n previousItem->next = next;\n } else {\n self->buckets[bucket] = next;\n if (!next) {\n --self->bucketsUsed;\n }\n }\n --self->itemCount;\n break;\n }\n previousItem = item;\n item = next;\n }\n}\n\n\nUpIntTable* UpStrTableAsSymbolTable(UpStrTable* self) {\n UpIntTable* table = UpIntTableCreate(self->heap, self->size);\n\n unsigned int count = self->bucketsAllocated;\n \n for (unsigned int i = 0; i < count; ++i) {\n UpStrTableItem* item = self->buckets[i];\n while (item) {\n UpStrTableItem* next = item->next;\n UpSymbol key = UpGetSymbol(item->key);\n UpIntTableSet(table, key, item->value);\n item = next;\n }\n }\n\n return table;\n}\n\nvoid UpStrTableIterate(UpStrTable* self, UpStrTableIterator callback, void* context) {\n unsigned int bucketCount = self->bucketsAllocated;\n UpStrTableItem** buckets = self->buckets;\n\n for (unsigned int i = 0; i < bucketCount; ++i) {\n UpStrTableItem* item = buckets[i];\n while (item) {\n void* value;\n void* ptr = &value;\n memcpy(ptr, item->value, self->size);\n\n callback(self, item->key, value, context);\n item = item->next;\n }\n }\n}\n" }, { "alpha_fraction": 0.5979037880897522, "alphanum_fraction": 0.601715087890625, "avg_line_length": 28.97142791748047, "blob_id": "ae88daea0f393324c48bbc6f28ac9ca6841c6c0b", "content_id": "6f5d322e556e0da448fb17661392f701b2cfa1c3", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2099, "license_type": "permissive", "max_line_length": 99, "num_lines": 70, "path": "/src/vm/UpTerminal.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpTerminal.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpArena.h\"\n\n#include <sys/ioctl.h>\n#include <termios.h>\n\n// ************************************************************************************************\n\nstruct termios* UpTerminalEnter(UpObject* self, FILE* stream) {\n struct termios newState;\n struct termios originalState;\n\n int fd = fileno(stream);\n\n if (!isatty(fd)) {\n UpSetError(\"Stream is not a TTY\");\n return NULL;\n }\n\n if (tcgetattr(fd, &originalState) == -1) {\n UpSetError(\"Unable to modify terminal\");\n return NULL;\n }\n\n // This code originates from:\n // https://github.com/antirez/linenoise/blob/master/linenoise.c\n\n newState = originalState; /* modify the original mode */\n /* input modes: no break, no CR to NL, no parity check, no strip char,\n * no start/stop output control. */\n newState.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);\n\n /* output modes - disable post processing */\n // newState.c_oflag &= ~(OPOST);\n\n /* control modes - set 8 bit chars */\n newState.c_cflag |= (CS8);\n\n /* local modes - choing off, canonical off, no extended functions,\n * no signal chars (^Z,^C) */\n newState.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);\n\n /* control chars - set return condition: min number of bytes and timer.\n * We want read to return every single byte, without timeout. 
*/\n newState.c_cc[VMIN] = 1;\n newState.c_cc[VTIME] = 0; /* 1 byte, no timer */\n\n /* put terminal in raw mode after flushing */\n if (tcsetattr(fd, TCSAFLUSH, &newState) < 0) {\n UpSetError(\"Unable to modify terminal\");\n return NULL;\n }\n\n struct termios* original = UpArenaNew(UpGetHeap(), struct termios);\n *original = originalState;\n return original;\n}\n\nvoid UpTerminalRestore(UpObject* self, FILE* stream, struct termios* state) {\n int fd = fileno(stream);\n\n if (tcsetattr(fd, TCSAFLUSH, state) < 0) {\n UpSetError(\"Unable to restore terminal\");\n return;\n }\n}\n" }, { "alpha_fraction": 0.7800604104995728, "alphanum_fraction": 0.7806646227836609, "avg_line_length": 24.859375, "blob_id": "4d4e6db29ceb7f235248848dedd6a68cb9009ec1", "content_id": "2d56af41fc32011673daf614f89a425da600a654", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1655, "license_type": "permissive", "max_line_length": 37, "num_lines": 64, "path": "/src/vm/include/UpInstructions.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPINSTRUCTION_H\n#define UP_UPINSTRUCTION_H\n\ntypedef enum {\n UpInstructionNull,\n UpInstructionPop,\n UpInstructionJump,\n UpInstructionJumpIf,\n UpInstructionJumpIfNot,\n UpInstructionJumpIfDefined,\n UpInstructionJumpIfNotDefined,\n UpInstructionJumpIfHas,\n UpInstructionJumpIfHasNot,\n UpInstructionLoadObject,\n UpInstructionLoadInteger,\n UpInstructionLoadLong,\n UpInstructionLoadFloat,\n UpInstructionGetLocal,\n UpInstructionSetLocal,\n UpInstructionDeleteLocal,\n UpInstructionSetArgument,\n UpInstructionGetProperty,\n UpInstructionLookupProperty,\n UpInstructionSetProperty,\n UpInstructionValidateProperty,\n UpInstructionDeleteProperty,\n UpInstructionCallProperty,\n UpInstructionCallOperator,\n UpInstructionCallOperator2,\n UpInstructionIs,\n UpInstructionIsDefined,\n UpInstructionImport,\n UpInstructionSetImport,\n UpInstructionCall,\n 
UpInstructionReturn,\n UpInstructionThrow,\n UpInstructionNewObject,\n UpInstructionNewList,\n UpInstructionNewMap,\n UpInstructionNewClass,\n UpInstructionBindClass,\n UpInstructionNewGenerator,\n UpInstructionNewFunction,\n UpInstructionNewCFunction,\n UpInstructionSchedule,\n UpInstructionSubscribe,\n UpInstructionRead,\n UpInstructionWrite,\n UpInstructionWriteAwaitRead,\n UpInstructionWriteAwaitResponse,\n UpInstructionRespond,\n UpInstructionJumpIfChannelClosed,\n UpInstructionBeginTry,\n UpInstructionEndTry,\n UpInstructionCatch,\n UpInstructionFinally,\n} UpInstruction;\n\ntypedef struct {\n int lineOffset;\n int opOffset;\n} UpLineItem;\n\n#endif // UP_UPINSTRUCTION_H\n" }, { "alpha_fraction": 0.7102272510528564, "alphanum_fraction": 0.7102272510528564, "avg_line_length": 20.875, "blob_id": "4d6cfafd17d4957d082fe5123d033cdc65172e9b", "content_id": "85394215e2b1fd92335e7a9456ecb9746577f290", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 176, "license_type": "permissive", "max_line_length": 41, "num_lines": 8, "path": "/src/vm/UpType.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpType.h\"\n#include \"Up/UpContext.h\"\n\nconst char* UpTypeGetName(UpType* self) {\n return UpGetSymbolName(self->name);\n}\n" }, { "alpha_fraction": 0.7621878981590271, "alphanum_fraction": 0.7621878981590271, "avg_line_length": 26.129032135009766, "blob_id": "967c0ed670372b7049dad9ee1724eadc111d4532", "content_id": "2a6a21f885ca4f2f6957e450389528ad289d3fc7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 841, "license_type": "permissive", "max_line_length": 89, "num_lines": 31, "path": "/src/vm/UpIntTable.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPINTTABLE_H\n#define UP_UPINTTABLE_H\n\n#include \"Up/UpGlobal.h\"\n\ntypedef 
struct UpIntTableItem UpIntTableItem;\n\ntypedef void(*UpIntTableIterator)(UpIntTable* self, int key, void* value, void* context);\n\nstruct UpIntTable {\n UpArena* heap;\n UpIntTableItem** buckets;\n unsigned int bucketsAllocated;\n unsigned int bucketsUsed;\n unsigned int itemCount;\n size_t size;\n};\n\nUpIntTable* UpIntTableCreate(UpArena* heap, size_t size);\n\nunsigned int UpIntTableCount(UpIntTable* map);\n\nbool UpIntTableGet(UpIntTable* map, int key, void* value);\nint UpIntTableReverseGet(UpIntTable* map, void* value);\n\nvoid UpIntTableSet(UpIntTable* map, int key, void* value);\nvoid UpIntTableRemove(UpIntTable* map, int key);\n\nvoid UpIntTableIterate(UpIntTable* self, UpIntTableIterator callback, void* context);\n\n#endif // UP_UPINTTABLE_H\n" }, { "alpha_fraction": 0.7697560787200928, "alphanum_fraction": 0.7697560787200928, "avg_line_length": 29.147058486938477, "blob_id": "0d28c377075918aee04c7550156fcb50bd413d5f", "content_id": "6e9035ff4694ff919c478b7309d5b0ae5f7ff4a3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1025, "license_type": "permissive", "max_line_length": 97, "num_lines": 34, "path": "/src/vm/UpStrTable.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPSTRTABLE_H\n#define UP_UPSTRTABLE_H\n\n#include \"Up/UpGlobal.h\"\n\ntypedef struct UpStrTableItem UpStrTableItem;\n\ntypedef bool(*UpStrTableCompareFunc)(void* a, void* b);\ntypedef void(*UpStrTableIterator)(UpStrTable* self, const char* key, void* value, void* context);\n\nstruct UpStrTable {\n UpArena* heap;\n UpStrTableItem** buckets;\n unsigned int bucketsAllocated;\n unsigned int bucketsUsed;\n unsigned int itemCount;\n size_t size;\n};\n\nUpStrTable* UpStrTableCreate(UpArena* heap, size_t size);\n\nunsigned int UpStrTableCount(UpStrTable* map);\n\nbool UpStrTableGet(UpStrTable* map, const char* key, void* value);\nconst char* UpStrTableReverseGet(UpStrTable* map, void* value, 
UpStrTableCompareFunc compare);\n\nvoid UpStrTableSet(UpStrTable* map, const char* key, void* value);\nvoid UpStrTableRemove(UpStrTable* map, const char* key);\n\nUpIntTable* UpStrTableAsSymbolTable(UpStrTable* table);\n\nvoid UpStrTableIterate(UpStrTable* self, UpStrTableIterator callback, void* context);\n\n#endif // UP_UPSTRTABLE_H\n" }, { "alpha_fraction": 0.6834532618522644, "alphanum_fraction": 0.7266187071800232, "avg_line_length": 16.25, "blob_id": "a691775722118870b5f6542266d988d6ab523056", "content_id": "189196974b85dea71cf34b1ffb984de480f4b9cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 139, "license_type": "permissive", "max_line_length": 71, "num_lines": 8, "path": "/src/vm/lookup3.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef LOOKUP3_H\n#define LOOKUP3_H\n\n#include <stdint.h>\n\nuint32_t hashlittle( const void *key, size_t length, uint32_t initval);\n\n#endif\n" }, { "alpha_fraction": 0.6868263483047485, "alphanum_fraction": 0.6880239248275757, "avg_line_length": 31.72549057006836, "blob_id": "93858ffd2c5e33f854c4f944c6379ae01b2c4b7d", "content_id": "f2e60442e12a0201a583471f522d40be355e3891", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1670, "license_type": "permissive", "max_line_length": 100, "num_lines": 51, "path": "/src/vm/include/UpChannel.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPCHANNEL_H\n#define UP_UPCHANNEL_H\n\n#include \"Up/UpObject.h\"\n\n// *************************************************************************************************\n\nstruct UpChannel {\n UpObject __base;\n UpTask* generator;\n // XXXjoe Instead of UpArray, I should use a structure that can O(1) shift\n UpArray* messages;\n UpArray* subscribers;\n UpArray* readers;\n UpArray* writers;\n UpArray* respondees;\n bool isClosed:1;\n};\n\n// 
*************************************************************************************************\n\nUpObject* UpChannelCreate();\n\nbool UpChannelIsClosed(UpChannel* self);\nbool UpChannelClose(UpChannel* self);\n\nbool UpChannelHasGenerator(UpChannel* self);\nUpTask* UpChannelGetGenerator(UpChannel* self);\n\nbool UpChannelHasSubscribers(UpChannel* self);\nvoid UpChannelAddSubscriber(UpChannel* self, UpEvent* subscriber);\nUpEvent** UpChannelCloneSubscribers(UpChannel* self, unsigned int* outCount);\n\nbool UpChannelHasMessage(UpChannel* self);\nvoid UpChannelAddMessage(UpChannel* self, UpObject* message);\nUpObject* UpChannelPopMessage(UpChannel* self);\n\nbool UpChannelHasReader(UpChannel* self);\nvoid UpChannelAddReader(UpChannel* self, UpEvent* reader);\nUpEvent** UpChannelPopAllReaders(UpChannel* self, unsigned int* outCount);\nUpEvent* UpChannelPopReader(UpChannel* self);\n\nbool UpChannelHasWriter(UpChannel* self);\nvoid UpChannelAddWriter(UpChannel* self, UpEvent* writer);\nUpEvent* UpChannelPopWriter(UpChannel* self);\n\nbool UpChannelHasRespondee(UpChannel* self);\nvoid UpChannelAddRespondee(UpChannel* self, UpEvent* respondee);\nUpEvent* UpChannelPopRespondee(UpChannel* self);\n\n#endif // UP_UPCHANNEL_H\n" }, { "alpha_fraction": 0.5545607209205627, "alphanum_fraction": 0.5581335425376892, "avg_line_length": 32.28080368041992, "blob_id": "34a42feca2167f831dc871800acbc7056f8d4b88", "content_id": "a97e5762e87c2a65c842e342d57a3a3da57cc694", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23231, "license_type": "permissive", "max_line_length": 100, "num_lines": 698, "path": "/make/build/Maker.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport re, os.path, sys, shutil, json\nfrom .Resource import res\nfrom ..util import subread, sourceLink, BuildError\nfrom ..Message import error, warning, command\n# 
**************************************************************************************************\n\n# commonLibs = [\"stdc++\"]\ncommonLibs = []\n\nrePch = re.compile(\"(^pch\\.h|\\.pch)$\")\nreLexFiles = re.compile(r\"\\.(lex)$\")\nreBisonFiles = re.compile(r\"\\.(y)$\")\nreCFiles = re.compile(r\"\\.(cpp|c|cxx|m|mm)$\")\nreCPPFiles = re.compile(r\"\\.(cpp|cc)$\")\n\nreExtraLine = re.compile(r\"\\s\\s\\s(.*?)\\n\")\n\n# **************************************************************************************************\n\nclass Maker(object):\n def __init__(self):\n self.nextMaker = None\n self.previousMaker = None\n\n def __rshift__(self, other):\n self.nextMaker = other\n other.previousMaker = self\n return other\n\n def build(self, project, out, sources):\n if self.previousMaker:\n sources = self.previousMaker.build(project, out, sources)\n\n def clean(self, project, out, sources):\n pass\n\n def install(self, project, out):\n pass\n\n def printResult(self, project, out, text):\n for line in text.splitlines():\n out << \" %s\" % line\n\nclass MakerOneToOne(Maker):\n def needsUpdate(self, project, source, target):\n return False\n\n def getTarget(self, project, source):\n return \"\"\n\n def build(self, project, out, sources):\n if self.previousMaker:\n sources = self.previousMaker.build(project, out, sources)\n\n targets = []\n\n for source in sources:\n target = self.getTarget(project, source)\n if not target:\n continue\n\n targets.append(target)\n\n targetDir = os.path.dirname(target.path)\n if not os.path.exists(targetDir):\n os.makedirs(targetDir)\n\n if source.needsUpdate(target) or self.needsUpdate(project, source, target):\n status = self.buildTarget(project, out, source, target)\n if status != 0:\n raise BuildError(status)\n else:\n target.resetStats()\n\n return targets\n\n def clean(self, project, out, sources):\n if self.previousMaker:\n sources = self.previousMaker.clean(project, out, sources)\n\n targets = []\n\n for source in sources:\n target = 
self.getTarget(project, source)\n if not target:\n continue\n\n targets.append(target)\n\n out << command('remove', target.path)\n deleteTarget(target)\n\n return targets\n\nclass MakerManyToOne(Maker):\n def needsUpdate(self, project, target):\n return False\n\n def getTarget(self, project):\n return \"\"\n\n def build(self, project, out, sources):\n # Check projects we depend on and build those first\n for dep in project.getDependencies():\n dep.make(\"build\", out)\n\n if self.previousMaker:\n sources = self.previousMaker.build(project, out, sources)\n\n targets = []\n\n target = self.getTarget(project)\n if not target:\n return\n\n targets.append(target)\n\n targetDir = os.path.dirname(target.path)\n if not os.path.exists(targetDir):\n os.makedirs(targetDir)\n\n needUpdate = self.needsUpdate(project, target)\n if not needUpdate:\n for source in sources:\n if source.needsUpdate(target):\n needUpdate = True\n break\n\n if needUpdate:\n out << command('link', target.path)\n result = self.buildTarget(project, out, sources, target)\n if result != 0:\n raise BuildError(result)\n else:\n target.resetStats()\n\n return targets\n\n def clean(self, project, out, sources):\n if self.previousMaker:\n sources = self.previousMaker.clean(project, out, sources)\n\n targets = []\n\n target = self.getTarget(project)\n if not target:\n return\n\n targets.append(target)\n\n out << command('remove', target.path)\n deleteTarget(target)\n\n return targets\n\nclass Preprocessor(Maker):\n def needsUpdate(self, project, source, target):\n return False\n\n def getTarget(self, project, source):\n return \"\"\n\n def build(self, project, out, sources):\n if self.previousMaker:\n sources = self.previousMaker.build(project, out, sources)\n\n targets = []\n\n for source in sources:\n target = self.getTarget(project, source)\n if not target:\n targets.append(source)\n else:\n targets.append(target)\n\n if source.needsUpdate(target) or self.needsUpdate(project, source, target):\n status = 
self.buildTarget(project, out, source, target)\n if status != 0:\n raise BuildError(status)\n\n return targets\n\n def clean(self, project, out, sources):\n if self.previousMaker:\n sources = self.previousMaker.clean(project, out, sources)\n\n targets = []\n\n for source in sources:\n target = self.getTarget(project, source)\n if not target:\n targets.append(source)\n else:\n targets.append(target)\n\n out << command('remove', target.path)\n deleteTarget(target)\n\n return targets\n\n# **************************************************************************************************\n\nclass Compile(MakerOneToOne):\n path = \"gcc\"\n\n patterns = re.compile(r\"\"\"\n (?P<Include>^In file included from (.*?):(\\d*?),$)|\n (?P<Unknown>^g\\+\\+:\\s(.+?):\\s(.+?)$)|\n (?P<Problem>^([^\\s]*?):(\\d*?):(\\d*?):\\s(warning|error|note):\\s(.*?)$)|\n (?P<Problem2>^([^\\s]*?):(\\d*?):\\s(warning|error|note):\\s(.*?)$)|\n (?P<Problem3>^([^\\s]*?):\\s(warning|error|note):\\s(.*?)$)\n \"\"\", re.M | re.VERBOSE)\n\n def needsUpdate(self, project, source, target):\n if source.name in project.alwaysBuild:\n return True\n\n for header in source.dependencies:\n if header.needsUpdate(target):\n return True\n\n return False\n\n def getTarget(self, project, source):\n if rePch.search(source.name):\n targetName = \"%s.gch\" % source.path\n return res(targetName)\n\n elif reCFiles.search(source.name):\n targetName = re.sub(r\"\\.(cpp|c|cxx|m|mm)$\", r\".o\", source.path)\n return res(project.getBuildPath(targetName))\n\n def buildTarget(self, project, out, source, target):\n out << command('compile', source.path)\n args = self.getBaseArgs(project, source, target)\n\n if project.pedantic:\n args += \" -pedantic\"\n\n # If for some reason a header makes it in here (e.g. 
pch.h), treat it like this\n name,ext = os.path.splitext(source.name)\n # if ext == \".h\":\n # args += \" -x c++-header\"\n if ext == \".pch\":\n args += \" -x objective-c-header\"\n\n compilerPath = project.compilerPath if project.compilerPath else self.path\n line = \"%s %s -c -o %s %s\" % (compilerPath, args, target, source)\n #c1 = time.time()\n result = executeCommand(line, project, out, self)\n #c2 = time.time()\n #out << \"built %s in %f\" % (source, c2-c1)\n return result\n\n def getBaseArgs(self, project, source, target):\n # The \"no-long-double\" option seems to be a mac-only thing\n if sys.platform == \"darwin\":\n # Causes error on Snow Leopard\n #args = \"-Wno-long-double \"\n args = \"\"\n else:\n args = \"\"\n\n if not reCPPFiles.search(source.name):\n args += ' -std=gnu99'\n\n if project.compilerFlags:\n args += project.compilerFlags + \" \"\n\n args += self.getOptimizeFlag(project)\n\n # XXXjoe Building on some unix machines without this flag causes a link error on some libs\n #args += \" -fPIC\"\n\n if project.enableProbes:\n args += ' -DUP_ENABLE_PROBES'\n\n if project.gitVersion:\n args += ' -DGIT_VERSION=\\\"%s\\\"' % project.gitVersion\n\n for name,value in project.defines.iteritems():\n if value:\n args += ' -D%s=%s' % (name, value)\n\n args += \" -I%s\" % project.path\n args += \" -I%s\" % project.buildPath\n for include in project.getIncludes():\n args += \" -I%s\" % include\n for dep in project.getDependencies():\n if dep != project:\n args += \" -I%s\" % dep.buildPath\n\n return args\n\n def getOptimizeFlag(self, project):\n vals = {\"size\": \"-Os\", \"speed\": \"-O3\", \"full\": \"-O3\"}\n if project.optimize in vals:\n return vals[project.optimize]\n else:\n return \"-O0 -gdwarf-2 -DDEBUG -D_DEBUG\"\n\n def printResult(self, project, out, text):\n m = self.patterns.search(text)\n if not m:\n if text:\n out << text\n\n else:\n # Find the appropriate handler to pretty print the matched text\n while m:\n groupdict = m.groupdict()\n 
groups = m.groups()\n offset = m.end()\n\n for name in groupdict:\n if groupdict[name]:\n extras = \"\"\n m2 = reExtraLine.match(text, offset+1)\n while m2:\n extras += m2.groups()[0]\n offset = m2.end()\n m2 = reExtraLine.match(text, offset)\n\n handler = globals()[\"print%s\" % name]\n if handler:\n index = self.patterns.groupindex[name]\n handler(groups[index:], extras, project, out)\n\n m = self.patterns.search(text, offset)\n\nclass Link(MakerManyToOne):\n path = \"gcc\"\n\n def needsUpdate(self, project, target):\n for dep in project.getDependencies():\n if dep.build:\n depTarget = dep.build.getTarget(dep)\n if depTarget and depTarget.needsUpdate(target):\n return True\n\n def buildTarget(self, project, out, sources, target):\n if target.name in project.neverLink:\n return 0\n\n # Object files must appear before libraries on the link line (see http://tinyurl.com/s97do)\n args = self.getSourceArgs(project, sources, target)\n args += \" \" + self.getBaseArgs(project, sources, target)\n\n linkerPath = project.linkerPath if project.linkerPath else self.path\n line = \"%s %s\" % (linkerPath, args)\n return executeCommand(line, project, out, self)\n\n def install(self, project, out):\n buildTarget = self.getTarget(project)\n buildInstallPath = self.getInstallPath(project, buildTarget)\n if buildInstallPath:\n installDir = os.path.dirname(buildInstallPath)\n if not os.path.exists(installDir):\n os.makedirs(installDir)\n\n out << command('install to', buildInstallPath)\n if project.installLink:\n if not os.path.exists(str(buildInstallPath)):\n os.symlink(str(buildTarget), str(buildInstallPath))\n else:\n shutil.copy2(str(buildTarget), str(buildInstallPath))\n\n if project.exports:\n for includePath,shorthand in project.exports.iteritems():\n includePath = os.path.join(project.path, includePath)\n\n installIncludePath = os.path.join(project.installPath, \"include\")\n if not os.path.isdir(installIncludePath):\n os.makedirs(installIncludePath)\n\n includeInstallPath = 
os.path.join(installIncludePath, shorthand)\n\n out << command('install exports to', includeInstallPath)\n if project.installLink:\n if not os.path.exists(includeInstallPath):\n os.symlink(includePath, includeInstallPath)\n else:\n if os.path.isdir(includeInstallPath):\n shutil.rmtree(includeInstallPath)\n shutil.copytree(includePath, includeInstallPath)\n\n def getSourceArgs(self, project, sources, target):\n def isPrecompiledHeader(name):\n return name.endswith(\".gch\")\n\n return \" \".join([source.path for source in sources if not isPrecompiledHeader(source.name)])\n\n def getBaseArgs(self, project, sources, target):\n args = \"-o %s\" % target\n\n # Add compiler-specific link flags\n if project.linkerFlags:\n args += \" \" + project.linkerFlags\n\n # XXXblake On Linux, link order matters, so our stripping of duplicates in getDependencies()\n # busts the link. Allowing duplicates while still preventing recursion is tricky, however...\n # for example, right now, the Suade serialize and memory projects depend on each other. For\n # now, we use the \"grouping\" option to force the linker to keep checking the archives until\n # all references are resolved. 
This has a significant performance cost, as the docs note.\n if not sys.platform == \"darwin\":\n args += \" -Xlinker --start-group\"\n\n libs = project.getLibs()\n\n # XXXjoe On Mac, link order matters also, but we don't have the luxury of --start-group,\n # so the only hack I can think of is to list libraries twice\n if sys.platform == \"darwin\":\n libs *= 2\n\n for libName in libs:\n args += \" %s\" % libName\n\n if not sys.platform == \"darwin\":\n args += \" -Xlinker --end-group\"\n\n for libName in commonLibs:\n args += \" -l%s\" % libName\n\n for name in project.getFrameworks():\n args += \" -framework %s\" % name\n\n # Ensure that we use the NPTL version of libc on Unix (see http://tinyurl.com/rv49a)\n #if not sys.platform == \"win32\" and not sys.platform == \"darwin\":\n # args += \" -L/usr/lib/nptl\"\n\n return args\n\nclass LinkExecutable(Link):\n def getTarget(self, project):\n exePath = os.path.join(project.path, project.name)\n return res(project.getBuildPath(exePath))\n\n def getInstallPath(self, project, targetPath):\n targetName = os.path.basename(str(targetPath))\n installPath = os.path.join(project.installPath, \"bin\", targetName)\n return installPath\n\nclass LinkStaticLib(Link):\n path = \"libtool\"\n\n def needsUpdate(self, project, target):\n return False\n\n def getTarget(self, project):\n libPath = os.path.join(project.path, \"lib%s.a\" % project.name)\n return res(project.getBuildPath(libPath))\n\n def getBaseArgs(self, project, sources, target):\n if not sys.platform == \"darwin\":\n return \"--mode=link gcc -static -o %s\" % target\n else:\n return \"-static -o %s\" % target\n\n def getInstallPath(self, project, targetPath):\n targetName = os.path.basename(str(targetPath))\n installPath = os.path.join(project.installPath, \"lib\", targetName)\n return installPath\n\n # We need to override Link's build() method so we can format the arg string in the proper order,\n # i.e. 
we need the base args to come before the source args here\n def buildTarget(self, project, out, sources, target):\n if target.name in project.neverLink:\n return 0\n\n linkerPath = project.linkerPath if project.linkerPath else 'ar'\n line1 = \"%s cru %s\" % (linkerPath, target.path)\n line1 += \" \" + self.getSourceArgs(project, sources, target)\n result = executeCommand(line1, project, out, self)\n if result != 0:\n return result\n\n ranlibPath = project.ranlibPath if project.ranlibPath else 'ranlib'\n line2 = \"%s %s\" % (ranlibPath, target.path)\n result = executeCommand(line2, project, out, self)\n return result\n\n # #xxxJoe This is the old way using libtool\n #args = self.getBaseArgs(project, sources, target)\n #args += \" \" + self.getSourceArgs(project, sources, target)\n\n #line = \"%s %s\" % (self.path, args)\n #return executeCommand(line, project, out, self)\n\nclass LinkDynamicLib(Link):\n def getTarget(self, project):\n return res(project.getBuildPath(\"%s.so\" % os.path.join(project.path, project.name)))\n\n def getBaseArgs(self, project, sources, target):\n args = Link.getBaseArgs(self, project, sources, target)\n\n if sys.platform == \"darwin\":\n args += \" -bundle -undefined dynamic_lookup\"\n else:\n args += \" -shared\"\n\n return args\n\n def getInstallPath(self, project, targetPath):\n targetName = os.path.basename(str(targetPath))\n installPath = os.path.join(project.installPath, \"lib\", targetName)\n return installPath\n\nclass LinkPythonModule(LinkDynamicLib):\n # XXXjoe There could someday be Python-specific goodies here\n pass\n\nclass Probes(Preprocessor):\n def getTarget(self, project, source):\n if source.path.endswith('/UpProbes.json'):\n outputPath = source.path.replace(\".json\", \".c\")\n return res(outputPath)\n\n def buildTarget(self, project, out, source, target):\n out << command('make probes', source.path)\n from .ProbeMaker import probeMake\n try:\n probeMake(source.path)\n return 0\n except Exception,exc:\n out << 
error('Unable to parse json.', source.path)\n out << error(str(exc))\n return -1\n\nclass FlexParse(Preprocessor):\n path = \"/usr/bin/env flex\"\n\n def getTarget(self, project, source):\n if reLexFiles.search(source.name):\n outputPath = re.sub(reLexFiles, r\".yy.c\", source.path)\n return res(outputPath)\n\n def buildTarget(self, project, out, source, target):\n out << command('flex', source.path)\n\n headerPath = re.sub(reLexFiles, r\".yy.h\", source.path)\n line = \"%s --header-file=%s -o%s %s\" % (self.path, headerPath, target, source)\n return executeCommand(line, project, out, self)\n\nclass BisonParse(Preprocessor):\n path = \"/usr/bin/env bison\"\n\n def getTarget(self, project, source):\n if reBisonFiles.search(source.name):\n outputPath = re.sub(reBisonFiles, r\".tab.c\", source.path)\n return res(outputPath)\n\n def buildTarget(self, project, out, source, target):\n out << command('bison', source.path)\n\n line = \"%s -r solved -v -d -o %s %s\" % (self.path, target, source)\n return executeCommand(line, project, out, self)\n\nclass ConfigureMake(MakerManyToOne):\n def getTarget(self, project):\n output = getattr(project, \"output\", None)\n if output:\n return res(os.path.abspath(output))\n\n def build(self, project, out, source, target):\n line = \"make clean\"\n result = executeCommand(project, self, out, line)\n\n if project.configure:\n line = project.configure\n else:\n line = \"./configure\"\n result = executeCommand(project, self, out, line)\n\n line = \"make\"\n result = executeCommand(project, self, out, line)\n\n line = \"cp %s %s\" % (self.getTarget(project), target)\n result = executeCommand(project, self, out, line)\n return result\n\nclass StaticFiles(MakerManyToOne):\n def install(self, project, out):\n if project.install:\n for source,dest in project.install:\n sourcePath = os.path.abspath(source)\n destPath = os.path.join(project.installPath, dest)\n\n out << command('install files to', destPath)\n if project.installLink:\n if not 
os.path.exists(destPath):\n os.symlink(sourcePath, destPath)\n else:\n if os.path.isdir(destPath):\n shutil.rmtree(destPath)\n shutil.copytree(sourcePath, destPath)\n\nclass GitVersion(Preprocessor):\n def getTarget(self, project, source):\n # This file will have changed after each commit\n if source.name == \"master\":\n versionPath = os.path.join(project.path, \"..\", \"..\", \"..\", \"..\", \".upversion\")\n return res(os.path.abspath(versionPath))\n\n def buildTarget(self, project, out, sources, target):\n output,error = subread(\"git describe --always --tags --abbrev=1000\")\n if error:\n return 0\n\n parts = output.strip().split('-')\n if len(parts) == 1:\n tag = parts[0]\n commit = ''\n elif len(parts) >= 3:\n tag = \"-\".join(parts[0:-2])\n commit = parts[-1]\n else:\n tag = '(no tag)'\n commit = ''\n\n if tag.startswith('v'):\n tag = tag[1:]\n\n if commit:\n gitVersion = \"%s (%s)\" % (tag, commit)\n else:\n gitVersion = tag\n\n from ..util import projects\n for globalProject in projects:\n globalProject.gitVersion = gitVersion\n\n f = open(str(target), \"w\")\n f.write(gitVersion)\n f.close()\n return 0\n\n# **************************************************************************************************\n\ndef printInclude(m, extras, project, out):\n fileName = os.path.basename(m[0])\n out << \"File included from: %s\" % sourceLink(m[0], int(m[1]))\n out << \"In %s (line %s)\" % (fileName, m[1])\n\ndef printFrom(m, extras, project, out):\n fileName = os.path.basename(m[0])\n out << \"From %s (line %s)\" % (fileName, m)\n\ndef printProblem(m, extras, project, out):\n fileName = os.path.basename(m[0])\n message = m[4] + extras\n\n if m[3] == \"error\":\n out << error(message, m[0], int(m[1]), int(m[2]))\n else:\n out << warning(message, m[0], int(m[1]), int(m[2]))\n\ndef printProblem2(m, extras, project, out):\n fileName = os.path.basename(m[0])\n message = m[3] + extras\n\n if m[2] == \"error\":\n out << error(message, m[0], int(m[1]))\n else:\n out << 
error(warning, m[0], int(m[1]))\n\ndef printProblem3(m, extras, project, out):\n fileName = os.path.basename(m[0])\n message = m[2] + extras\n\n if m[1] == \"error\":\n out << error(message, m[0])\n else:\n out << error(warning, m[0])\n\ndef printUnknown(m, extras, project, out):\n out << \"%s %s\" % (m[1], m[0])\n\n# **************************************************************************************************\n\ndef executeCommand(command, project, out, maker):\n if project.showCommands:\n out << command\n\n output, error = subread(command)\n\n if output:\n if project.formatOutput:\n maker.printResult(project, out, output)\n else:\n out << output\n\n return error\n\ndef deleteTarget(target):\n if target.isdir:\n shutil.rmtree(target.path)\n elif os.path.isfile(target.path):\n os.remove(target.path)\n" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 22.5, "blob_id": "ac03cd12b892ec4f4b0471227f767c17a4149b94", "content_id": "a27a87213301fa15d0b7c364a66c750dddce70ae", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/make/__init__.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nfrom .cmdline import make\nfrom .build import *" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 27, "blob_id": "9a400fe10502cc90166bdbea8987d6cd9ca3c085", "content_id": "ed4d2635eb6b0b9c21f2a2917b2b41bba9254726", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 28, "license_type": "permissive", "max_line_length": 27, "num_lines": 1, "path": "/src/vm/UpEventLoop.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"UpMacEventLoop.h\"" }, { "alpha_fraction": 0.5052022933959961, "alphanum_fraction": 0.5109826326370239, 
"avg_line_length": 28.31355857849121, "blob_id": "207a4e46968a6fa64ebd98c254cc1f165b081b1f", "content_id": "e055222ee27ea4eb29415690f1d05660a10bccd6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3460, "license_type": "permissive", "max_line_length": 100, "num_lines": 118, "path": "/make/test/utils.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\nimport inspect, os.path, traceback, sys, imp, re\n\n# **************************************************************************************************\n \nUnknownType = 0\nFixtureType = 1\nSuiteType = 2\nWrapperType = 3\nFunctionType = 4\nInspectorType = 5\nExeType = 6\n\ntestTypeNames = {\n UnknownType: \"Unknown\",\n FixtureType: \"Fixture\",\n SuiteType: \"Suite\",\n WrapperType: \"Wrapper\",\n FunctionType: \"Function\",\n InspectorType: \"Inspector\",\n ExeType: \"Exe\",\n}\n\ntestFilesDirName = \"__tests__\"\n\nreFunctionPrefix = re.compile(\"test(.+)\")\n\n# **************************************************************************************************\n\ndef sourceLink(path, line=-1, col=-1):\n if line >= 0:\n if col >= 0:\n return \"[[%s:%s:%s]]\" % (path, line, col)\n else:\n return \"[[%s:%s]]\" % (path, line)\n else:\n return \"[[%s]]\" % (path,)\n\n# **************************************************************************************************\n\ndef logException(writer, printStack=False):\n fileName, lineNo = getTracebackSource()\n description = traceback.format_exc()\n print description.strip()\n\n# **************************************************************************************************\n\ndef testOutputPath(path):\n return os.path.join(os.environ['HOME'], 'Library', 'Caches', 'uptests')\n\ndef testFunctionPrefix(name):\n m = reFunctionPrefix.match(name)\n if m:\n testPrefix = m.groups()[0]\n return testPrefix[0].lower() + testPrefix[1:]\n \ndef functionNameToTestFilePattern(obj, name):\n # Find 
the directory of the module that the class belongs to \n modFilePath = findModule(obj.__module__)\n modFileDirPath = os.path.dirname(modFilePath)\n \n return os.path.join(modFileDirPath, \"%s*.test\" % name)\n \ndef testFileName(obj, name):\n # Find the directory of the module that the class belongs to \n modFilePath = findModule(obj.__module__)\n modFileDirPath = os.path.dirname(modFilePath)\n \n return os.path.join(modFileDirPath, \"%s.test\" % name)\n\ndef testFileFunctionName(filePath):\n fileName = os.path.basename(filePath)\n fnPrefix,ext = os.path.splitext(fileName)\n fnPrefix = fnPrefix[0].upper() + fnPrefix[1:]\n return \"test%s\" % fnPrefix\n\ndef testFileFunctionHiddenName(name):\n return \"__%s__\" % name\n\n# **************************************************************************************************\n\ndef findModule(moduleName, finder=imp.find_module):\n \"\"\" Gets the absolute path of a module.\"\"\"\n \n path = None\n for name in moduleName.split(\".\"):\n y,path,z = finder(name, [path] if path else None)\n return path\n\ndef getTracebackSource(exc=None):\n if not exc:\n exc = sys.exc_info()\n \n try:\n msg, (filename, lineno, offset, badline) = exc[1]\n return filename, lineno\n except:\n tb = exc[2]\n while tb.tb_next:\n tb = tb.tb_next\n \n try:\n info = inspect.getframeinfo(tb.tb_frame)\n return info[0:2]\n except:\n return (None,None)\n\ndef copyToClipboard(text):\n assert sys.platform == \"darwin\", \"Clipboard copying only supported on Mac OS X\"\n\n stream = os.popen(\"pbcopy\", \"w\")\n stream.write(text)\n stream.close()\n\n# **************************************************************************************************\n\nclass PipeEater(object):\n def write(self, *args):\n pass\n" }, { "alpha_fraction": 0.5740163922309875, "alphanum_fraction": 0.6183367371559143, "avg_line_length": 25.85769271850586, "blob_id": "a9d6b855f0baf30ca91e789e07dc02312c7f73b2", "content_id": "b3a671f4320c9991f955830378fb4d5a1aa5d618", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 27933, "license_type": "permissive", "max_line_length": 130, "num_lines": 1040, "path": "/src/vm/include/UpProbes.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n// Do not edit this file. It is generated!\n\n#ifndef UP_PROBES_H\n#define UP_PROBES_H\n\n#include \"Up/UpGlobal.h\"\n\n#ifdef UP_ENABLE_PROBES\n\ntypedef enum {\n UpProbeNull,\n UpProbeCompilerNode,\n UpProbeOp1,\n UpProbeOp2,\n UpProbeFloat,\n UpProbeParse,\n UpProbeSet,\n UpProbeOp1f,\n UpProbeLog,\n UpProbeLong,\n UpProbeCall,\n UpProbeCompilerFrame,\n UpProbeRun,\n UpProbeString,\n UpProbeGet,\n UpProbeCompilerClass,\n UpProbeInteger,\n UpProbeOp1v,\n UpProbeExc,\n UpProbeInstruction,\n UpProbeCompile,\n UpProbeOp1ll,\n UpProbeOp,\n UpProbeDelete,\n} UpProbeType;\n\ntypedef enum {\n UpProbeMappingNull,\n UpProbeMappingOpcodes = 2,\n UpProbeMappingSymbol = 3,\n UpProbeMappingSyntax = 1,\n} UpProbeMapping;\n\nextern bool UpProbeCompilerNodeEnabled;\nextern bool UpProbeOp1Enabled;\nextern bool UpProbeOp2Enabled;\nextern bool UpProbeFloatEnabled;\nextern bool UpProbeParseEnabled;\nextern bool UpProbeSetEnabled;\nextern bool UpProbeOp1fEnabled;\nextern bool UpProbeLogEnabled;\nextern bool UpProbeLongEnabled;\nextern bool UpProbeCallEnabled;\nextern bool UpProbeCompilerFrameEnabled;\nextern bool UpProbeRunEnabled;\nextern bool UpProbeStringEnabled;\nextern bool UpProbeGetEnabled;\nextern bool UpProbeCompilerClassEnabled;\nextern bool UpProbeIntegerEnabled;\nextern bool UpProbeOp1vEnabled;\nextern bool UpProbeExcEnabled;\nextern bool UpProbeInstructionEnabled;\nextern bool UpProbeCompileEnabled;\nextern bool UpProbeOp1llEnabled;\nextern bool UpProbeOpEnabled;\nextern bool UpProbeDeleteEnabled;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginCompilerNode;\n\ntypedef struct __attribute__((__packed__)) 
{\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndCompilerNode;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountCompilerNode;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n} UpBeginOp1;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n} UpEndOp1;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n} UpCountOp1;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n uint32_t _ARG3;\n} UpBeginOp2;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n uint32_t _ARG3;\n} UpEndOp2;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n uint32_t _ARG3;\n} UpCountOp2;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n double _ARG0;\n} UpBeginFloat;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n double _ARG0;\n} UpEndFloat;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double _ARG0;\n} UpCountFloat;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginParse;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndParse;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountParse;\n\ntypedef struct 
__attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpBeginSet;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpEndSet;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpCountSet;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n double _ARG2;\n} UpBeginOp1f;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n double _ARG2;\n} UpEndOp1f;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n double _ARG2;\n} UpCountOp1f;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpDoLog;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint64_t _ARG0;\n} UpBeginLong;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint64_t _ARG0;\n} UpEndLong;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint64_t _ARG0;\n} UpCountLong;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginCall;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndCall;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountCall;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginCompilerFrame;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} 
UpEndCompilerFrame;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountCompilerFrame;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginRun;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndRun;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountRun;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginString;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndString;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountString;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpBeginGet;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpEndGet;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpCountGet;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginCompilerClass;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndCompilerClass;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountCompilerClass;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginInteger;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndInteger;\n\ntypedef struct 
__attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountInteger;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint64_t _ARG2;\n} UpBeginOp1v;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint64_t _ARG2;\n} UpEndOp1v;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint64_t _ARG2;\n} UpCountOp1v;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint32_t _ARG2;\n uint32_t _ARG3;\n} UpDoExc;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginInstruction;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndInstruction;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountInstruction;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpBeginCompile;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n} UpEndCompile;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n} UpCountCompile;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint64_t _ARG2;\n} UpBeginOp1ll;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint64_t _ARG2;\n} UpEndOp1ll;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n uint64_t _ARG2;\n} 
UpCountOp1ll;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpBeginOp;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpEndOp;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpCountOp;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpBeginDelete;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n double time;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpEndDelete;\n\ntypedef struct __attribute__((__packed__)) {\n uint8_t type;\n uint8_t probe;\n uint32_t _ARG0;\n uint32_t _ARG1;\n} UpCountDelete;\n\n#define BEGIN_COMPILER_NODE(_ARG0) \\\n if (UpProbeCompilerNodeEnabled) { \\\n UpBeginCompilerNode probe = {5, UpProbeCompilerNode,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_COMPILER_NODE(_ARG0) \\\n if (UpProbeCompilerNodeEnabled) { \\\n UpEndCompilerNode probe = {6, UpProbeCompilerNode,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_COMPILER_NODE(_ARG0) \\\n if (UpProbeCompilerNodeEnabled) { \\\n UpCountCompilerNode probe = {7, UpProbeCompilerNode, (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_OP1(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1Enabled) { \\\n UpBeginOp1 probe = {5, UpProbeOp1,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint32_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_OP1(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1Enabled) { \\\n UpEndOp1 probe = {6, UpProbeOp1,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint32_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_OP1(_ARG0, _ARG1, _ARG2) \\\n if 
(UpProbeOp1Enabled) { \\\n UpCountOp1 probe = {7, UpProbeOp1, (uint32_t)_ARG0, (uint32_t)_ARG1, (uint32_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_OP2(_ARG0, _ARG1, _ARG2, _ARG3) \\\n if (UpProbeOp2Enabled) { \\\n UpBeginOp2 probe = {5, UpProbeOp2,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint32_t)_ARG2, (uint32_t)_ARG3}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_OP2(_ARG0, _ARG1, _ARG2, _ARG3) \\\n if (UpProbeOp2Enabled) { \\\n UpEndOp2 probe = {6, UpProbeOp2,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint32_t)_ARG2, (uint32_t)_ARG3}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_OP2(_ARG0, _ARG1, _ARG2, _ARG3) \\\n if (UpProbeOp2Enabled) { \\\n UpCountOp2 probe = {7, UpProbeOp2, (uint32_t)_ARG0, (uint32_t)_ARG1, (uint32_t)_ARG2, (uint32_t)_ARG3}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_FLOAT(_ARG0) \\\n if (UpProbeFloatEnabled) { \\\n UpBeginFloat probe = {5, UpProbeFloat,UpGetProbeTime(), (double)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_FLOAT(_ARG0) \\\n if (UpProbeFloatEnabled) { \\\n UpEndFloat probe = {6, UpProbeFloat,UpGetProbeTime(), (double)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_FLOAT(_ARG0) \\\n if (UpProbeFloatEnabled) { \\\n UpCountFloat probe = {7, UpProbeFloat, (double)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_PARSE(_ARG0) \\\n if (UpProbeParseEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpBeginParse probe = {5, UpProbeParse,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define END_PARSE(_ARG0) \\\n if (UpProbeParseEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpEndParse probe = {6, UpProbeParse,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define COUNT_PARSE(_ARG0) \\\n if (UpProbeParseEnabled) { \\\n 
uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpCountParse probe = {7, UpProbeParse, len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define BEGIN_SET(_ARG0, _ARG1) \\\n if (UpProbeSetEnabled) { \\\n UpBeginSet probe = {5, UpProbeSet,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_SET(_ARG0, _ARG1) \\\n if (UpProbeSetEnabled) { \\\n UpEndSet probe = {6, UpProbeSet,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_SET(_ARG0, _ARG1) \\\n if (UpProbeSetEnabled) { \\\n UpCountSet probe = {7, UpProbeSet, (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_OP1F(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1fEnabled) { \\\n UpBeginOp1f probe = {5, UpProbeOp1f,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (double)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_OP1F(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1fEnabled) { \\\n UpEndOp1f probe = {6, UpProbeOp1f,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (double)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_OP1F(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1fEnabled) { \\\n UpCountOp1f probe = {7, UpProbeOp1f, (uint32_t)_ARG0, (uint32_t)_ARG1, (double)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define DO_LOG(_ARG0) \\\n if (UpProbeLogEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpDoLog probe = {7, UpProbeLog, len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define BEGIN_LONG(_ARG0) \\\n if (UpProbeLongEnabled) { \\\n UpBeginLong probe = {5, UpProbeLong,UpGetProbeTime(), (uint64_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_LONG(_ARG0) \\\n if (UpProbeLongEnabled) { \\\n UpEndLong probe = {6, UpProbeLong,UpGetProbeTime(), (uint64_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); 
\\\n }\n\n#define COUNT_LONG(_ARG0) \\\n if (UpProbeLongEnabled) { \\\n UpCountLong probe = {7, UpProbeLong, (uint64_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_CALL(_ARG0) \\\n if (UpProbeCallEnabled) { \\\n UpBeginCall probe = {5, UpProbeCall,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_CALL(_ARG0) \\\n if (UpProbeCallEnabled) { \\\n UpEndCall probe = {6, UpProbeCall,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_CALL(_ARG0) \\\n if (UpProbeCallEnabled) { \\\n UpCountCall probe = {7, UpProbeCall, (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_COMPILER_FRAME(_ARG0) \\\n if (UpProbeCompilerFrameEnabled) { \\\n UpBeginCompilerFrame probe = {5, UpProbeCompilerFrame,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_COMPILER_FRAME(_ARG0) \\\n if (UpProbeCompilerFrameEnabled) { \\\n UpEndCompilerFrame probe = {6, UpProbeCompilerFrame,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_COMPILER_FRAME(_ARG0) \\\n if (UpProbeCompilerFrameEnabled) { \\\n UpCountCompilerFrame probe = {7, UpProbeCompilerFrame, (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_RUN(_ARG0) \\\n if (UpProbeRunEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpBeginRun probe = {5, UpProbeRun,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define END_RUN(_ARG0) \\\n if (UpProbeRunEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpEndRun probe = {6, UpProbeRun,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define COUNT_RUN(_ARG0) \\\n if (UpProbeRunEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpCountRun probe = {7, UpProbeRun, len0}; \\\n 
UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define BEGIN_STRING(_ARG0) \\\n if (UpProbeStringEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpBeginString probe = {5, UpProbeString,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define END_STRING(_ARG0) \\\n if (UpProbeStringEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpEndString probe = {6, UpProbeString,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define COUNT_STRING(_ARG0) \\\n if (UpProbeStringEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpCountString probe = {7, UpProbeString, len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define BEGIN_GET(_ARG0, _ARG1) \\\n if (UpProbeGetEnabled) { \\\n UpBeginGet probe = {5, UpProbeGet,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_GET(_ARG0, _ARG1) \\\n if (UpProbeGetEnabled) { \\\n UpEndGet probe = {6, UpProbeGet,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_GET(_ARG0, _ARG1) \\\n if (UpProbeGetEnabled) { \\\n UpCountGet probe = {7, UpProbeGet, (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_COMPILER_CLASS(_ARG0) \\\n if (UpProbeCompilerClassEnabled) { \\\n UpBeginCompilerClass probe = {5, UpProbeCompilerClass,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_COMPILER_CLASS(_ARG0) \\\n if (UpProbeCompilerClassEnabled) { \\\n UpEndCompilerClass probe = {6, UpProbeCompilerClass,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_COMPILER_CLASS(_ARG0) \\\n if (UpProbeCompilerClassEnabled) { \\\n UpCountCompilerClass probe = {7, UpProbeCompilerClass, 
(uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_INTEGER(_ARG0) \\\n if (UpProbeIntegerEnabled) { \\\n UpBeginInteger probe = {5, UpProbeInteger,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_INTEGER(_ARG0) \\\n if (UpProbeIntegerEnabled) { \\\n UpEndInteger probe = {6, UpProbeInteger,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_INTEGER(_ARG0) \\\n if (UpProbeIntegerEnabled) { \\\n UpCountInteger probe = {7, UpProbeInteger, (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_OP1V(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1vEnabled) { \\\n UpBeginOp1v probe = {5, UpProbeOp1v,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint64_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_OP1V(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1vEnabled) { \\\n UpEndOp1v probe = {6, UpProbeOp1v,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint64_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_OP1V(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1vEnabled) { \\\n UpCountOp1v probe = {7, UpProbeOp1v, (uint32_t)_ARG0, (uint32_t)_ARG1, (uint64_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define DO_EXC(_ARG0, _ARG1, _ARG2, _ARG3) \\\n if (UpProbeExcEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n uint32_t len1 = sizeof(char) * strlen(_ARG1); \\\n UpDoExc probe = {7, UpProbeExc, len0, len1, (uint32_t)_ARG2, (uint32_t)_ARG3}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n UpProbe((void*)_ARG1, len1); \\\n }\n\n#define BEGIN_INSTRUCTION(_ARG0) \\\n if (UpProbeInstructionEnabled) { \\\n UpBeginInstruction probe = {5, UpProbeInstruction,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_INSTRUCTION(_ARG0) \\\n if (UpProbeInstructionEnabled) { \\\n UpEndInstruction probe = {6, 
UpProbeInstruction,UpGetProbeTime(), (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_INSTRUCTION(_ARG0) \\\n if (UpProbeInstructionEnabled) { \\\n UpCountInstruction probe = {7, UpProbeInstruction, (uint32_t)_ARG0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_COMPILE(_ARG0) \\\n if (UpProbeCompileEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpBeginCompile probe = {5, UpProbeCompile,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define END_COMPILE(_ARG0) \\\n if (UpProbeCompileEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpEndCompile probe = {6, UpProbeCompile,UpGetProbeTime(), len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define COUNT_COMPILE(_ARG0) \\\n if (UpProbeCompileEnabled) { \\\n uint32_t len0 = sizeof(char) * strlen(_ARG0); \\\n UpCountCompile probe = {7, UpProbeCompile, len0}; \\\n UpProbe(&probe, sizeof(probe)); \\\n UpProbe((void*)_ARG0, len0); \\\n }\n\n#define BEGIN_OP1LL(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1llEnabled) { \\\n UpBeginOp1ll probe = {5, UpProbeOp1ll,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint64_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_OP1LL(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1llEnabled) { \\\n UpEndOp1ll probe = {6, UpProbeOp1ll,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1, (uint64_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_OP1LL(_ARG0, _ARG1, _ARG2) \\\n if (UpProbeOp1llEnabled) { \\\n UpCountOp1ll probe = {7, UpProbeOp1ll, (uint32_t)_ARG0, (uint32_t)_ARG1, (uint64_t)_ARG2}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_OP(_ARG0, _ARG1) \\\n if (UpProbeOpEnabled) { \\\n UpBeginOp probe = {5, UpProbeOp,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_OP(_ARG0, _ARG1) \\\n if 
(UpProbeOpEnabled) { \\\n UpEndOp probe = {6, UpProbeOp,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_OP(_ARG0, _ARG1) \\\n if (UpProbeOpEnabled) { \\\n UpCountOp probe = {7, UpProbeOp, (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define BEGIN_DELETE(_ARG0, _ARG1) \\\n if (UpProbeDeleteEnabled) { \\\n UpBeginDelete probe = {5, UpProbeDelete,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define END_DELETE(_ARG0, _ARG1) \\\n if (UpProbeDeleteEnabled) { \\\n UpEndDelete probe = {6, UpProbeDelete,UpGetProbeTime(), (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\n#define COUNT_DELETE(_ARG0, _ARG1) \\\n if (UpProbeDeleteEnabled) { \\\n UpCountDelete probe = {7, UpProbeDelete, (uint32_t)_ARG0, (uint32_t)_ARG1}; \\\n UpProbe(&probe, sizeof(probe)); \\\n }\n\nbool* UpGetProbeFlag(const char* name);\nvoid UpInitProbes();\n#else\n\n#define BEGIN_COMPILER_NODE(_ARG0)\n#define END_COMPILER_NODE(_ARG0)\n#define COUNT_COMPILER_NODE(_ARG0)\n#define BEGIN_OP1(_ARG0, _ARG1, _ARG2)\n#define END_OP1(_ARG0, _ARG1, _ARG2)\n#define COUNT_OP1(_ARG0, _ARG1, _ARG2)\n#define BEGIN_OP2(_ARG0, _ARG1, _ARG2, _ARG3)\n#define END_OP2(_ARG0, _ARG1, _ARG2, _ARG3)\n#define COUNT_OP2(_ARG0, _ARG1, _ARG2, _ARG3)\n#define BEGIN_FLOAT(_ARG0)\n#define END_FLOAT(_ARG0)\n#define COUNT_FLOAT(_ARG0)\n#define BEGIN_PARSE(_ARG0)\n#define END_PARSE(_ARG0)\n#define COUNT_PARSE(_ARG0)\n#define BEGIN_SET(_ARG0, _ARG1)\n#define END_SET(_ARG0, _ARG1)\n#define COUNT_SET(_ARG0, _ARG1)\n#define BEGIN_OP1F(_ARG0, _ARG1, _ARG2)\n#define END_OP1F(_ARG0, _ARG1, _ARG2)\n#define COUNT_OP1F(_ARG0, _ARG1, _ARG2)\n#define DO_LOG(_ARG0)\n#define BEGIN_LONG(_ARG0)\n#define END_LONG(_ARG0)\n#define COUNT_LONG(_ARG0)\n#define BEGIN_CALL(_ARG0)\n#define END_CALL(_ARG0)\n#define COUNT_CALL(_ARG0)\n#define BEGIN_COMPILER_FRAME(_ARG0)\n#define 
END_COMPILER_FRAME(_ARG0)\n#define COUNT_COMPILER_FRAME(_ARG0)\n#define BEGIN_RUN(_ARG0)\n#define END_RUN(_ARG0)\n#define COUNT_RUN(_ARG0)\n#define BEGIN_STRING(_ARG0)\n#define END_STRING(_ARG0)\n#define COUNT_STRING(_ARG0)\n#define BEGIN_GET(_ARG0, _ARG1)\n#define END_GET(_ARG0, _ARG1)\n#define COUNT_GET(_ARG0, _ARG1)\n#define BEGIN_COMPILER_CLASS(_ARG0)\n#define END_COMPILER_CLASS(_ARG0)\n#define COUNT_COMPILER_CLASS(_ARG0)\n#define BEGIN_INTEGER(_ARG0)\n#define END_INTEGER(_ARG0)\n#define COUNT_INTEGER(_ARG0)\n#define BEGIN_OP1V(_ARG0, _ARG1, _ARG2)\n#define END_OP1V(_ARG0, _ARG1, _ARG2)\n#define COUNT_OP1V(_ARG0, _ARG1, _ARG2)\n#define DO_EXC(_ARG0, _ARG1, _ARG2, _ARG3)\n#define BEGIN_INSTRUCTION(_ARG0)\n#define END_INSTRUCTION(_ARG0)\n#define COUNT_INSTRUCTION(_ARG0)\n#define BEGIN_COMPILE(_ARG0)\n#define END_COMPILE(_ARG0)\n#define COUNT_COMPILE(_ARG0)\n#define BEGIN_OP1LL(_ARG0, _ARG1, _ARG2)\n#define END_OP1LL(_ARG0, _ARG1, _ARG2)\n#define COUNT_OP1LL(_ARG0, _ARG1, _ARG2)\n#define BEGIN_OP(_ARG0, _ARG1)\n#define END_OP(_ARG0, _ARG1)\n#define COUNT_OP(_ARG0, _ARG1)\n#define BEGIN_DELETE(_ARG0, _ARG1)\n#define END_DELETE(_ARG0, _ARG1)\n#define COUNT_DELETE(_ARG0, _ARG1)\n\n#endif\n\n#endif // UP_PROBES_H\n" }, { "alpha_fraction": 0.5907056927680969, "alphanum_fraction": 0.5932372212409973, "avg_line_length": 30.198871612548828, "blob_id": "a0a90bf290a658d065b84d1597260b8cd070b1bf", "content_id": "4ceffa32e14abaf8604626a1af8e5f145dea3dbb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 22121, "license_type": "permissive", "max_line_length": 100, "num_lines": 709, "path": "/src/vm/UpSyntax.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpParsing.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpDebug.h\"\n#include \"Up/UpArena.h\"\n\n// 
************************************************************************************************\n\nstatic bool _IsLeftType(UpSyntax* syntax) {\n switch (syntax->type) {\n case UpIdSyntaxType:\n case UpPropertySyntaxType:\n case UpSubtypeSyntaxType:\n case UpCastSyntaxType:\n return true;\n case UpBinarySyntaxType: {\n UpBinarySyntax* binary = (UpBinarySyntax*)syntax;\n switch (binary->op) {\n case UpLookupOp:\n case UpIndexOp:\n case UpSliceOp:\n return true;\n default:\n break;\n }\n }\n default:\n break;\n }\n return false;\n}\n\nstatic bool _IsChannelOp(UpOperator op) {\n return op == UpReadOp\n || op == UpWriteOp\n || op == UpWriteAllOp;\n}\n\n// ************************************************************************************************\n\nsize_t UpGetSyntaxTypeSize(UpSyntaxType type) {\n switch (type) {\n case UpSetSyntaxType:\n return sizeof(UpSetSyntax);\n case UpDeclarationSyntaxType:\n return sizeof(UpDeclarationSyntax);\n case UpGroupSyntaxType:\n return sizeof(UpSyntax1);\n case UpPrintSyntaxType:\n return sizeof(UpSyntax1);\n case UpAssignmentSyntaxType:\n return sizeof(UpAssignmentSyntax);\n case UpBinarySyntaxType:\n return sizeof(UpBinarySyntax);\n case UpUnarySyntaxType:\n return sizeof(UpUnarySyntax);\n case UpImportSyntaxType:\n return sizeof(UpImportSyntax);\n case UpCallSyntaxType:\n return sizeof(UpCallSyntax);\n case UpArgumentSyntaxType:\n return sizeof(UpArgumentSyntax);\n case UpPropertySyntaxType:\n return sizeof(UpPropertySyntax);\n case UpIdSyntaxType:\n return sizeof(UpIdSyntax);\n case UpTypeIdSyntaxType:\n return sizeof(UpIdSyntax);\n case UpRangeSyntaxType:\n return sizeof(UpRangeSyntax);\n case UpDefaultSyntaxType:\n return sizeof(UpSyntax2);\n case UpIntSyntaxType:\n case UpLongSyntaxType:\n case UpFloatSyntaxType:\n return sizeof(UpNumberSyntax);\n case UpStringSyntaxType:\n return sizeof(UpStringSyntax);\n case UpListSyntaxType:\n return sizeof(UpSyntax1);\n case UpMapSyntaxType:\n return sizeof(UpSyntax1);\n case 
UpChannelSyntaxType:\n return sizeof(UpSyntax1);\n case UpReturnSyntaxType:\n return sizeof(UpSyntax1);\n case UpBreakSyntaxType:\n return sizeof(UpSyntax);\n case UpContinueSyntaxType:\n return sizeof(UpSyntax1);\n case UpThrowSyntaxType:\n return sizeof(UpSyntax1);\n case UpFunctionSyntaxType:\n return sizeof(UpFunctionSyntax);\n case UpIteratorSyntaxType:\n return sizeof(UpIteratorSyntax);\n case UpCFunctionSyntaxType:\n return sizeof(UpCFunctionSyntax);\n case UpCTypeSyntaxType:\n return sizeof(UpCTypeSyntax);\n case UpCArgumentSyntaxType:\n return sizeof(UpCArgumentSyntax);\n case UpIfSyntaxType:\n return sizeof(UpIfSyntax);\n case UpWhileSyntaxType:\n return sizeof(UpWhileSyntax);\n case UpForSyntaxType:\n return sizeof(UpForSyntax);\n case UpTrySyntaxType:\n return sizeof(UpTrySyntax);\n case UpCatchSyntaxType:\n return sizeof(UpCatchSyntax);\n case UpFinallySyntaxType:\n return sizeof(UpTrySyntax);\n case UpCastSyntaxType:\n return sizeof(UpCastSyntax);\n case UpTypeSyntaxType:\n return sizeof(UpTypeSyntax);\n case UpSubtypeSyntaxType:\n return sizeof(UpSyntax2);\n case UpWhereSyntaxType:\n return sizeof(UpSyntax2);\n case UpTransformSyntaxType:\n return sizeof(UpTransformSyntax);\n case UpIsSyntaxType:\n return sizeof(UpSyntax2);\n case UpHasSyntaxType:\n return sizeof(UpSyntax2);\n case UpUndefinedSyntaxType:\n return 0;\n default:\n printf(\"WARNING: You forgot to specify the size of this syntax object.\");\n return 0;\n }\n}\n\n\nconst char* UpGetOperatorOverrideName(UpOperator op) {\n switch (op) {\n case UpLookupOp:\n return \"op:.[]\";\n case UpIndexOp:\n return \"op:[]\";\n case UpSliceOp:\n return \"op:[to]\";\n case UpEqualsOp:\n return \"op:==\";\n case UpGreaterThanOp:\n return \"op:>\";\n case UpGreaterThanEqualsOp:\n return \"op:>=\";\n case UpLessThanOp:\n return \"op:<\";\n case UpLessThanEqualsOp:\n return \"op:<=\";\n case UpIsInOp:\n return \"op:isin\";\n case UpAddOp:\n return \"op:+\";\n case UpSubtractOp:\n return \"op:-\";\n case 
UpMultiplyOp:\n return \"op:*\";\n case UpDivideOp:\n return \"op:/\";\n case UpModOp:\n return \"op:%\";\n case UpPowOp:\n return \"op:**\";\n case UpConcatOp:\n return \"op:++\";\n case UpAddEqOp:\n return \"op:+=\";\n case UpSubtractEqOp:\n return \"op:-=\";\n case UpMultiplyEqOp:\n return \"op:*=\";\n case UpDivideEqOp:\n return \"op:/=\";\n case UpModEqOp:\n return \"op:%=\";\n case UpPowEqOp:\n return \"op:**=\";\n case UpConcatEqOp:\n return \"op:++=\";\n case UpPositiveOp:\n return \"op:positive\";\n case UpNegativeOp:\n return \"op:negative\";\n case UpNotOp:\n return \"op:not\";\n case UpInOp:\n return \"op:in\";\n default: {\n return NULL;\n }\n }\n}\n\nUpSyntax* UpCreateSyntaxWithType(UpArena* heap, UpSyntaxType type, int line, int col) {\n UpSyntax* node = (UpSyntax*)UpArenaAlloc(heap, UpGetSyntaxTypeSize(type));\n node->type = type;\n node->line = line;\n node->col = col;\n return node;\n}\n\nUpSyntax* UpParse1(UpArena* heap, UpSyntax* node, UpSyntax* a) {\n UpSyntax1* node1 = (UpSyntax1*)node;\n node1->value = a;\n return node;\n}\n\nUpSyntax* UpParse2(UpArena* heap, UpSyntax* node, UpSyntax* a, UpSyntax* b) {\n UpSyntax2* node2 = (UpSyntax2*)node;\n node2->value = a;\n node2->value2 = b;\n return node;\n}\n\nvoid UpSyntaxSetPrepend(UpArena* heap, UpSyntax* list, UpSyntax* node) {\n UpSetSyntax* n = (UpSetSyntax*)list;\n UpSyntaxItem* item = UpArenaNew(heap, UpSyntaxItem);\n item->value = node;\n if (!n->first) {\n n->first = n->last = item;\n } else {\n item->next = n->first;\n n->first = item;\n }\n}\n\nvoid UpSyntaxSetAppend(UpArena* heap, UpSyntax* list, UpSyntax* node) {\n UpSetSyntax* n = (UpSetSyntax*)list;\n UpSyntaxItem* item = UpArenaNew(heap, UpSyntaxItem);\n item->value = node;\n if (!n->first) {\n n->first = item;\n }\n if (n->last) {\n n->last->next = item;\n } else {\n n->last = item;\n }\n n->last = item;\n}\n\nUpSyntax* UpSyntaxSetEnsure(UpArena* heap, UpSyntax* maybeSet) {\n if (maybeSet->type == UpSetSyntaxType) {\n return 
maybeSet;\n } else {\n UpSyntax* newSet = UpCreateSyntaxWithType(heap, UpSetSyntaxType, maybeSet->line,\n maybeSet->col);\n UpSyntaxSetAppend(heap, newSet, maybeSet);\n return newSet;\n }\n}\n\nUpSyntax* UpParseAssignment(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpOperator op,\n UpSyntax* left, UpSyntax* right) {\n if (op == UpEqOp) {\n if (!_IsLeftType(left)) {\n UpScannerSetErrorAtNode(scanner, \"Illegal assignment\", left);\n return NULL;\n }\n\n UpAssignmentSyntax* n = (UpAssignmentSyntax*)node;\n n->op = op;\n n->left = left;\n n->right = right;\n return node;\n } else {\n UpBinarySyntax* increment =\n (UpBinarySyntax*)UpCreateSyntaxWithType(heap, UpBinarySyntaxType,\n node->line, node->col);\n increment->op = op;\n increment->left = left;\n increment->right = right;\n if (_IsLeftType(left)) {\n UpAssignmentSyntax* n = (UpAssignmentSyntax*)node;\n n->op = UpEqOp;\n n->left = left;\n n->right = (UpSyntax*)increment;\n return node;\n } else {\n return (UpSyntax*)increment;\n }\n }\n}\n\nUpSyntax* UpParseBinary(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpOperator op,\n UpSyntax* left, UpSyntax* right) {\n if (!left && _IsChannelOp(op)) {\n scanner->containsGenerator = true;\n }\n\n if (op == UpIndexOp && right && right->type == UpRangeSyntaxType) {\n op = UpSliceOp;\n }\n UpBinarySyntax* n = (UpBinarySyntax*)node;\n n->op = op;\n n->left = left;\n n->right = right;\n return node;\n}\n\nUpSyntax* UpParseUnary(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpOperator op,\n UpSyntax* operand) {\n if (!operand && _IsChannelOp(op)) {\n scanner->containsGenerator = true;\n }\n\n if (op == UpNegativeOp) {\n if (operand->type == UpIntSyntaxType) {\n ((UpNumberSyntax*)operand)->value.i *= -1;\n return operand;\n } else if (operand->type == UpFloatSyntaxType) {\n ((UpNumberSyntax*)operand)->value.f *= -1.0;\n return operand;\n } else if (operand->type == UpLongSyntaxType) {\n ((UpNumberSyntax*)operand)->value.l *= -1;\n return operand;\n }\n }\n\n 
UpUnarySyntax* n = (UpUnarySyntax*)node;\n n->op = op;\n n->operand = operand;\n return node;\n}\n\nUpSyntax* UpParseImport(UpArena* heap, UpSyntax* node, UpSyntax* names) {\n UpImportSyntax* n = (UpImportSyntax*)node;\n n->names = names;\n return node;\n}\n\nUpSyntax* UpParseWildcard(UpArena* heap, UpSyntax* node) {\n UpImportSyntax* n = (UpImportSyntax*)node;\n n->wildcard = true;\n return node;\n}\n\nUpSyntax* UpParseCall(UpArena* heap, UpSyntax* node, UpSyntax* callable, UpSyntax* args,\n bool isImperative, UpSyntax* schedule) {\n UpCallSyntax* n = (UpCallSyntax*)node;\n n->callable = callable;\n n->args = args;\n n->schedule = schedule;\n n->isImperative = isImperative;\n return node;\n}\n\nUpSyntax* UpParseProperty(UpArena* heap, UpSyntax* node, UpSyntax* left, const char* right) {\n UpPropertySyntax* n = (UpPropertySyntax*)node;\n n->left = left;\n n->right = right;\n return node;\n}\n\nUpSyntax* UpParseId(UpArena* heap, UpSyntax* node, const char* name) {\n UpIdSyntax* n = (UpIdSyntax*)node;\n n->name = name;\n return node;\n}\n\nUpSyntax* UpParseRange(UpArena* heap, UpSyntax* node, UpSyntax* from, UpSyntax* to, UpSyntax* by,\n bool isThrough) {\n UpRangeSyntax* n = (UpRangeSyntax*)node;\n n->from = from;\n n->to = to;\n n->by = by;\n n->isThrough = isThrough;\n return node;\n}\n\nUpSyntax* UpParseDefault(UpArena* heap, UpSyntax* node, UpSyntax* value, UpSyntax* value2) {\n UpSyntax2* n = (UpSyntax2*)node;\n n->value = value;\n n->value2 = value2;\n return node;\n}\n\nUpSyntax* UpParseBool(UpArena* heap, UpSyntax* node, bool value) {\n UpNumberSyntax* n = (UpNumberSyntax*)node;\n n->value.b = value;\n return node;\n}\n\nUpSyntax* UpParseInt(UpArena* heap, UpSyntax* node, int value, const char* unit) {\n UpNumberSyntax* n = (UpNumberSyntax*)node;\n n->value.i = value;\n if (unit && *unit) {\n UpSyntax* args = UpCreateSyntaxWithType(heap, UpSetSyntaxType,\n node->line, node->col);\n UpSyntaxSetAppend(heap, args, node);\n UpSyntax* id = 
UpCreateSyntaxWithType(heap, UpIdSyntaxType, node->line, node->col);\n UpParseId(heap, id, UpArenaCopyString(heap, unit));\n UpSyntax* call = UpCreateSyntaxWithType(heap, UpCallSyntaxType, node->line, node->col);\n return UpParseCall(heap, call, id, args, false, NULL);\n } else {\n return node;\n }\n return node;\n}\n\nUpSyntax* UpParseUInt(UpArena* heap, UpSyntax* node, unsigned int value, const char* unit) {\n UpNumberSyntax* n = (UpNumberSyntax*)node;\n n->value.u = value;\n n->unit = UpArenaCopyString(heap, unit);\n return node;\n}\n\nUpSyntax* UpParseLong(UpArena* heap, UpSyntax* node, long long value, const char* unit) {\n UpNumberSyntax* n = (UpNumberSyntax*)node;\n n->value.l = value;\n n->unit = UpArenaCopyString(heap, unit);\n return node;\n}\n\nUpSyntax* UpParseULong(UpArena* heap, UpSyntax* node, unsigned long long value, const char* unit) {\n UpNumberSyntax* n = (UpNumberSyntax*)node;\n n->value.ul = value;\n n->unit = UpArenaCopyString(heap, unit);\n return node;\n}\n\nUpSyntax* UpParseFloat(UpArena* heap, UpSyntax* node, double value, const char* unit) {\n UpNumberSyntax* n = (UpNumberSyntax*)node;\n n->value.f = value;\n n->unit = UpArenaCopyString(heap, unit);\n return node;\n}\n\nUpSyntax* UpParseStr(UpArena* heap, const char* value, const char* specialty,\n int line, int column) {\n UpSyntax* node = UpCreateSyntaxWithType(heap, UpStringSyntaxType, line, column);\n UpStringSyntax* n = (UpStringSyntax*)node;\n n->value = UpArenaCopyString(heap, value);\n n->specialty = specialty ? 
UpArenaCopyString(heap, specialty) : NULL;\n return node;\n}\n\nUpSyntax* UpParseList(UpArena* heap, UpSyntax* node, UpSyntax* items) {\n UpSyntax1* n = (UpSyntax1*)node;\n n->value = items;\n return node;\n}\n\nUpSyntax* UpParseMap(UpArena* heap, UpSyntax* node, UpSyntax* items) {\n UpSyntax1* n = (UpSyntax1*)node;\n n->value = items;\n return node;\n}\n\nUpSyntax* UpParseChannel(UpArena* heap, UpSyntax* node, UpSyntax* typeSig) {\n UpSyntax1* n = (UpSyntax1*)node;\n n->value = typeSig;\n return node;\n}\n\nUpSyntax* UpParseDeclaration(UpArena* heap, UpSyntax* node, UpAccessMode access, UpSyntax* head,\n UpSyntax* body, UpSyntax* where)\n{\n UpDeclarationSyntax* n = (UpDeclarationSyntax*)node;\n n->access = access;\n n->head = head;\n n->body = body;\n n->where = where;\n return node;\n}\n\nUpSyntax* UpParseFunction(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpSyntax* head,\n UpSyntax* body, bool isImperative) {\n UpFunctionSyntax* fn = (UpFunctionSyntax*)node;\n fn->head = head;\n fn->body = body;\n fn->isExpression = !isImperative;\n\n if (scanner->containsGenerator) {\n fn->isGenerator = true;\n scanner->containsGenerator = false;\n }\n return node;\n}\n\nUpSyntax* UpParseIterator(UpArena* heap, UpScanner* scanner, UpSyntax* node, UpSyntax* left,\n UpSyntax* iterable, UpSyntax* clause, UpSyntax* body, bool isOn,\n bool isWhile, bool isMapper) {\n if (!iterable) {\n scanner->containsGenerator = true;\n }\n\n UpIteratorSyntax* iter = (UpIteratorSyntax*)node;\n iter->left = left;\n iter->iterable = iterable;\n iter->clause = clause;\n iter->body = body;\n iter->isOn = isOn;\n iter->isWhile = isWhile;\n iter->isMapper = isMapper;\n return node;\n}\n\nUpSyntax* UpParseCFunction(UpArena* heap, UpSyntax* node, const char* name, UpSyntax* returns,\n UpSyntax* args) {\n UpCFunctionSyntax* n = (UpCFunctionSyntax*)node;\n n->name = UpArenaCopyString(heap, name);\n n->returns = returns;\n n->args = args;\n return node;\n}\n\nUpSyntax* UpParseCType(UpArena* heap, 
UpSyntax* node, const char* name) {\n UpCTypeSyntax* n = (UpCTypeSyntax*)node;\n n->name = UpArenaCopyString(heap, name);\n return node;\n}\n\nUpSyntax* UpParseCArgument(UpArena* heap, UpSyntax* node, UpSyntax* type, const char* name) {\n UpCArgumentSyntax* n = (UpCArgumentSyntax*)node;\n n->type = type;\n n->name = UpArenaCopyString(heap, name);\n return node;\n}\n\nUpSyntax* UpAddCTypePointer(UpSyntax* node) {\n UpCTypeSyntax* n = (UpCTypeSyntax*)node;\n ++n->pointerCount;\n return node;\n}\n\nUpSyntax* UpMakeCFunctionsPrivate(UpSyntax* node) {\n if (node->type == UpSetSyntaxType) {\n for (UpSyntaxItem* item = ((UpSetSyntax*)node)->first; item; item = item->next) {\n UpMakeCFunctionsPrivate(item->value);\n }\n } else if (node->type == UpCFunctionSyntaxType) {\n // UpCFunctionSyntax* func = (UpCFunctionSyntax*)node;\n // func->isPrivate = true;\n }\n return node;\n}\n\n\nUpSyntax* UpParseIf(UpArena* heap, UpSyntax* node, UpSyntax* transforms, UpSyntax* elsex) {\n UpIfSyntax* n = (UpIfSyntax*)node;\n n->transforms = transforms;\n n->elsex = elsex;\n return node;\n}\n\nUpSyntax* UpParseTransform(UpArena* heap, UpSyntax* node, UpSyntax* condition, UpSyntax* body) {\n UpTransformSyntax* n = (UpTransformSyntax*)node;\n n->condition = condition;\n n->body = body;\n return node;\n}\n\nUpSyntax* UpAppendTransform(UpSyntax* left, UpSyntax* right) {\n for (UpTransformSyntax* n = (UpTransformSyntax*)left; n; n = (UpTransformSyntax*)n->next) {\n if (!n->next) {\n n->next = right;\n break;\n }\n }\n return left;\n}\n\nUpSyntax* UpParseWhile(UpArena* heap, UpSyntax* node,\n UpSyntax* condition, UpSyntax* body) {\n UpWhileSyntax* n = (UpWhileSyntax*)node;\n n->condition = condition;\n n->body = body;\n return node;\n}\n\nUpSyntax* UpParseFor(UpArena* heap, UpSyntax* node, UpSyntax* left,\n UpSyntax* right, UpSyntax* body) {\n UpForSyntax* n = (UpForSyntax*)node;\n n->left = left;\n n->right = right;\n n->body = body;\n return node;\n}\n\nUpSyntax* UpParseArgument(UpArena* heap, 
UpSyntax* node, const char* name, UpSyntax* expr,\n bool isVariadic, UpSyntax* next) {\n UpArgumentSyntax* n = (UpArgumentSyntax*)node;\n n->name = name;\n n->expr = expr;\n n->isVariadic = isVariadic;\n n->next = next;\n return node;\n}\n\nUpSyntax* UpParseTry(UpArena* heap, UpSyntax* node,\n UpSyntax* tryBlock, UpSyntax* catchBlocks, UpSyntax* finallyBlock) {\n UpTrySyntax* n = (UpTrySyntax*)node;\n n->tryBlock = tryBlock;\n n->catchBlocks = catchBlocks;\n n->finallyBlock = finallyBlock;\n return node;\n}\n\nUpSyntax* UpParseCatch(UpArena* heap, UpSyntax* node, UpSyntax* binding, UpSyntax* statements) {\n UpCatchSyntax* n = (UpCatchSyntax*)node;\n n->binding = binding;\n n->statements = statements;\n return node;\n}\n\nUpSyntax* UpParseGroup(UpArena* heap, UpSyntax* expr, int line, int col) {\n if (!expr || expr->type == UpSetSyntaxType) {\n UpSyntax1* group = (UpSyntax1*)UpCreateSyntaxWithType(heap, UpGroupSyntaxType, line, col);\n group->value = expr;\n return (UpSyntax*)group;\n } else {\n return expr;\n }\n}\n\nUpSyntax* UpParseWhere(UpArena* heap, UpSyntax* node, UpSyntax* body, UpSyntax* assignments) {\n UpSyntax2* n = (UpSyntax2*)node;\n n->value = body;\n n->value2 = assignments;\n return node;\n}\n\nstatic UpArgumentSyntax* _FindLastArgument(UpArgumentSyntax* arg) {\n for (; arg; arg = (UpArgumentSyntax*)arg->next) {\n if (!arg->next) {\n return arg;\n }\n }\n return NULL;\n}\n\nUpSyntax* UpAppendArgs(UpArena* heap, UpScanner* scanner, UpSyntax* head, UpSyntax* arg) {\n if (head->type == UpIdSyntaxType) {\n UpCallSyntax* call = (UpCallSyntax*)UpCreateSyntaxWithType(heap, UpCallSyntaxType,\n head->line, head->col);\n call->callable = head;\n return UpAppendArgs(heap, scanner, (UpSyntax*)call, arg);\n } else if (head->type == UpCallSyntaxType) {\n UpCallSyntax* call = (UpCallSyntax*)head;\n UpArgumentSyntax* lastArg = _FindLastArgument((UpArgumentSyntax*)call->args);\n if (lastArg) {\n lastArg->next = arg;\n } else {\n call->args = arg;\n }\n return 
head;\n } else {\n UpScannerSetErrorAtNode(scanner, \"Syntax does not accept arguments\", head);\n return NULL;\n }\n}\n\nUpSyntax* UpAppendArg(UpArena* heap, UpSyntax* arg, UpSyntax* args) {\n UpArgumentSyntax* n = (UpArgumentSyntax*)arg;\n n->next = args;\n return arg;\n}\n\nUpSyntax* UpParseIs(UpArena* heap, UpSyntax* node, UpSyntax* subject, UpSyntax* transforms) {\n UpSyntax2* n = (UpSyntax2*)node;\n n->value = subject;\n n->value2 = transforms;\n return node;\n}\n\nUpSyntax* UpParseHas(UpArena* heap, UpSyntax* node, UpSyntax* subject, UpSyntax* transforms) {\n UpSyntax2* n = (UpSyntax2*)node;\n n->value = subject;\n n->value2 = transforms;\n return node;\n}\n\nUpSyntax* UpParseCast(UpArena* heap, UpSyntax* node, UpSyntax* expr,\n UpSyntax* typeSig) {\n UpCastSyntax* n = (UpCastSyntax*)node;\n n->expr = expr;\n n->typeSig = typeSig;\n return node;\n}\n\nUpSyntax* UpAppendType(UpArena* heap, UpSyntax* left, UpSyntax* right) {\n if (left->type == UpTypeSyntaxType) {\n UpTypeSyntax* n = (UpTypeSyntax*)left;\n UpSyntaxSetAppend(heap, n->params, right);\n return left;\n } else {\n UpTypeSyntax* n = (UpTypeSyntax*)UpCreateSyntaxWithType(heap, UpTypeSyntaxType,\n left->line, left->col);\n n->params = UpCreateSyntaxWithType(heap, UpSetSyntaxType, left->line, left->col);\n UpSyntaxSetAppend(heap, n->params, left);\n return UpAppendType(heap, (UpSyntax*)n, right);\n }\n}\n\nUpSyntax* UpParseSubtype(UpArena* heap, UpSyntax* node, UpSyntax* left,\n UpSyntax* right) {\n UpSyntax2* n = (UpSyntax2*)node;\n n->value = left;\n n->value2 = right;\n return node;\n}\n" }, { "alpha_fraction": 0.4813568890094757, "alphanum_fraction": 0.48920661211013794, "avg_line_length": 26.438461303710938, "blob_id": "39384cdf8926239719a9d43d0c5f063d31fa8e50", "content_id": "91c34947db3c6c388b8cfb1dcef0600ed3297d3b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3567, "license_type": "permissive", "max_line_length": 100, 
"num_lines": 130, "path": "/make.py", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom make import *\n\n# **************************************************************************************************\n# Configurations\n\nconfigurations = {\n \"iPhoneSimulator\": {\n \"platform\": \"ios\",\n \"device\": \"iPhoneSimulator\",\n \"sdk\": \"iPhoneSimulator4.1\",\n \"arch\": \"i386\"\n },\n \"iPadSimulator\": {\n \"platform\":\"ios\",\n \"device\": \"iPadSimulator\",\n \"sdk\": \"iPhoneSimulator3.2\",\n \"arch\": \"i386\"\n },\n \"iPhone4\": {\n \"platform\": \"ios\",\n \"device\": \"iPhoneOS\",\n \"sdk\": \"iPhoneOS4.1\",\n \"arch\": \"armv6\"\n },\n \"iPad\": {\n \"platform\":\"ios\",\n \"device\": \"iPhoneOS\",\n \"sdk\": \"iPhoneOS3.2\",\n \"arch\": \"armv6\"\n },\n \"Android\": {\n \"platform\":\"android\",\n \"device\": \"android\",\n \"sdk\": \"android-8\",\n # \"arch\": \"armv5te\",\n \"arch\": \"armv7-a\"\n },\n \"MacOSX\": {\n \"platform\": \"mac\",\n \"device\": \"\",\n \"sdk\": \"\",\n \"arch\": \"\"\n }\n}\n\n# **************************************************************************************************\n# Tests and metrics\n\ntests = ('./tests/core', ['uptests'])\nmetrics = ('./metrics', ['benchmarks'])\n\n# **************************************************************************************************\n# External Projects\n\n@project\ndef libffi(self):\n self.external = True\n self.path = \"/usr/local/lib/libffi-3.0.11\"\n self.exports = { \"include\" : \"libffi\" }\n self.includes = [\"/usr/local/lib/libffi-3.0.11/include\"]\n self.libs = [\"/usr/local/lib/libffi.a\"]\n\n@project\ndef pcre(self):\n self.external = True\n self.path = \"/usr/local\"\n self.exports = { \"include\" : \"pcre\" }\n self.includes = [\"/usr/local/include/pcre\"]\n self.libs = [\"/usr/local/lib/libpcre.a\"]\n\n# **************************************************************************************************\n# 
Projects\n\n@project\ndef upversion(self):\n self.path = \".git/logs/refs/heads\"\n self.build = GitVersion()\n\n# @project\n# def upbenchmarks(self):\n# self.path = \"metrics\"\n# self.build = Probes()\n\n@project\ndef upvm(self):\n self.path = \"src/vm\"\n self.exports = { \"include\" : \"Up\" }\n self.build = Probes() >> FlexParse() >> BisonParse() >> Compile() >> LinkStaticLib()\n\n@project\ndef upregex(self):\n self.path = \"src/regex\"\n self.exports = { \"include\" : \"UpRegex\" }\n self.ignoreLibs = [upvm]\n self.build = Compile() >> LinkDynamicLib()\n\n # Prevents an ld warning caused by libffi related to C++ exceptions (which we don't need)\n self.linkerFlags = \"-Wl,-no_compact_unwind\"\n\n@project\ndef upjson(self):\n self.path = \"src/json\"\n self.exports = { \"include\" : \"UpJSON\" }\n self.ignoreLibs = [upvm]\n self.build = Compile() >> LinkDynamicLib()\n\n # Prevents an ld warning caused by libffi related to C++ exceptions (which we don't need)\n self.linkerFlags = \"-Wl,-no_compact_unwind\"\n\n@project\ndef up(self):\n self.path = \"src/up\"\n self.build = Compile() >> LinkExecutable()\n if self.platform == 'mac':\n self.frameworks = [\"Foundation\", \"AppKit\", \"QuartzCore\", \"WebKit\", \"OpenGL\"]\n\n # Prevents an ld warning caused by libffi related to C++ exceptions (which we don't need)\n self.linkerFlags = \"-Wl,-no_compact_unwind\"\n\n@project\ndef upstdlib(self):\n self.path = \"lib\"\n self.install = [(\".\", \"lib/up\")]\n self.build = StaticFiles()\n\n# **************************************************************************************************\n\nmake(configurations, tests, metrics)\n" }, { "alpha_fraction": 0.6606190204620361, "alphanum_fraction": 0.6606190204620361, "avg_line_length": 33.703704833984375, "blob_id": "fb43701e5fc875db91e510040988b8b7f805e788", "content_id": "e2f85f6c89dcc95d24100ea9f187259b786d93aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", 
"length_bytes": 937, "license_type": "permissive", "max_line_length": 98, "num_lines": 27, "path": "/src/vm/UpCompiler.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPCOMPILER_H\n#define UP_UPCOMPILER_H\n\n#include \"Up/UpGlobal.h\"\n\ntypedef struct UpCompilerFrame UpCompilerFrame;\n\ntypedef struct {\n char* sourcePath;\n char* moduleName;\n UpCompilerFrame* frame;\n UpScope* scope;\n UpBuffer* probeId;\n bool isCompilingUpDotUp;\n} UpCompiler;\n\nUpCompiler* UpCompilerCreate();\n\nUpStatus UpCompilerCompileImperative(UpCompiler* compiler, UpSyntax* ast, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef);\nUpStatus UpCompilerCompileDeclarative(UpCompiler* compiler, UpSyntax* ast, const char* sourcePath,\n const char* moduleName, UpFunctionDef** outDef,\n UpClassDef** outClassDef);\nUpStatus UpCompilerCompileEval(UpCompiler* self, UpSyntax* ast, UpFunctionDef* caller,\n UpFunctionDef** outDef);\n\n#endif // UP_UPCOMPILER_H\n" }, { "alpha_fraction": 0.5339847207069397, "alphanum_fraction": 0.5403568148612976, "avg_line_length": 21.84465980529785, "blob_id": "4485df324e05c4a50d1e9bff9a59a03a2e87ecf5", "content_id": "4b04a786dd5997206f802c333106ba683dd7b954", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2354, "license_type": "permissive", "max_line_length": 99, "num_lines": 103, "path": "/src/vm/UpArena.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpLibrary.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nUpArena* UpArenaCreate() {\n UpArena* self = malloc(sizeof(UpArena));\n memset(self, 0, sizeof(UpArena));\n return self;\n}\n\nvoid UpArenaFree(UpArena* self) {\n free(self);\n}\n\nvoid* UpArenaAllocate(UpArena* self, size_t size, const char* typeName) {\n void* o = 
malloc(size);\n if (!o) {\n UpTerminate(\"Out of memory\");\n }\n memset(o, 0, size);\n return o;\n}\n\nvoid* UpArenaAllocateCopy(UpArena* self, size_t size, void* source, size_t sourceSize) {\n void* o = malloc(size);\n if (!o) {\n UpTerminate(\"Out of memory\");\n }\n if (source) {\n memcpy(o, source, sourceSize);\n if (size > sourceSize) {\n memset((char*)o+sourceSize, 0, size - sourceSize);\n }\n } else {\n memset(o, 0, size);\n }\n return o;\n}\n\nvoid* UpArenaReallocate(UpArena* self, size_t size, void* source, size_t sourceSize) {\n void* o = realloc(source, size);\n if (!o) {\n UpTerminate(\"Out of memory\");\n }\n if (source) {\n if (size > sourceSize) {\n memset((char*)o+sourceSize, 0, size - sourceSize);\n }\n } else {\n memset(o, 0, size);\n }\n return o;\n}\n\nvoid* UpArenaAllocateBuffer(UpArena* self, size_t size) {\n void* o = malloc(size);\n if (!o) {\n UpTerminate(\"Out of memory\");\n } else {\n // XXXjoe Is this even necessary?\n memset(o, 0, size);\n }\n return o;\n}\n\nchar* UpArenaCopyString(UpArena* self, const char* s) {\n if (s) {\n char* s2 = UpArenaNewString(self, strlen(s));\n strcpy(s2, s);\n return s2;\n } else {\n return NULL;\n }\n}\n\nchar* UpArenaCopyStringN(UpArena* self, const char* s, size_t length) {\n if (s) {\n char* s2 = UpArenaNewString(self, length);\n strncpy(s2, s, length);\n return s2;\n } else {\n return NULL;\n }\n}\n\nchar* UpArenaFormatString(UpArena* self, const char* str, ...) 
{\n va_list args;\n\n va_start(args, str);\n int count = vsnprintf(NULL, 0, str, args);\n va_end(args);\n\n char* buf = UpArenaNewString(self, count);\n\n va_start(args, str);\n vsnprintf(buf, count+1, str, args);\n va_end(args);\n\n return buf;\n}\n" }, { "alpha_fraction": 0.5286519527435303, "alphanum_fraction": 0.5313338041305542, "avg_line_length": 31.391843795776367, "blob_id": "0068495aff5764b018564b560de5763932729806", "content_id": "68f3fb2d759a078c2c9331417a4d1d417509a167", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 18271, "license_type": "permissive", "max_line_length": 103, "num_lines": 564, "path": "/src/vm/UpScanner.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpScanner.h\"\n#include \"Up/UpContext.h\"\n#include \"UpParsing.h\"\n#include \"UpParser.h\"\n#include \"UpArray.h\"\n#include \"UpBuffer.h\"\n#include \"Up/UpArena.h\"\n#include \"Up.tab.h\"\n#include \"Up.yy.h\"\n\n// ************************************************************************************************\n\nextern int UpScan(yystype* yylval, yyltype* yylloc, UpScanner* scanner, void* yyscanner);\n\ntypedef struct {\n int token;\n yystype lval;\n yyltype loc;\n} TokenData;\n\ntypedef enum {\n ExpectIndent,\n ExpectNewLine\n} ScanState;\n\nstatic int kIndentSpaces = 4;\n\n// ************************************************************************************************\n\nstatic bool _IsNewLineToken(int token) {\n return token == NEWLINE || token == CLOSE_BLOCK || token == 0;\n}\n\nstatic bool _IsWhitespace(char c) {\n return c == '\\n' || c == '\\r' || c == ' ' || c == '\\t';\n}\n\nstatic bool _IsNewLine(char c) {\n return c == '\\n' || c == '\\r';\n}\n\nstatic void _PushToken(UpScanner* self, int token, yystype* yylval, yyltype* yylloc) {\n TokenData data;\n data.token = token;\n if (yylval) {\n data.lval = *yylval;\n } else {\n 
yystype lval = {-1};\n data.lval = lval;\n }\n if (yylloc) {\n data.loc = *yylloc;\n } else {\n yyltype loc = {0,0,0,0};\n data.loc = loc;\n }\n UpArrayAppend(self->tokenQueue, &data);\n}\n\nstatic int _PopToken(UpScanner* self, yystype* yylval, yyltype* yylloc) {\n TokenData tokenData;\n UpArrayShift(self->tokenQueue, &tokenData);\n\n *yylval = tokenData.lval;\n *yylloc = tokenData.loc;\n\n return self->lastToken = tokenData.token;\n}\n\nstatic UpStatus _FlushIndentation(UpScanner* self, yyltype* yylloc) {\n // If the previous token was a new line, then we have to deal with indentation changes\n // and delimiting of lines\n if (_IsNewLineToken(self->lastToken)) {\n int indentDiff = self->lastIndent - self->indentLevel;\n if (indentDiff == 0) {\n // Same indentation as before - just delimit lines unless this is the first token\n if (!self->firstToken) {\n _PushToken(self, NEWLINE, NULL, yylloc);\n } else {\n self->firstToken = false;\n }\n } else if (indentDiff == 1) {\n // Increased indentation - open a new block\n self->indentLevel = self->lastIndent;\n\n _PushToken(self, OPEN_BLOCK, NULL, yylloc);\n } else if (indentDiff < 0) {\n // Decreased indentation - close open blocks until new level\n self->indentLevel = self->lastIndent;\n\n _PushToken(self, NEWLINE, NULL, yylloc);\n for (int i = indentDiff; i < 0; ++i) {\n _PushToken(self, CLOSE_BLOCK, NULL, yylloc);\n _PushToken(self, NEWLINE, NULL, yylloc);\n }\n } else {\n UpScannerSetErrorAt(self, \"Invalid indentation\", yylloc->first_line, yylloc->first_column);\n return UpFailure;\n }\n }\n return UpSuccess;\n}\n\nstatic void _EnqueueToken(UpScanner* self, int token, yystype* yylval, yyltype* yylloc) {\n _FlushIndentation(self, yylloc);\n _PushToken(self, token, yylval, yylloc);\n}\n\nstatic bool _IsAllWhitespace(char* text) {\n for (char* c = text; *c; ++c) {\n if (!_IsWhitespace(*c)) {\n return false;\n }\n }\n return true;\n}\n\nstatic int _ReadIndent(UpScanner* self) {\n int indent = 0;\n while (1) {\n char 
s[1];\n if (!UpScannerRead(self, s, 1)) {\n break;\n }\n\n if (*s == ' ') {\n ++indent;\n } else {\n --self->cursor;\n break;\n }\n }\n return indent;\n}\n\nstatic char* _ReadIndentedLines(UpScanner* self, char quote, int* outIndent) {\n // Move location to next line since this is always called when at the end of a line\n ++self->line;\n self->column = 1;\n\n UpBuffer* buffer = UpBufferCreate(UpGetHeap());\n\n ScanState state = ExpectIndent;\n int indent = 0;\n int expectedIndent = (self->indentLevel+1) * kIndentSpaces;\n bool trimTrailingWhitespace = true;\n char s[1];\n\n while (1) {\n if (!UpScannerRead(self, s, 1)) {\n break;\n }\n\n if (state == ExpectIndent) {\n if (*s == quote) {\n trimTrailingWhitespace = false;\n break;\n } else if (indent == expectedIndent) {\n state = ExpectNewLine;\n } else if (*s == ' ') {\n ++indent;\n } else if (*s == '\\n') {\n state = ExpectNewLine;\n } else if (indent < expectedIndent) {\n --self->cursor;\n *outIndent = indent;\n break;\n } else {\n state = ExpectNewLine;\n }\n }\n if (state == ExpectNewLine) {\n if (*s == '\\n') {\n state = ExpectIndent;\n indent = 0;\n }\n UpBufferWriteChar(buffer, s[0]);\n }\n }\n\n char* result = UpBufferString(buffer);\n result[UpBufferCount(buffer)] = 0;\n\n int trailingIndex = strlen(result)-1;\n if (trimTrailingWhitespace) {\n while (trailingIndex >= 0 && _IsWhitespace(result[trailingIndex])) {\n result[trailingIndex--] = 0;\n }\n } else {\n if (_IsNewLine(result[trailingIndex])) {\n result[trailingIndex] = 0;\n }\n }\n\n return result;\n}\n\nstatic void _ParseDelimiter(UpScanner* self, char* text, yyltype* yylloc) {\n char* begin = text;\n for (char* c = begin; ; ++c) {\n if (*c == '.' 
|| *c == 0) {\n char* value = UpArenaCopyStringN(UpGetHeap(), begin, c-begin);\n\n yystype lval;\n lval.stringValue = value;\n _PushToken(self, IDENTIFIER, &lval, yylloc);\n\n if (*c == '.') {\n _PushToken(self, DOT, NULL, yylloc);\n begin = c+1;\n if (*(c+1) == '`') {\n _PushToken(self, POUND, NULL, yylloc);\n ++c;\n begin = c+1;\n }\n } else {\n break;\n }\n }\n }\n}\n\nstatic UpSyntax* _ParseCFunction(UpScanner* self, const char* value, const char* library,\n yyltype* yylloc) {\n UpSyntax* node;\n if (!UpParseC(value, &node)) {\n UpScannerSetException(self, UpGetError());\n return NULL;\n }\n\n if (node->type == UpSetSyntaxType) {\n for (UpSyntaxItem* item = ((UpSetSyntax*)node)->first; item; item = item->next) {\n UpCFunctionSyntax* cfunc = (UpCFunctionSyntax*)item->value;\n cfunc->library = library;\n }\n } else if (node->type == UpCFunctionSyntaxType) {\n UpCFunctionSyntax* cfunc = (UpCFunctionSyntax*)node;\n cfunc->library = library;\n }\n\n node->line = yylloc->first_line;\n node->col = yylloc->first_column;\n\n yystype yylval;\n yylval.objectValue = node;\n _PushToken(self, CFUNCTION, &yylval, yylloc);\n _PushToken(self, NEWLINE, NULL, yylloc);\n\n return node;\n}\n\nstatic void _ParseRawString(UpScanner* self, char* text, yyltype* yylloc) {\n yystype yylval;\n yylval.objectValue = UpParseStr(UpGetHeap(), text, NULL,\n yylloc->first_line, yylloc->first_column);\n _PushToken(self, STRING, &yylval, yylloc);\n}\n\nstatic UpStatus _ParseFormattedString(UpScanner* self, char* text, yyltype* yylloc) {\n if (!text || !*text) {\n _ParseRawString(self, text, yylloc);\n }\n\n UpScanner* scanner = UpScannerCreate();\n scanner->addTrailingLine = false;\n UpScannerSetInput(scanner, text);\n\n yyscan_t yyscanner;\n Uplex_init_extra(scanner, &yyscanner);\n scanner->yyscanner = yyscanner;\n\n UpPushState(TextParseState, yyscanner);\n\n UpStatus status = UpSuccess;\n bool textMode = true;\n bool delimit = false;\n\n yystype yylval;\n for (int token = 1; token;) {\n token = 
UpScannerNext(scanner, &yylval, yylloc);\n if (textMode) {\n if (token == STRING) {\n if (delimit) {\n _PushToken(self, CONCATSTR, NULL, yylloc);\n } else {\n delimit = true;\n }\n _PushToken(self, token, &yylval, yylloc);\n } else if (token == INLINE_EXPR) {\n if (delimit) {\n _PushToken(self, CONCATSTR, NULL, yylloc);\n } else {\n delimit = true;\n }\n\n _ParseDelimiter(self, yylval.stringValue, yylloc);\n } else {\n // XXXjoe Do we really want to just ignore these tokens?\n }\n } else {\n if (token == FSTRING) {\n _PushToken(self, LP, NULL, yylloc);\n if (!_ParseFormattedString(self, yylval.stringValue, yylloc)) {\n status = UpFailure;\n break;\n }\n _PushToken(self, RP, NULL, yylloc);\n } else {\n _PushToken(self, token, &yylval, yylloc);\n }\n }\n }\n\n UpPopState(yyscanner);\n Uplex_destroy(yyscanner);\n return status;\n}\n\nstatic UpStatus _ParseString(UpScanner* self, char* text, char* specialty, bool formatted,\n yyltype* yylloc) {\n if (!specialty) {\n if (formatted) {\n _ParseFormattedString(self, text, yylloc);\n } else {\n _ParseRawString(self, text, yylloc);\n }\n return UpSuccess;\n } else if (!strcmp(specialty, \"C\")) {\n return _ParseCFunction(self, text, NULL, yylloc);\n } else if (strstr(specialty, \"C@\") == specialty) {\n return _ParseCFunction(self, text, specialty+2, yylloc);\n } else {\n yystype yylval;\n yylval.stringValue = specialty;\n _EnqueueToken(self, IDENTIFIER, &yylval, yylloc);\n _PushToken(self, LP, NULL, yylloc);\n _ParseString(self, text, NULL, formatted, yylloc);\n _PushToken(self, RP, NULL, yylloc);\n\n return UpSuccess;\n }\n}\n\n// ************************************************************************************************\n\nUpScanner* UpScannerCreate() {\n\tUpScanner* self = UpArenaNew(UpGetHeap(), UpScanner);\n self->tokenQueue = UpArrayCreate(UpGetHeap(), sizeof(TokenData));\n\tself->firstToken = true;\n self->line = 1;\n self->addTrailingLine = true;\n\treturn self;\n}\n\nvoid UpScannerSetInput(UpScanner* self, 
char* input) {\n\tself->cursor = input;\n\tself->cursorMax = input + strlen(input);\n self->cursor = input;\n}\n\nint UpScannerGetLineNumber(UpScanner* self) {\n\treturn self->line;\n}\n\nint UpScannerGetColumnNumber(UpScanner* self) {\n\treturn self->column;\n}\n\nvoid UpScannerLocate(UpScanner* self, const char* text, yyltype* yylloc) {\n yylloc->first_line = self->line;\n yylloc->first_column = self->column;\n const char* before = text;\n for (char* t = strstr(text, \"\\n\"); t; t = strstr(t, \"\\n\")) {\n ++self->line;\n self->column = 1;\n before = ++t;\n }\n self->column += strlen(before);\n}\n\nint UpScannerRead(UpScanner* self, char* buf, long int max_size) {\n max_size = 1;\n\n int bytesRemaining = self->cursorMax - self->cursor;\n if (bytesRemaining == 0) {\n if (self->addTrailingLine) {\n // Return a new line at the end of file to ensure that\n // Flex patterns expecting a new line will match at EOF\n memcpy(buf, \"\\n\", 1);\n ++self->cursor;\n return 1;\n } else {\n return 0;\n }\n } else if (bytesRemaining == -1) {\n return 0;\n } else {\n if (bytesRemaining < max_size) {\n memcpy(buf, self->cursor, bytesRemaining);\n // printf(\"read(%d) %d\\n\", bytesRemaining, *self->cursor); fflush(stdout);\n self->cursor = self->cursorMax;\n return bytesRemaining;\n } else {\n memcpy(buf, self->cursor, max_size);\n // printf(\"read(%d) %d\\n\", max_size, *self->cursor); fflush(stdout);\n self->cursor += max_size;\n return max_size;\n }\n }\n}\n\nint UpScannerNext(UpScanner* self, yystype* yylval, yyltype* yylloc) {\n if (self->error) {\n return ERROR_TOKEN;\n }\n\n // First, return tokens that have been queued up\n if (UpArrayCount(self->tokenQueue)) {\n int token = _PopToken(self, yylval, yylloc);\n if (token == NEWLINE) {\n if (yylval->indentLevel >= 0) {\n self->lastIndent = yylval->indentLevel;\n }\n self->lastToken = token;\n }\n return token;\n }\n\n // Read tokens from the stream until we find one that is not whitespace\n int token;\n do {\n token = 
UpScan(yylval, yylloc, self, self->yyscanner);\n if (token == ADD || token == SUBTRACT) {\n if (self->lastToken == NEWLINE || self->lastToken == OPEN_BLOCK || !self->lastToken) {\n token = token == ADD ? PUBLIC : PRIVATE;\n _EnqueueToken(self, token, yylval, yylloc);\n } else {\n _EnqueueToken(self, token, yylval, yylloc);\n }\n } else if (token == STRING) {\n UpStringSyntax* string = (UpStringSyntax*)(yylval->objectValue);\n if (string->specialty) {\n if (!_ParseString(self, string->value, string->specialty, false, yylloc)) {\n return ERROR_TOKEN;\n }\n } else {\n _EnqueueToken(self, token, yylval, yylloc);\n }\n } else if (token == FSTRING) {\n if (!_FlushIndentation(self, yylloc)) return ERROR_TOKEN;\n\n UpStringSyntax* string = (UpStringSyntax*)yylval->objectValue;\n if (!_ParseString(self, string->value, string->specialty, true, yylloc)) {\n return ERROR_TOKEN;\n }\n } else if (token == STRING_LINE || token == FSTRING_LINE) {\n if (!_FlushIndentation(self, yylloc)) return ERROR_TOKEN;\n\n UpStringSyntax* string = (UpStringSyntax*)(yylval->objectValue);\n\n if (string->value) {\n if (token == STRING_LINE) {\n if (string->specialty) {\n if (!_ParseString(self, string->value, string->specialty, false, yylloc)) {\n return ERROR_TOKEN;\n }\n } else {\n _PushToken(self, STRING, yylval, yylloc);\n }\n } else {\n if (!_ParseString(self, string->value, string->specialty, true, yylloc)) {\n return ERROR_TOKEN;\n }\n }\n\n int indent = _ReadIndent(self);\n yystype yylval;\n yylval.indentLevel = indent / kIndentSpaces;\n _PushToken(self, NEWLINE, &yylval, yylloc);\n } else {\n int indent = -1;\n if (token == STRING_LINE) {\n char* text = _ReadIndentedLines(self, '\\'', &indent);\n if (!UpUnescapeString(self, text)) {\n return UpFailure;\n }\n\n if (!_ParseString(self, text, string->specialty, false, yylloc)) {\n return ERROR_TOKEN;\n }\n } else {\n char* text = _ReadIndentedLines(self, '\"', &indent);\n if (!_ParseString(self, text, string->specialty, true, yylloc)) {\n 
return ERROR_TOKEN;\n }\n }\n\n if (indent >= 0) {\n yystype yylval;\n yylval.indentLevel = indent / kIndentSpaces;\n _PushToken(self, NEWLINE, &yylval, yylloc);\n }\n }\n } else if (token == NEWLINE) {\n if (yylval->indentLevel >= 0) {\n self->lastIndent = yylval->indentLevel;\n }\n self->lastToken = token;\n } else if (token == OPERATOR || token == OPERATORQ) {\n if (!_FlushIndentation(self, yylloc)) return ERROR_TOKEN;\n _PushToken(self, token == OPERATOR ? OPEN_OPERATOR : OPEN_OPERATORQ, NULL, yylloc);\n _ParseDelimiter(self, yylval->stringValue, yylloc);\n _PushToken(self, CLOSE_OPERATOR, NULL, yylloc);\n } else if (token) {\n _EnqueueToken(self, token, yylval, yylloc);\n } else {\n self->lastIndent = 0;\n self->lastToken = 0;\n _EnqueueToken(self, token, NULL, yylloc);\n }\n } while (token == NEWLINE);\n\n int finalToken = _PopToken(self, yylval, yylloc);\n return finalToken;\n}\n\nvoid UpScannerSetError(UpScanner* self, const char* description) {\n UpScannerSetErrorAt(self, description, self->line, self->column);\n}\n\nvoid UpScannerSetErrorAt(UpScanner* self, const char* description, int line, int column) {\n // If we haven't seen a token yet, that's just an empty file, which is not an error\n if (self->lastToken || strcmp(description, \"syntax error, unexpected $end\")) {\n // Usually we prohibit double exceptions, but in this case, Flex will\n // always return an error after we have already indicated an error from\n // within our own parsing code, so we ignore the Flex error\n if (!self->error) {\n self->error = UpSetError(description);\n UpSetCompileLocation(line, column);\n }\n }\n}\n\nvoid UpScannerSetErrorAtNode(UpScanner* self, const char* description, UpSyntax* node) {\n UpScannerSetErrorAt(self, description, node->line, node->col);\n}\n\nvoid UpScannerSetException(UpScanner* self, UpException* exc) {\n // Usually we prohibit double exceptions, but in this case, Flex will\n // always return an error after we have already indicated an error from\n // 
within our own parsing code, so we ignore the Flex error\n if (!self->error) {\n self->error = exc;\n }\n UpSetCompileLocation(self->line, self->column);\n}\n\nvoid UpScannerPushToken(UpScanner* self, int token) {\n TokenData data;\n data.token = token;\n UpArrayAppend(self->tokenQueue, &data);\n}\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 26.275861740112305, "blob_id": "db5fc35de43b240c5721a3e91c78fffe0bc3cb41", "content_id": "cb14c1d7c5fb8af407d3c09f3d9b880234f13a3a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 792, "license_type": "permissive", "max_line_length": 100, "num_lines": 29, "path": "/src/vm/UpEvents.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPEVENTS_H\n#define UP_UPEVENTS_H\n\n#include \"Up/UpGlobal.h\"\n\n// *************************************************************************************************\n\ntypedef enum {\n UpEventNone,\n UpEventResumeTask,\n UpEventResumeTaskImmediate,\n UpEventStartTask,\n UpEventStartTaskImmediate,\n} UpEventType;\n\nstruct UpEvent {\n UpEventType eventType;\n UpThreadId threadId;\n UpTask* task;\n UpFunction* function;\n};\n\n// *************************************************************************************************\n\nUpEvent* UpCreateNullEvent(UpArena* heap);\nUpEvent* UpCreateResumeEvent(UpArena* heap, UpTask* task, char* cursor, bool isImmediate);\nUpEvent* UpCreateStartEvent(UpArena* heap, UpFunction* function, bool isImmediate);\n\n#endif // UP_UPEVENTS_H\n" }, { "alpha_fraction": 0.7303754091262817, "alphanum_fraction": 0.7303754091262817, "avg_line_length": 16.176469802856445, "blob_id": "067141aeeaf14777763a15646772ff843c00b502", "content_id": "a52fcb221307c718aedb2006a37985a3b59e4592", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 293, "license_type": 
"permissive", "max_line_length": 45, "num_lines": 17, "path": "/src/vm/include/UpCPointer.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPCPOINTER_H\n#define UP_UPCPOINTER_H\n\n#include \"Up/UpObject.h\"\n\nstruct UpCPointer {\n UpObject __base;\n void* ptr;\n};\n\nUpCPointer* UpCPointerCreate(void* ptr);\n\nchar* UpCPointerGetAddress(UpCPointer* self);\n\nbool UpCPointerIsNull(UpCPointer* self);\n\n#endif // UP_UPCPOINTER_H\n" }, { "alpha_fraction": 0.5970149040222168, "alphanum_fraction": 0.5982797741889954, "avg_line_length": 28.714284896850586, "blob_id": "256ea0c748dc3227b0c0a1a4de6d5993fe547286", "content_id": "e03f1ebcb3abb491c99dab54d122c63ea27c50bd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3953, "license_type": "permissive", "max_line_length": 100, "num_lines": 133, "path": "/src/vm/UpException.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"Up/UpException.h\"\n#include \"Up/UpObject.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpFunction.h\"\n#include \"UpBuffer.h\"\n#include \"UpArray.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic int _GetFrameLine(UpCallFrame* frame) {\n int line = -1;\n\n UpBuffer* ops = frame->functionDef->ops;\n int opOffset = frame->cursor - ops->buf;\n UpArray* lines = frame->functionDef->lines;\n int lineCount = UpArrayCount(lines);\n\n for (int i = 0; i < lineCount; ++i) {\n UpLineItem item;\n UpArrayGet(lines, i, &item);\n if (item.opOffset >= opOffset) {\n break;\n }\n line = item.lineOffset;\n }\n return line;\n}\n\nstatic char* _MakeRelativePath(char* path) {\n if (!path) {\n return NULL;\n }\n\n UpArena* heap = UpGetHeap();\n UpArray* searchPaths = UpGetSearchPaths();\n int len = UpArrayCount(searchPaths);\n for 
(int i = 0; i < len; ++i) {\n const char* searchPath;\n UpArrayGet(searchPaths, i, &searchPath);\n\n if (strstr(path, searchPath)) {\n return UpArenaFormatString(heap, \".%s\", path + strlen(searchPath));\n }\n }\n return path;\n}\n\n// ************************************************************************************************\n\nUpException* UpExceptionCreate(const char* description) {\n UpClass* cls = UpGetBuiltinClasses()->exceptionClass;\n UpException* self = (UpException*)(cls\n ? UpClassInstantiate(cls)\n : UpObjectCreateWithClass(UP_BUILTIN(exception)));\n\n self->description = UpArenaCopyString(UpGetHeap(), description);\n return self;\n}\n\nvoid UpExceptionInit(UpException* self, const char* description) {\n self->description = UpArenaCopyString(UpGetHeap(), description);\n}\n\nchar* UpExceptionGetDescription(UpException* self) {\n return self->description;\n}\n\nUpCompileFrame* UpExceptionGetCompileFrame(UpException* self) {\n return self->compileFrame;\n}\n\nvoid UpExceptionSetCompileFrame(UpException* self, UpCompileFrame* frame) {\n self->compileFrame = frame;\n}\n\nUpCallFrame* UpExceptionGetCallFrame(UpException* self) {\n return self->callFrame;\n}\n\nvoid UpExceptionSetCallFrame(UpException* self, UpCallFrame* frame) {\n self->callFrame = frame;\n}\n\nvoid UpExceptionLog(UpException* self) {\n if (self->compileFrame) {\n UpCompileFrame* frame = self->compileFrame;\n DO_EXC(self->description ? self->description : \"\", frame->file, frame->line, 0);\n } else if (self->callFrame) {\n UpCallFrame* frame = self->callFrame;\n int line = _GetFrameLine(frame);\n DO_EXC(self->description ? 
self->description : \"\", frame->functionDef->sourcePath, line, 0);\n }\n}\n\nchar* UpExceptionGetStackTrace(UpException* self) {\n UpBuffer* buf = UpBufferCreate(UpGetHeap());\n\n if (self->description) {\n UpBufferPrint(buf, \"Exception: \");\n UpBufferPrint(buf, \"%s\", self->description);\n UpBufferPrint(buf, \"\\n\");\n } else {\n UpBufferPrint(buf, \"Exception\\n\");\n }\n\n if (self->compileFrame) {\n UpCompileFrame* frame = self->compileFrame;\n while (frame) {\n // Use paths relative to cwd so that our call stacks are less verbose\n char* path = _MakeRelativePath(frame->file);\n UpBufferPrint(buf, \"%s, line %d\\n\", path, frame->line);\n frame = frame->previous;\n }\n }\n\n if (self->callFrame) {\n UpCallFrame* frame = self->callFrame;\n while (frame) {\n char* path = _MakeRelativePath(frame->functionDef->sourcePath);\n\n int line = _GetFrameLine(frame);\n const char* name = frame->functionDef->name;\n UpBufferPrint(buf, \"%s, line %d, in %s\\n\", path, line, name);\n frame = frame->previous;\n }\n }\n\n return UpBufferString(buf);\n}\n" }, { "alpha_fraction": 0.5782327651977539, "alphanum_fraction": 0.5810344815254211, "avg_line_length": 25.965116500854492, "blob_id": "c4dabaa81d1822bd8f173b8907c328ecdc3611f7", "content_id": "93420e4bc70ba9c224f0fcbb7974de18468916dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4640, "license_type": "permissive", "max_line_length": 100, "num_lines": 172, "path": "/src/vm/UpBuffer.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": " \n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpBuffer.h\"\n#include \"Up/UpArena.h\"\n\n// ************************************************************************************************\n\nstatic const int kAllocationSize = 1024*1;\n\n// ************************************************************************************************\n\nbool _MakeRoom(UpBuffer* self, size_t count) {\n int 
startingOffset = self->cursor - self->buf;\n \n if (self->cursor+count >= self->buf+self->length) {\n int oldLength = self->length;\n self->length += count < kAllocationSize ? kAllocationSize : count; \n self->buf = (char*)UpArenaResizeArray(self->heap, self->buf, oldLength, char, self->length);\n self->cursor = self->buf+startingOffset;\n return true;\n } else {\n return false;\n }\n}\n\nstatic int _Remaining(UpBuffer* self) {\n return self->length - ((self->cursor - self->buf)+1);\n}\n\nvoid _UpBufferPrintIndent(UpBuffer* self) {\n for (int i = 0; i < self->indentLevel; ++i) {\n UpBufferWriteChar(self, ' ');\n }\n}\n\n// ************************************************************************************************\n\nUpBuffer* UpBufferCreate(UpArena* heap) {\n return UpBufferCreateWithSize(heap, kAllocationSize);\n}\n\nUpBuffer* UpBufferCreateWithSize(UpArena* heap, size_t size) {\n UpBuffer* self = UpArenaNew(heap, UpBuffer);\n self->heap = heap;\n self->length = size;\n self->buf = UpArenaNewArrayBuffer(heap, char, size);\n self->cursor = self->buf;\n return self;\n}\n\nint UpBufferCount(UpBuffer* self) {\n return self->cursor - self->buf;\n}\n\nchar* UpBufferString(UpBuffer* self) {\n return self->buf;\n}\n\nint UpBufferCursor(UpBuffer* self) {\n return self->cursor - self->buf;\n}\n\nvoid UpBufferSeek(UpBuffer* self, int cursor) {\n self->cursor = self->buf + cursor;\n}\n\nvoid UpBufferSeekChar(UpBuffer* self, char c) {\n char* newOffset = strchr(self->buf, c);\n self->cursor = newOffset ? newOffset : self->buf;\n}\n\nvoid UpBufferRSeekChar(UpBuffer* self, char c) {\n char* newOffset = strrchr(self->buf, c);\n self->cursor = newOffset ? 
newOffset : self->buf;\n}\n\nvoid UpBufferWrite(UpBuffer* self, void* value, size_t size) {\n _MakeRoom(self, size);\n\n memcpy(self->cursor, value, size);\n self->cursor += size;\n}\n\nvoid UpBufferWriteChar(UpBuffer* self, char value) {\n UpBufferWrite(self, &value, sizeof(value));\n}\n\nvoid UpBufferWriteInt(UpBuffer* self, int value) {\n UpBufferWrite(self, &value, sizeof(value));\n}\n\nvoid UpBufferWriteLongLong(UpBuffer* self, long long value) {\n UpBufferWrite(self, &value, sizeof(value));\n}\n\nvoid UpBufferWriteDouble(UpBuffer* self, double value) {\n UpBufferWrite(self, &value, sizeof(value));\n}\n\nvoid UpBufferWritePointer(UpBuffer* self, void* value) {\n UpBufferWrite(self, &value, sizeof(void*));\n}\n\nvoid UpBufferPrint(UpBuffer* self, const char* str, ...) {\n va_list args;\n va_start(args, str);\n int count = vsnprintf(self->cursor, _Remaining(self), str, args);\n va_end(args);\n\n if (_MakeRoom(self, count+1)) {\n va_start(args, str);\n count = vsprintf(self->cursor, str, args);\n va_end(args);\n }\n self->cursor += count;\n}\n\nvoid UpBufferOpen(UpBuffer* self, const char* str, ...) {\n UpBufferWriteChar(self, '\\n');\n _UpBufferPrintIndent(self);\n\n va_list args;\n va_start(args, str);\n int count = vsnprintf(self->cursor, _Remaining(self), str, args);\n va_end(args);\n\n if (_MakeRoom(self, count+1)) {\n va_start(args, str);\n count = vsprintf(self->cursor, str, args);\n va_end(args);\n }\n self->cursor += count;\n\n self->indentLevel += 4;\n}\n\nvoid UpBufferClose(UpBuffer* self, const char* str, ...) {\n self->indentLevel -= 4;\n if (str) {\n UpBufferWriteChar(self, '\\n');\n _UpBufferPrintIndent(self);\n\n va_list args;\n va_start(args, str);\n int count = vsnprintf(self->cursor, _Remaining(self), str, args);\n va_end(args);\n\n if (_MakeRoom(self, count+1)) {\n va_start(args, str);\n count = vsprintf(self->cursor, str, args);\n va_end(args);\n }\n self->cursor += count;\n } \n}\n\nvoid UpBufferLine(UpBuffer* self, const char* str, ...) 
{\n UpBufferWriteChar(self, '\\n');\n _UpBufferPrintIndent(self);\n\n va_list args;\n va_start(args, str);\n int count = vsnprintf(self->cursor, _Remaining(self), str, args);\n va_end(args);\n\n if (_MakeRoom(self, count+1)) {\n va_start(args, str);\n count = vsprintf(self->cursor, str, args);\n va_end(args);\n }\n self->cursor += count;\n}\n" }, { "alpha_fraction": 0.46095848083496094, "alphanum_fraction": 0.4614804685115814, "avg_line_length": 38.667179107666016, "blob_id": "eba93bbe158672318cb504878188995e1877efb5", "content_id": "23fbdb5e9e205db36e4cf17d3e33fee741dfd5ad", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 51727, "license_type": "permissive", "max_line_length": 105, "num_lines": 1304, "path": "/src/vm/UpTask.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpTask.h\"\n#include \"Up/UpDebug.h\"\n#include \"Up/UpClass.h\"\n#include \"Up/UpFunction.h\"\n#include \"Up/UpCFunction.h\"\n#include \"Up/UpBool.h\"\n#include \"Up/UpInteger.h\"\n#include \"Up/UpLong.h\"\n#include \"Up/UpFloat.h\"\n#include \"Up/UpString.h\"\n#include \"Up/UpList.h\"\n#include \"Up/UpMap.h\"\n#include \"Up/UpChannel.h\"\n#include \"Up/UpException.h\"\n#include \"Up/UpContext.h\"\n#include \"Up/UpProperty.h\"\n#include \"Up/UpArena.h\"\n#include \"UpScheduler.h\"\n#include \"UpEvents.h\"\n#include \"UpBuffer.h\"\n#include \"UpArray.h\"\n#include \"UpStrTable.h\"\n\n// ************************************************************************************************\n\ntypedef struct UpCatcher UpCatcher;\n\nstruct UpCatcher {\n int catchOpOffset;\n int finallyOpOffset;\n int opOffset;\n UpCatcher* previous;\n};\n\nstruct UpStorageFrame {\n UpList* stack;\n UpVariables* locals;\n UpCatcher* catcher;\n UpChannel* channel;\n UpException* exception;\n UpStorageFrame* previous;\n};\n\n// 
************************************************************************************************\n\n#define RETHROWIT \\\n stackDepth = _Catch(self, cursor, call, storage, &call, &storage); \\\n goto ExitFunctionLoop;\n\n#define THROWIT \\\n UpSetErrorFrame(call); \\\n RETHROWIT;\n\n#define SAFECALL(_THING) \\\n if (!(_THING)) { THROWIT }\n\n#define NYI \\\n UpSetError(\"NYI\"); \\\n THROWIT;\n\n// ************************************************************************************************\n\n#ifdef UP_ENABLE_PROBES\nstatic UpSymbol _GetPropertyId(UpObject* left) {\n UpClassDef* cls = UpObjectGetClassDef(left);\n return cls ? cls->probeId : 0;\n}\n#endif\n\nstatic void _StackPushOnFrame(UpTask* self, UpObject* object, UpStorageFrame* storage) {\n if (object) {\n UpListAppend(storage->stack, object);\n } else {\n UpListAppend(storage->stack, UpUndefined());\n }\n}\n\nstatic UpObject* _StackPopFromFrame(UpTask* self, UpStorageFrame* storage) {\n return UpListPop(storage->stack);\n}\n\nstatic void _StackPush(UpTask* self, UpObject* object) {\n if (object) {\n UpListAppend(self->storageFrame->stack, object);\n } else {\n UpListAppend(self->storageFrame->stack, UpUndefined());\n }\n}\n\nstatic UpObject* _StackPop(UpTask* self) {\n return UpListPop(self->storageFrame->stack);\n}\n\nstatic UpObject* _StackPeek(UpTask* self) {\n return UpListGetBack(self->storageFrame->stack);\n}\n\nstatic void _CreateFrame(UpTask* self, UpFunctionDef* functionDef, UpVariables* closure,\n UpObject* this, char* cursor, UpFunctionDef* caller, UpCallFrame** outCall,\n UpStorageFrame** outStorage) {\n UpCallFrame* call = UpArenaNew(UpGetHeap(), UpCallFrame);\n call->functionDef = functionDef;\n call->cursor = functionDef->ops->buf;\n\n UpStorageFrame* storage = UpArenaNew(UpGetHeap(), UpStorageFrame);\n storage->stack = UpListCreate();\n\n int localsCount = UpArrayCount(functionDef->scope->locals);\n if (caller) {\n // This happens only in eval() - here we reallocate the closure if\n // 
eval compilation happened to create more variables\n if (UpArrayCount(functionDef->scope->locals) != closure->count) {\n closure->variables = UpArenaResizeArray(UpGetHeap(),\n closure->variables, closure->count,\n UpObject, localsCount);\n closure->count = localsCount;\n }\n\n // We re-use the caller's closure instead of creating a new one, since\n // eval is not a function call, but affects the scope of the calling function\n storage->locals = closure;\n } else {\n UpObject** variables = UpArenaNewArray(UpGetHeap(), UpObject, localsCount);\n variables[functionDef->thisIndex] = this;\n\n UpVariables* locals = UpArenaNew(UpGetHeap(), UpVariables);\n locals->variables = variables;\n locals->count = localsCount;\n locals->previous = closure;\n storage->locals = locals;\n }\n\n *outStorage = storage;\n *outCall = call;\n}\n\nstatic int _PushFrame(UpTask* self, UpFunctionDef* functionDef, UpVariables* closure,\n UpObject* this, char* cursor, UpFunctionDef* caller, UpCallFrame** outCall,\n UpStorageFrame** outStorage) {\n // printf(\"%s\\n\", UpBytecodeToString(functionDef)); fflush(stdout);\n BEGIN_CALL(functionDef->probeId);\n if (self->callFrame && cursor) {\n self->callFrame->cursor = cursor;\n }\n\n UpCallFrame* call = NULL;\n UpStorageFrame* storage = NULL;\n _CreateFrame(self, functionDef, closure, this, cursor, caller, &call, &storage);\n\n call->previous = self->callFrame;\n self->callFrame = call;\n\n storage->previous = self->storageFrame;\n self->storageFrame = storage;\n\n *outStorage = storage;\n *outCall = call;\n return ++self->stackDepth;\n}\n\nstatic int _ResumeFrame(UpTask* self, UpCallFrame* call, UpStorageFrame* storage,\n int stackDepth, char* cursor, UpCallFrame** outCall,\n UpStorageFrame** outStorage) {\n if (self->callFrame && cursor) {\n self->callFrame->cursor = cursor;\n }\n\n UpCallFrame* parentCall = call;\n UpStorageFrame* parentStorage = storage;\n while (parentCall) {\n if (!parentCall->previous) {\n parentCall->previous = 
self->callFrame;\n parentStorage->previous = self->storageFrame;\n break;\n } else {\n parentCall = call->previous;\n parentStorage = storage->previous;\n }\n }\n\n self->callFrame = call;\n self->storageFrame = storage;\n\n BEGIN_CALL(call->functionDef->probeId);\n\n *outStorage = storage;\n *outCall = call;\n return self->stackDepth += stackDepth;\n}\n\nstatic int _PopFrame(UpTask* self, UpCallFrame** outCall, UpStorageFrame** outStorage) {\n END_CALL(self->callFrame->functionDef->probeId);\n *outStorage = self->storageFrame = self->storageFrame->previous;\n *outCall = self->callFrame = self->callFrame->previous;\n return --self->stackDepth;\n}\n\nstatic int _Catch(UpTask* self, char* cursor,\n UpCallFrame* call, UpStorageFrame* storage,\n UpCallFrame** outCall, UpStorageFrame** outStorage) {\n call->cursor = cursor;\n\n while (call) {\n if (storage->catcher) {\n if (storage->catcher->catchOpOffset >= 0) {\n call->cursor = call->functionDef->ops->buf + storage->catcher->catchOpOffset;\n break;\n } else {\n call->cursor = call->functionDef->ops->buf + storage->catcher->finallyOpOffset;\n storage->catcher = NULL;\n storage->exception = UpClaimError();\n break;\n }\n }\n _PopFrame(self, &call, &storage);\n }\n\n *outStorage = storage;\n *outCall = call;\n return self->stackDepth;\n}\n\nstatic void _PushArgs(UpTask* self, int expectedArgCount, int argCount,\n UpStorageFrame* callingFrame) {\n // if (argCount > expectedArgCount) {\n // WARN(\"Too many arguments\");\n // }\n\n for (int i = argCount; i < expectedArgCount; ++i) {\n _StackPush(self, UpUndefined());\n }\n\n for (int i = 0; i < argCount; ++i) {\n UpObject* arg = _StackPopFromFrame(self, callingFrame);\n _StackPush(self, arg);\n }\n}\n\nstatic void _PushArgsAcrossTask(UpTask* self, UpTask* otherTask, int expectedArgCount,\n int argCount, UpStorageFrame* callingFrame) {\n for (int i = argCount; i < expectedArgCount; ++i) {\n UpTaskPushArgument(otherTask, UpUndefined());\n }\n\n for (int i = 0; i < argCount; 
++i) {\n UpObject* arg = _StackPopFromFrame(self, callingFrame);\n UpTaskPushArgument(otherTask, arg);\n }\n}\n\nstatic UpObject** _FindLocals(UpStorageFrame* storage, int frameIndex) {\n UpObject** variables;\n UpVariables* locals = storage->locals;\n for (int i = 0; i <= frameIndex; ++i) {\n variables = locals->variables;\n locals = locals->previous;\n }\n return variables;\n}\n\nstatic char* _ReadInt(char* buffer, int* value) {\n memcpy(value, buffer, sizeof(int));\n return buffer += sizeof(int);\n}\n\nstatic char* _ReadLongLong(char* buffer, long long* value) {\n memcpy(value, buffer, sizeof(long long));\n return buffer += sizeof(long long);\n}\n\nstatic char* _ReadDouble(char* buffer, double* value) {\n memcpy(value, buffer, sizeof(double));\n return buffer += sizeof(double);\n}\n\nstatic char* _ReadPointer(char* buffer, void** value) {\n memcpy(value, buffer, sizeof(void*));\n return buffer += sizeof(void*);\n}\n\nstatic UpObject* _CreateException(const char* str) {\n return (UpObject*)UpStringCreate(str);\n}\n\n// ************************************************************************************************\n\nUpTask* UpTaskCreate(UpArena* heap) {\n UpTask* self = UpArenaNew(heap, UpTask);\n return self;\n}\n\nUpTask* UpTaskCreateWithFunctionDef(UpArena* heap, UpFunctionDef* functionDef, UpVariables* closure,\n UpObject* boundThis) {\n UpTask* self = UpTaskCreate(heap);\n\n UpStorageFrame* storage;\n UpCallFrame* call;\n _PushFrame(self, functionDef, closure, boundThis, NULL, NULL, &call, &storage);\n\n return self;\n}\n\nUpTask* UpTaskCreateWithFunction(UpArena* heap, UpFunction* function) {\n UpTask* self = UpTaskCreate(heap);\n\n UpStorageFrame* storage;\n UpCallFrame* call;\n _PushFrame(self, function->def, UpFunctionGetClosure(function), function->boundThis, NULL,\n NULL, &call, &storage);\n\n return self;\n}\n\nvoid UpTaskExpectReturn(UpTask* self) {\n if (!self->returnChannel) {\n self->returnChannel = (UpChannel*)UpChannelCreate(UpGetHeap());\n 
}\n}\n\nbool UpTaskIsGenerator(UpTask* self) {\n return !!self->generatorChannel;\n}\n\nvoid UpTaskMakeGenerator(UpTask* self, UpChannel* channel) {\n if (channel) {\n self->generatorChannel = channel;\n channel->generator = self;\n } else if (self->generatorChannel) {\n self->generatorChannel->generator = NULL;\n self->generatorChannel = NULL;\n }\n}\n\nbool UpTaskIsOnCurrentThread(UpTask* self) {\n return true;\n}\n\nvoid UpTaskPushArgument(UpTask* self, UpObject* argument) {\n _StackPush(self, argument);\n}\n\nUpStatus UpTaskGetReturnValue(UpTask* self, UpObject** out) {\n if (!self->returnChannel || !UpChannelHasMessage(self->returnChannel)) {\n UpSetError(\"Deadlock.\");\n return UpFailure;\n } else {\n *out = UpChannelPopMessage(self->returnChannel);\n return UpSuccess;\n }\n}\n\nUpStatus UpTaskResume(UpTask* self, UpScheduler* scheduler) {\n UpArena* heap = UpGetHeap();\n\n UpStorageFrame* storage = self->storageFrame;\n UpCallFrame* call = self->callFrame;\n\n int recursionLimit = UpGetRecursionLimit();\n int stackDepth = self->stackDepth;\n UpObject* _undefined = UpUndefined();\n UpObject* _true = UpTrue();\n UpObject* _false = UpFalse();\n UpObject* _eval = UpEval();\n\n UpSymbol getName;\n int argCount;\n\n while (call) {\n // printf(\"Calling %s\\n\", call->functionDef->name); fflush(stdout);\n\n if (stackDepth > recursionLimit) {\n UpSetError(\"Stack overflow\");\n UpSetErrorFrame(call->previous);\n break;\n }\n\n UpBuffer* ops = call->functionDef->ops;\n char* lastOp = ops->buf + UpBufferCount(ops);\n char* cursor = call->cursor;\n\n while (cursor < lastOp) {\n int opcode;\n cursor = _ReadInt(cursor, &opcode);\n\n COUNT_INSTRUCTION(opcode);\n\n switch (opcode) {\n case UpInstructionPop: {\n _StackPop(self);\n break;\n }\n case UpInstructionJump: {\n int offset;\n _ReadInt(cursor, &offset);\n\n cursor = ops->buf + offset;\n break;\n }\n case UpInstructionJumpIf: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n UpObject* condition = 
_StackPeek(self);\n if (UpAsTruth(condition)) {\n cursor = ops->buf + offset;\n }\n break;\n }\n case UpInstructionJumpIfNot: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n UpObject* condition = _StackPeek(self);\n if (!UpAsTruth(condition)) {\n cursor = ops->buf + offset;\n }\n break;\n }\n case UpInstructionJumpIfDefined: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n UpObject* condition = _StackPeek(self);\n if (condition != _undefined) {\n cursor = ops->buf + offset;\n } else {\n _StackPop(self);\n }\n break;\n }\n case UpInstructionJumpIfNotDefined: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n UpObject* condition = _StackPeek(self);\n if (condition == _undefined) {\n cursor = ops->buf + offset;\n }\n break;\n }\n case UpInstructionJumpIfHas: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &offset);\n\n UpObject* condition = _StackPeek(self);\n if (UpObjectHas(condition, symbol)) {\n cursor = ops->buf + offset;\n }\n break;\n }\n case UpInstructionJumpIfHasNot: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n UpSymbol symbol;\n cursor = _ReadInt(cursor, &symbol);\n\n UpObject* condition = _StackPeek(self);\n if (!UpObjectHas(condition, symbol)) {\n cursor = ops->buf + offset;\n }\n break;\n }\n case UpInstructionLoadObject: {\n UpObject* value;\n cursor = _ReadPointer(cursor, (void**)&value);\n\n _StackPush(self, value);\n break;\n }\n case UpInstructionLoadInteger: {\n int value;\n cursor = _ReadInt(cursor, &value);\n\n UpInteger* n = UpIntegerCreateWithUnit(value, UpNullSymbol);\n _StackPush(self, (UpObject*)n);\n\n cursor -= sizeof(int) + sizeof(int);\n int newOp = UpInstructionLoadObject;\n memcpy(cursor, &newOp, sizeof(int));\n cursor += sizeof(int);\n memcpy(cursor, &n, sizeof(void*));\n cursor += sizeof(void*);\n if (sizeof(int) > sizeof(void*)) {\n cursor += sizeof(int) - sizeof(void*);\n }\n break;\n }\n case UpInstructionLoadLong: {\n long long value;\n 
cursor = _ReadLongLong(cursor, &value);\n\n UpLong* n = UpLongCreateWithUnit(value, UpNullSymbol);\n _StackPush(self, (UpObject*)n);\n\n cursor -= sizeof(int) + sizeof(long long);\n int newOp = UpInstructionLoadObject;\n memcpy(cursor, &newOp, sizeof(int));\n cursor += sizeof(int);\n memcpy(cursor, &n, sizeof(void*));\n cursor += sizeof(void*);\n if (sizeof(long long) > sizeof(void*)) {\n cursor += sizeof(long long) - sizeof(void*);\n }\n break;\n }\n case UpInstructionLoadFloat: {\n double value;\n cursor = _ReadDouble(cursor, &value);\n\n UpFloat* n = UpFloatCreateWithUnit(value, UpNullSymbol);\n _StackPush(self, (UpObject*)n);\n\n cursor -= sizeof(int) + sizeof(double);\n int newOp = UpInstructionLoadObject;\n memcpy(cursor, &newOp, sizeof(int));\n cursor += sizeof(int);\n memcpy(cursor, &n, sizeof(void*));\n cursor += sizeof(void*);\n if (sizeof(double) > sizeof(void*)) {\n cursor += sizeof(double) - sizeof(void*);\n }\n break;\n }\n case UpInstructionGetLocal: {\n int frameIndex, localIndex;\n cursor = _ReadInt(cursor, &frameIndex);\n cursor = _ReadInt(cursor, &localIndex);\n\n UpObject** frameLocals = _FindLocals(storage, frameIndex);\n UpObject* value = frameLocals[localIndex];\n _StackPush(self, value);\n break;\n }\n case UpInstructionSetLocal: {\n int frameIndex, localIndex;\n cursor = _ReadInt(cursor, &frameIndex);\n cursor = _ReadInt(cursor, &localIndex);\n\n UpObject* right = _StackPeek(self);\n UpObject** frameLocals = _FindLocals(storage, frameIndex);\n frameLocals[localIndex] = right;\n break;\n }\n case UpInstructionDeleteLocal: {\n int frameIndex, localIndex;\n cursor = _ReadInt(cursor, &frameIndex);\n cursor = _ReadInt(cursor, &localIndex);\n\n UpObject** frameLocals = _FindLocals(storage, frameIndex);\n frameLocals[localIndex] = _undefined;\n _StackPush(self, _undefined);\n break;\n }\n case UpInstructionSetArgument: {\n int localIndex;\n cursor = _ReadInt(cursor, &localIndex);\n\n UpObject* right = _StackPop(self);\n 
storage->locals->variables[localIndex] = right;\n break;\n }\n case UpInstructionGetProperty: {\n cursor = _ReadInt(cursor, &getName);\n\n _StackPush(self, UpUndefined());\n\n // printf(\"get %s\\n\", UpGetSymbolName(getName)); fflush(stdout);\n\n GetProperty: {\n UpObject* defaultValue = _StackPop(self);\n UpObject* left = _StackPop(self);\n\n UpProperty* property;\n UpGetterDef* getterDef;\n UpClass* cls;\n SAFECALL(UpObjectGetGetter(left, getName, true, &property, &getterDef, &cls));\n\n COUNT_GET(getName, _GetPropertyId(left));\n\n if (property && property->isValidating) {\n UpSetError(\"Circular property reference\");\n THROWIT;\n } else if (property && property->isValid) {\n _StackPush(self, property->value);\n } else if (cls && getterDef) {\n if (property) {\n property->isValidating = getterDef->isCached;\n }\n\n stackDepth = _PushFrame(self, getterDef->funcDef, cls->closure,\n left, cursor, NULL, &call, &storage);\n goto ExitFunctionLoop;\n } else {\n _StackPush(self, defaultValue);\n }\n break;\n }\n }\n case UpInstructionLookupProperty: {\n UpObject* left = _StackPop(self);\n UpObject* defaultValue = _StackPop(self);\n\n UpObject* nameObject = _StackPop(self);\n if (!UpIsTypeOf(nameObject, UP_BUILTIN(string))) {\n UpSetError(\"Illegal property name\");\n THROWIT;\n }\n\n getName = UpGetSymbol(((UpString*)nameObject)->value);\n _StackPush(self, left);\n _StackPush(self, defaultValue);\n goto GetProperty;\n }\n case UpInstructionSetProperty:\n case UpInstructionValidateProperty: {\n UpSymbol setName;\n cursor = _ReadInt(cursor, &setName);\n\n UpObject* left = _StackPop(self);\n\n COUNT_SET(setName, _GetPropertyId(left));\n\n bool isSet = opcode == UpInstructionSetProperty;\n\n UpProperty* property;\n UpClass* cls;\n UpFunctionDef* setterDef;\n SAFECALL(UpObjectGetSetter(left, setName, isSet, &property, &cls, &setterDef));\n if (property) {\n property->isValid = true;\n property->isValidating = false;\n property->isOverriden = isSet;\n }\n\n if (!setterDef) 
{\n property->value = _StackPeek(self);\n } else {\n UpStorageFrame* callingFrame = storage;\n stackDepth = _PushFrame(self, setterDef, cls->closure, left,\n cursor, NULL, &call, &storage);\n _PushArgs(self, 1, 1, callingFrame);\n goto ExitFunctionLoop;\n }\n break;\n }\n case UpInstructionDeleteProperty: {\n UpSymbol deleteName;\n cursor = _ReadInt(cursor, &deleteName);\n\n UpObject* left = _StackPop(self);\n\n COUNT_DELETE(deleteName, _GetPropertyId(left));\n\n UpObjectUnset(left, deleteName);\n _StackPush(self, _undefined);\n break;\n }\n case UpInstructionCallOperator:\n case UpInstructionCallOperator2:\n case UpInstructionCallProperty: {\n cursor = _ReadInt(cursor, &argCount);\n UpSymbol name;\n cursor = _ReadInt(cursor, &name);\n\n // printf(\"call %s\\n\", UpGetSymbolName(name)); fflush(stdout);\n\n UpObject* left;\n if (opcode == UpInstructionCallOperator) {\n UpObject* right = _StackPop(self);\n left = _StackPop(self);\n _StackPush(self, right);\n } else if (opcode == UpInstructionCallOperator2) {\n UpObject* right = _StackPop(self);\n UpObject* right2 = _StackPop(self);\n left = _StackPop(self);\n _StackPush(self, right2);\n _StackPush(self, right);\n } else {\n left = _StackPop(self);\n }\n\n UpProperty* property;\n UpGetterDef* getterDef;\n UpClass* cls;\n SAFECALL(UpObjectGetGetter(left, name, false, &property, &getterDef, &cls));\n\n if (property && property->isValidating) {\n UpSetError(\"Circular property reference\");\n THROWIT;\n }\n\n if (property && property->isValid) {\n _StackPush(self, property->value);\n goto CallFunction;\n } else if (cls && getterDef) {\n if (property) {\n // property->isValidating = true;\n }\n UpStorageFrame* callingFrame = storage;\n stackDepth = _PushFrame(self, getterDef->funcDef, cls->closure,\n left, cursor, NULL, &call, &storage);\n _PushArgs(self, getterDef->funcDef->argsCount, argCount, callingFrame);\n goto ExitFunctionLoop;\n } else {\n UpSetError(\"Method '%s' not found\", UpGetSymbolName(name));\n THROWIT;\n 
}\n }\n case UpInstructionIs: {\n UpObject* right = _StackPop(self);\n UpObject* left = _StackPop(self);\n bool result;\n SAFECALL(UpIs(left, right, &result));\n _StackPush(self, result ? _true : _false);\n break;\n }\n case UpInstructionIsDefined: {\n UpObject* operand = _StackPop(self);\n if (operand == _undefined){\n _StackPush(self, _false);\n } else {\n _StackPush(self, _true);\n }\n break;\n }\n case UpInstructionImport: {\n UpSymbol fullName;\n cursor = _ReadInt(cursor, &fullName);\n\n UpObject* module = NULL;\n UpFunctionDef* def = NULL;\n UpClassDef* classDef = NULL;\n if (!UpImport(fullName, &module, &def, &classDef)) {\n THROWIT;\n } else {\n if (module) {\n _StackPush(self, (UpObject*)module);\n } else {\n stackDepth = _PushFrame(self, def, NULL, NULL, cursor,\n NULL, &call, &storage);\n goto ExitFunctionLoop;\n }\n }\n break;\n }\n case UpInstructionSetImport: {\n UpSymbol fullName;\n cursor = _ReadInt(cursor, &fullName);\n\n UpObject* module = _StackPeek(self);\n UpSetImport(fullName, module);\n break;\n }\n case UpInstructionCall: {\n cursor = _ReadInt(cursor, &argCount);\n\n CallFunction: {\n UpObject* callable = _StackPop(self);\n\n if (callable == _eval) {\n UpObject* evalFunc = NULL;\n UpObject* evalSource = NULL;\n if (argCount >= 2) {\n evalFunc = _StackPop(self);\n }\n if (argCount >= 1) {\n evalSource = _StackPop(self);\n }\n\n if (!evalSource || !UpIsTypeOf(evalSource, UP_BUILTIN(string))) {\n UpSetError(\"Can't eval object\");\n THROWIT;\n }\n\n if (evalFunc && !UpIsTypeOf(evalFunc, UP_BUILTIN(function))) {\n UpSetError(\"Illegal object passed as eval function\");\n THROWIT;\n }\n\n const char* source = ((UpString*)evalSource)->value;\n if (!*source) {\n UpSetError(\"Can't eval object\");\n THROWIT;\n }\n\n UpFunctionDef* functionDef;\n UpVariables* closure;\n if (evalFunc) {\n UpFunction* func = (UpFunction*)evalFunc;\n functionDef = func->def;\n closure = UpFunctionGetClosure(func);\n } else {\n functionDef = call->functionDef;\n closure 
= storage->locals;\n }\n\n UpFunctionDef* def;\n if (!UpCompileEval(source, functionDef, &def)) {\n THROWIT;\n }\n\n stackDepth = _PushFrame(self, def, closure, NULL, cursor,\n functionDef, &call, &storage);\n goto ExitFunctionLoop;\n } else if (UpIsClass(callable)) {\n UpClass* cls = (UpClass*)callable;\n UpObject* obj = UpClassInstantiate(cls);\n\n UpStorageFrame* callingFrame = storage;\n UpFunctionDef* def = cls->def->constructorDef;\n if (def) {\n stackDepth = _PushFrame(self, def, cls->closure,\n obj, cursor, NULL, &call, &storage);\n _PushArgs(self, def->argsCount, argCount, callingFrame);\n goto ExitFunctionLoop;\n } else {\n _StackPush(self, obj);\n }\n } else if (UpIsTypeOf(callable, UP_BUILTIN(function))) {\n UpStorageFrame* callingFrame = storage;\n UpFunction* fn = (UpFunction*)callable;\n UpVariables* closure = UpFunctionGetClosure(fn);\n stackDepth = _PushFrame(self, fn->def, closure,\n fn->boundThis, cursor, NULL, &call, &storage);\n _PushArgs(self, fn->def->argsCount, argCount, callingFrame);\n goto ExitFunctionLoop;\n } else if (UpIsTypeOf(callable, UP_BUILTIN(cfunction))) {\n UpCFunction* fn = (UpCFunction*)callable;\n\n int expectedArgCount = UpCFunctionDefGetArgumentCount(fn->def);\n UpObject* stackArgs[16];\n UpObject** args;\n\n if (expectedArgCount > 16) {\n // XXXjoe Allocate buffer for excessive arguments\n args = NULL;\n } else {\n args = stackArgs;\n }\n\n // Discard extra arguments\n while (argCount > expectedArgCount) {\n _StackPop(self);\n --argCount;\n }\n // Fill in expected arguments\n for (int i = (expectedArgCount-argCount); i < expectedArgCount; ++i) {\n args[(expectedArgCount-1)-i] = _StackPop(self);\n }\n // Fill in undefined for missing arguments\n for (int i = argCount; i < expectedArgCount; ++i) {\n args[i] = _undefined;\n }\n\n UpObject* result;\n SAFECALL(UpCFunctionCall(fn, args, &result));\n _StackPush(self, result);\n break;\n } else {\n UpSetError(\"Object is not callable\");\n THROWIT;\n }\n break;\n }\n }\n case 
UpInstructionReturn: {\n UpObject* retval = _StackPop(self);\n if (storage->previous) {\n _StackPushOnFrame(self, retval, storage->previous);\n } else if (self->returnChannel) {\n UpChannelAddMessage(self->returnChannel, retval);\n } else {\n // Discard return\n }\n cursor = lastOp;\n break;\n }\n case UpInstructionThrow: {\n UpObject* thrown = _StackPop(self);\n if (UpIsTypeOf(thrown, UP_BUILTIN(exception))) {\n UpSetException((UpException*)thrown);\n } else if (UpIsTypeOf(thrown, UP_BUILTIN(string))) {\n UpString* thrownString = (UpString*)thrown;\n UpSetError(\"%s\", thrownString->value);\n } else {\n UpSetError(\"throw called with illegal object\");\n }\n THROWIT;\n break;\n }\n case UpInstructionNewObject: {\n UpClassDef* def;\n cursor = _ReadPointer(cursor, (void*)&def);\n\n UpClass* baseClass = UP_BUILTIN(object);\n UpClass* cls = UpClassCreate(def, storage->locals, baseClass, 0);\n UpObject* object = UpClassInstantiate(cls);\n _StackPush(self, object);\n break;\n }\n case UpInstructionNewList: {\n int itemCount;\n cursor = _ReadInt(cursor, &itemCount);\n\n UpList* list = UpListCreate();\n for (int i = 0; i < itemCount; ++i) {\n UpObject* item = _StackPop(self);\n // UpLog(\"%s\", UpObjectToString(item));\n UpListInsert(list, 0, item);\n }\n\n _StackPush(self, (UpObject*)list);\n break;\n }\n case UpInstructionNewMap: {\n // int itemCount;\n // cursor = _ReadInt(cursor, &itemCount);\n\n UpMap* map = UpMapCreate();\n // for (int i = 0; i < itemCount; ++i) {\n // UpObject* value = _StackPop(self);\n // UpObject* key = _StackPop(self);\n // UpMapSet(map, key, value);\n // }\n\n _StackPush(self, (UpObject*)map);\n break;\n }\n case UpInstructionNewClass: {\n UpClassDef* def;\n cursor = _ReadPointer(cursor, (void**)&def);\n\n UpClass* baseClass = NULL;\n UpObject* base = _StackPop(self);\n if (UpIsClass(base)) {\n baseClass = (UpClass*)base;\n } else if (base != _undefined) {\n UpSetError(\"Illegal inheritance\");\n THROWIT;\n }\n\n UpClass* cls = 
UpClassCreate(def, storage->locals, baseClass, 0);\n _StackPush(self, (UpObject*)cls);\n break;\n }\n case UpInstructionBindClass: {\n UpObject* value;\n cursor = _ReadPointer(cursor, (void**)&value);\n\n UpClass* cls = (UpClass*)value;\n // Pop the base class, which is only here for compatibility\n // with the NewClass instruction\n UpObject* baseClass = _StackPop(self);\n if (UpIsClass(baseClass)) {\n cls->baseClass = (UpClass*)baseClass;\n } else if (baseClass != _undefined) {\n UpSetError(\"Illegal base class\");\n THROWIT;\n }\n\n cls->closure = storage->locals;\n _StackPush(self, value);\n break;\n }\n case UpInstructionNewGenerator: {\n UpFunctionDef* def;\n cursor = _ReadPointer(cursor, (void*)&def);\n int isImmediate;\n cursor = _ReadInt(cursor, &isImmediate);\n\n UpChannel* channel = (UpChannel*)UpChannelCreate(heap);\n UpTask* task = UpTaskCreateWithFunctionDef(heap, def, storage->locals, NULL);\n UpTaskPushArgument(task, (UpObject*)channel);\n if (isImmediate) {\n UpSchedulerResumeTask(scheduler, task);\n } else {\n UpTaskMakeGenerator(task, channel);\n }\n _StackPush(self, (UpObject*)channel);\n break;\n }\n case UpInstructionNewFunction: {\n UpFunctionDef* def;\n cursor = _ReadPointer(cursor, (void*)&def);\n\n UpFunction* fn = UpFunctionCreate(def, storage->locals, NULL);\n\n _StackPush(self, (UpObject*)fn);\n break;\n }\n case UpInstructionNewCFunction: {\n UpCFunctionDef* def;\n cursor = _ReadPointer(cursor, (void*)&def);\n\n UpCFunction* fn = UpCFunctionCreate(def);\n _StackPush(self, (UpObject*)fn);\n break;\n }\n case UpInstructionSchedule: {\n cursor = _ReadInt(cursor, &argCount);\n\n UpObject* callable = _StackPop(self);\n\n if (UpIsTypeOf(callable, UP_BUILTIN(function))) {\n UpFunction* fn = (UpFunction*)callable;\n UpTask* task = UpTaskCreateWithFunction(heap, fn);\n UpObject* channel = UpChannelCreate(heap);\n task->returnChannel = (UpChannel*)channel;\n _PushArgsAcrossTask(self, task, fn->def->argsCount, argCount, storage);\n\n 
_StackPush(self, channel);\n UpSchedulerResumeTask(scheduler, task);\n } else {\n UpSetError(\"Object can not be called as new task.\");\n THROWIT;\n }\n break;\n }\n case UpInstructionSubscribe: {\n UpObject* readable = _StackPop(self);\n if (!UpIsTypeOf(readable, UP_BUILTIN(channel))) {\n UpSetError(\"Expected a channel.\");\n THROWIT;\n }\n\n UpChannel* channel = (UpChannel*)readable;\n\n UpFunction* function = (UpFunction*)_StackPop(self);\n UpChannelAddSubscriber(channel, UpCreateStartEvent(heap, function, true));\n if (UpChannelHasGenerator(channel)) {\n UpTask* generator = channel->generator;\n UpTaskMakeGenerator(generator, NULL);\n UpSchedulerResumeTask(scheduler, generator);\n }\n break;\n }\n case UpInstructionRead: {\n UpObject* readable = _StackPop(self);\n if (!UpIsTypeOf(readable, UP_BUILTIN(channel))) {\n UpSetError(\"Expected a channel.\");\n THROWIT;\n }\n\n UpChannel* channel = (UpChannel*)readable;\n if (UpChannelHasMessage(channel)) {\n UpObject* message = UpChannelPopMessage(channel);\n _StackPush(self, message);\n UpEvent* writer = UpChannelPopWriter(channel);\n if (writer->eventType != UpEventNone) {\n UpSchedulerScheduleEvent(scheduler, writer, NULL);\n }\n } else if (UpChannelIsClosed(channel)) {\n UpSetError(\"Channel is closed.\");\n THROWIT;\n } else if (UpChannelHasGenerator(channel) && channel->generator != self) {\n UpChannelAddReader(channel, UpCreateResumeEvent(heap, self, cursor, true));\n UpTask* generator = UpChannelGetGenerator(channel);\n UpSchedulerResumeTask(scheduler, generator);\n return UpSuccess;\n } else {\n UpChannelAddReader(channel, UpCreateResumeEvent(heap, self, cursor, true));\n return UpSuccess;\n }\n break;\n }\n case UpInstructionWrite: {\n UpObject* message = _StackPop(self);\n UpObject* readable = _StackPop(self);\n if (!UpIsTypeOf(readable, UP_BUILTIN(channel))) {\n UpSetError(\"Expected a channel.\");\n THROWIT;\n }\n\n UpChannel* channel = (UpChannel*)readable;\n bool wasClosed = 
UpChannelIsClosed(channel);\n bool isClosing = message == UpClosed();\n if (isClosing) {\n UpChannelClose(channel);\n }\n\n _StackPush(self, UpUndefined());\n\n if (!isClosing) {\n bool hasSubscribers = UpChannelHasSubscribers(channel);\n if (hasSubscribers) {\n unsigned int subscriberCount;\n UpEvent** subscribers = UpChannelCloneSubscribers(channel, &subscriberCount);\n for (unsigned int i = 0; i < subscriberCount; ++i) {\n UpSchedulerDoEvent(scheduler, subscribers[i], message);\n }\n }\n }\n\n if (UpChannelHasReader(channel)) {\n call->cursor = cursor;\n unsigned int readerCount;\n UpEvent** readers = UpChannelPopAllReaders(channel, &readerCount);\n for (unsigned int i = 0; i < readerCount; ++i) {\n UpSchedulerDoEvent(scheduler, readers[i], message);\n }\n\n // UpEvent* reader = UpChannelPopReader(channel);\n // call->cursor = cursor;\n // UpSchedulerDoEvent(scheduler, reader, message);\n } else if (wasClosed) {\n UpSetError(\"Channel is closed.\");\n THROWIT;\n } else {\n UpChannelAddMessage(channel, message);\n UpChannelAddWriter(channel, UpCreateNullEvent(heap));\n }\n break;\n }\n case UpInstructionWriteAwaitRead: {\n UpObject* message = _StackPop(self);\n UpObject* readable = _StackPop(self);\n if (!UpIsTypeOf(readable, UP_BUILTIN(channel))) {\n UpSetError(\"Expected a channel.\");\n THROWIT;\n }\n\n UpChannel* channel = (UpChannel*)readable;\n bool wasClosed = UpChannelIsClosed(channel);\n bool isClosing = message == UpClosed();\n if (isClosing) {\n UpChannelClose(channel);\n }\n\n _StackPush(self, UpUndefined());\n\n bool hasSubscribers = UpChannelHasSubscribers(channel);\n if (!isClosing) {\n if (hasSubscribers) {\n unsigned int subscriberCount;\n UpEvent** subscribers = UpChannelCloneSubscribers(channel, &subscriberCount);\n for (unsigned int i = 0; i < subscriberCount; ++i) {\n UpEvent* subscriber = subscribers[i];\n UpSchedulerDoEvent(scheduler, subscriber, message);\n }\n }\n }\n\n if (UpChannelHasReader(channel)) {\n call->cursor = cursor;\n 
unsigned int readerCount;\n UpEvent** readers = UpChannelPopAllReaders(channel, &readerCount);\n for (unsigned int i = 0; i < readerCount; ++i) {\n UpSchedulerDoEvent(scheduler, readers[i], message);\n }\n\n // UpEvent* reader = UpChannelPopReader(channel);\n // call->cursor = cursor;\n // UpSchedulerDoEvent(scheduler, reader, message);\n if (!isClosing && !hasSubscribers && UpTaskIsGenerator(self)) {\n return UpSuccess;\n }\n } else if (wasClosed) {\n UpSetError(\"Channel is closed.\");\n THROWIT;\n } else if (hasSubscribers) {\n UpChannelAddMessage(channel, message);\n UpChannelAddWriter(channel, UpCreateNullEvent(heap));\n } else if (UpChannelHasGenerator(channel) && channel->generator != self) {\n UpChannelAddMessage(channel, message);\n if (isClosing) {\n UpChannelAddWriter(channel, UpCreateNullEvent(heap));\n UpSchedulerResumeTask(scheduler, UpChannelGetGenerator(channel));\n } else {\n UpChannelAddWriter(channel, UpCreateResumeEvent(heap, self, cursor, true));\n UpSchedulerResumeTask(scheduler, UpChannelGetGenerator(channel));\n return UpSuccess;\n }\n } else {\n UpChannelAddMessage(channel, message);\n if (isClosing) {\n UpChannelAddWriter(channel, UpCreateNullEvent(heap));\n } else {\n UpChannelAddWriter(channel, UpCreateResumeEvent(heap, self, cursor, true));\n return UpSuccess;\n }\n }\n break;\n }\n case UpInstructionWriteAwaitResponse: {\n UpObject* message = _StackPop(self);\n UpObject* readable = _StackPop(self);\n if (!UpIsTypeOf(readable, UP_BUILTIN(channel))) {\n UpSetError(\"Expected a channel.\");\n THROWIT;\n }\n\n UpChannel* channel = (UpChannel*)readable;\n\n UpChannelAddRespondee(channel, UpCreateResumeEvent(heap, self, cursor, true));\n UpChannelAddWriter(channel, UpCreateNullEvent(heap));\n\n if (UpChannelHasReader(channel)) {\n UpEvent* reader = UpChannelPopReader(channel);\n call->cursor = cursor;\n UpSchedulerDoEvent(scheduler, reader, message);\n return UpSuccess;\n } else if (UpChannelIsClosed(channel)) {\n UpSetError(\"Channel is 
closed.\");\n THROWIT;\n } else if (UpChannelHasGenerator(channel) && channel->generator != self) {\n UpChannelAddMessage(channel, message);\n UpSchedulerResumeTask(scheduler, UpChannelGetGenerator(channel));\n return UpSuccess;\n } else {\n UpChannelAddMessage(channel, message);\n return UpSuccess;\n }\n break;\n }\n case UpInstructionRespond: {\n UpObject* message = _StackPop(self);\n UpObject* readable = _StackPop(self);\n if (!UpIsTypeOf(readable, UP_BUILTIN(channel))) {\n UpSetError(\"Expected a channel.\");\n THROWIT;\n }\n\n UpChannel* channel = (UpChannel*)readable;\n\n if (UpChannelHasRespondee(channel)) {\n UpEvent* reader = UpChannelPopRespondee(channel);\n call->cursor = cursor;\n UpSchedulerDoEvent(scheduler, reader, message);\n if (UpTaskIsGenerator(self)) {\n return UpSuccess;\n }\n } else if (UpChannelIsClosed(channel)) {\n UpSetError(\"Channel is closed.\");\n THROWIT;\n } else {\n UpSetError(\"Nothing to respond to.\");\n THROWIT;\n }\n break;\n }\n case UpInstructionJumpIfChannelClosed: {\n int offset;\n cursor = _ReadInt(cursor, &offset);\n\n UpChannel* channel = (UpChannel*)_StackPop(self);\n if (UpChannelIsClosed(channel)) {\n _StackPop(self);\n cursor = ops->buf + offset;\n }\n break;\n }\n case UpInstructionBeginTry: {\n int catchOpOffset, finallyOpOffset;\n cursor = _ReadInt(cursor, &catchOpOffset);\n cursor = _ReadInt(cursor, &finallyOpOffset);\n\n UpCatcher* catcher = UpArenaNew(heap, UpCatcher);\n catcher->catchOpOffset = catchOpOffset;\n catcher->finallyOpOffset = finallyOpOffset;\n\n catcher->previous = storage->catcher;\n storage->catcher = catcher;\n break;\n }\n case UpInstructionEndTry: {\n UpCatcher* catcher = storage->catcher;\n storage->catcher = storage->catcher->previous;\n cursor = ops->buf + catcher->finallyOpOffset;\n break;\n }\n case UpInstructionCatch: {\n _StackPush(self, (UpObject*)UpClaimError());\n break;\n }\n case UpInstructionFinally: {\n if (storage->exception) {\n UpSetException(storage->exception);\n 
RETHROWIT;\n }\n break;\n }\n default:\n UpWarn(\"Invalid instruction: %d\\n\", opcode);\n cursor = lastOp;\n break;\n }\n }\n\n stackDepth = _PopFrame(self, &call, &storage);\n\n ExitFunctionLoop:\n ;\n }\n\n if (UpGetError()) {\n return UpFailure;\n } else {\n return UpSuccess;\n }\n}\n" }, { "alpha_fraction": 0.7201166152954102, "alphanum_fraction": 0.728863000869751, "avg_line_length": 17.052631378173828, "blob_id": "35c6cf8d70e462739a30af15df4ed968717d647d", "content_id": "0f0faf321bf887e03223e9b298d378ffaf941e9e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 343, "license_type": "permissive", "max_line_length": 62, "num_lines": 19, "path": "/src/vm/include/UpProperty.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPPROPERTY_H\n#define UP_UPPROPERTY_H\n\n#include \"Up/UpGlobal.h\"\n\nstruct UpProperty {\n UpObject* value;\n bool isValid:1;\n bool isValidating:1;\n bool isOverriden:1;\n};\n\nUpProperty* UpNullProperty();\n\nUpProperty* UpPropertyCreate();\n\nvoid UpPropertyInvalidate(UpProperty* self, UpObject* object);\n\n#endif // UP_UPPROPERTY_H\n" }, { "alpha_fraction": 0.7837837934494019, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 16.266666412353516, "blob_id": "c3b1cb10c9747628a4944729cb0a488b5619e798", "content_id": "6ea1dfcbedd516d563a77ff46e8f56dbd76f733b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 259, "license_type": "permissive", "max_line_length": 43, "num_lines": 15, "path": "/src/vm/UpMacEventLoop.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "#ifndef UP_UPMACEVENTLOOP_H\n#define UP_UPMACEVENTLOOP_H\n\n#include \"Up/UpGlobal.h\"\n\ntypedef struct UpEventLoop UpEventLoop;\n\nstruct UpEventLoop {\n};\n\nUpEventLoop* UpEventLoopCreate();\n\nUpStatus UpEventLoopRun(UpEventLoop* self);\n\n#endif // UP_UPMACEVENTLOOP_H\n" }, { "alpha_fraction": 
0.6819645762443542, "alphanum_fraction": 0.6819645762443542, "avg_line_length": 27.86046600341797, "blob_id": "fe216c5c95295f58b77632bb2246391e4e5ad7c9", "content_id": "21eff03ec21261ea9a8b294691cd8bde846c8f74", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1242, "license_type": "permissive", "max_line_length": 100, "num_lines": 43, "path": "/src/vm/include/UpMap.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPMAP_H\n#define UP_UPMAP_H\n\n#include \"Up/UpObject.h\"\n\ntypedef struct UpMapItem UpMapItem;\n\nstruct UpMap {\n UpObject __base;\n UpMapItem** buckets;\n unsigned int bucketsAllocated;\n unsigned int bucketsUsed;\n unsigned int itemCount;\n UpMapItem* searchItem;\n};\n\n// *************************************************************************************************\n\nUpMap* UpMapCreate();\nUpMap* UpMapClone(UpMap* self);\n\nunsigned int UpMapCount(UpMap* self);\nUpObject* UpMapKeys(UpMap* self);\nUpObject* UpMapValues(UpMap* self);\n\nUpObject* UpMapBeginGet(UpMap* self, UpHash hash);\nUpObject* UpMapNextGet(UpMap* self, UpHash hash);\nUpObject* UpMapEndGet(UpMap* self);\n\nUpObject* UpMapBeginSet(UpMap* self, UpObject* key, UpHash hash, UpObject* value);\nUpObject* UpMapNextSet(UpMap* self, UpObject* key, UpHash hash, UpObject* value);\nvoid UpMapEndSet(UpMap* self, UpObject* value);\n\nUpObject* UpMapBeginDelete(UpMap* self, UpHash hash);\nUpObject* UpMapNextDelete(UpMap* self, UpHash hash);\nbool UpMapEndDelete(UpMap* self, UpHash hash);\n\n/**\n * This will add a key/value pair, but it will not check for collisions, so be careful.\n */\nvoid UpMapAppend(UpMap* self, UpObject* key, UpObject* value);\n\n#endif // UP_UPMAP_H\n" }, { "alpha_fraction": 0.5497737526893616, "alphanum_fraction": 0.5497737526893616, "avg_line_length": 28.399999618530273, "blob_id": "4b16beb2518078a271860aef360ce7063f517d4f", "content_id": 
"c112c4e68024402c17a1512681b258c3ca2105cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 442, "license_type": "permissive", "max_line_length": 100, "num_lines": 15, "path": "/src/regex/include/UpRegex.h", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#ifndef UP_UPREGEX_H\n#define UP_UPREGEX_H\n\n#include \"Up/UpObject.h\"\n\ntypedef struct UpRegex UpRegex;\n\n// *************************************************************************************************\n\nUpObject* UpRegexCreate(const char* pattern, bool isAnchored, bool isCaseSensitive,\n bool isMultiLine);\n\nUpObject* UpRegexMatch(UpRegex* self, const char* subject, int start, bool capture);\n\n#endif // UP_UPREGEX_H\n" }, { "alpha_fraction": 0.5840708017349243, "alphanum_fraction": 0.5840708017349243, "avg_line_length": 24.095237731933594, "blob_id": "31b0f3c0289a4e13f8e5efea1e9578e9fff09f81", "content_id": "38becd913310bfa80b9f737fb99b6c9f237ff95d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1582, "license_type": "permissive", "max_line_length": 99, "num_lines": 63, "path": "/src/vm/UpParser.c", "repo_name": "joehewitt/up", "src_encoding": "UTF-8", "text": "\n#include \"pch.h\"\n#include \"UpInternal.h\"\n#include \"UpParser.h\"\n#include \"Up/UpContext.h\"\n#include \"UpScanner.h\"\n#include \"UpParsing.h\"\n#include \"Up/UpArena.h\"\n#include \"Up.tab.h\"\n#include \"Up.yy.h\"\n\n// ************************************************************************************************\n\nextern int Upparse(void*);\n\n// ************************************************************************************************\n\nUpStatus UpParse(const char* source, UpSyntax** node) {\n char* sourceCopy = UpArenaCopyString(UpGetHeap(), source);\n\n\tUpScanner* scanner = UpScannerCreate();\n UpScannerSetInput(scanner, sourceCopy);\n\n yyscan_t yyscanner;\n 
Uplex_init_extra(scanner, &yyscanner);\n \tscanner->yyscanner = yyscanner;\n\n Upparse(scanner);\n Uplex_destroy(yyscanner);\n\n if (scanner->error) {\n return UpFailure;\n } else {\n *node = scanner->rootSyntax;\n return UpSuccess;\n }\n}\n\nUpStatus UpParseC(const char* source, UpSyntax** node) {\n char* sourceCopy = UpArenaCopyString(UpGetHeap(), source);\n\n UpScanner* scanner = UpScannerCreate();\n scanner->addTrailingLine = false;\n UpScannerSetInput(scanner, sourceCopy);\n\n yyscan_t yyscanner;\n Uplex_init_extra(scanner, &yyscanner);\n scanner->yyscanner = yyscanner;\n\n UpPushState(CParseState, yyscanner);\n\n UpScannerPushToken(scanner, OPEN_C);\n Upparse(scanner);\n\n UpPopState(yyscanner);\n Uplex_destroy(yyscanner);\n\n if (scanner->error) {\n return UpFailure;\n } else {\n *node = scanner->rootSyntax;\n return UpSuccess;\n }\n}\n" } ]
113
Childcity/TelegramNotifier
https://github.com/Childcity/TelegramNotifier
3e4fe23f2ac858166986a23ab61730f38e771c69
a5d8d65ff10fb56f24ce10b252e8fd1be112e1d0
70e66c0479a81ff6c514d54e65f1bad0fc430640
refs/heads/main
2023-01-04T18:50:28.687006
2020-10-22T08:51:52
2020-10-22T08:51:52
306,277,150
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5936654806137085, "alphanum_fraction": 0.6125917434692383, "avg_line_length": 22.324323654174805, "blob_id": "69359133082b33c8752fcf3dd050b602a9e3067c", "content_id": "f4a6ed6efa19527cce7109065f2b789b41017a21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2597, "license_type": "permissive", "max_line_length": 83, "num_lines": 111, "path": "/src/main.py", "repo_name": "Childcity/TelegramNotifier", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\nfrom telethon import TelegramClient\n\nasync def CreateTelegaApiClient() -> TelegramClient:\n api_id = 123123123\n api_hash = '21b3b123b13b123bb123b'\n token = '576575757:AAE_-favKJBLJB Jb KJVvlvl'\n phone = '+380999445665'\n\n client = TelegramClient('session', api_id, api_hash)\n\n # connecting and building the session\n await client.connect()\n\n # in case of script ran first time it will\n # ask either to input token or otp sent to\n # number or sent or your telegram id\n if not await client.is_user_authorized():\n from telethon.errors import SessionPasswordNeededError\n\n try:\n await client.send_code_request(phone)\n # signing in the client\n await client.sign_in(phone=phone, code=input('Enter the code: '))\n except SessionPasswordNeededError:\n import getpass\n await client.sign_in(password=getpass.getpass(prompt='Enter the password:'))\n except Exception as e:\n print(e)\n\n return client if client.is_connected() else None\n\n\nasync def DestroyTelegaApiClient(client: TelegramClient):\n await client.disconnect()\n\n\ndef DoCheck() -> list:\n import requests\n import regex as re\n\n url = 'http://194.44.37.173/certificates.html'\n\n resp = requests.get(url=url)\n resp.encoding = 'utf-8'\n\n # Finding what I want\n founded:list = re.findall('[гГ]ородец', resp.text)\n\n return founded\n\n\nasync def NotifyMe(client: TelegramClient, message: str):\n print(message)\n\n try:\n receiver = await 
client.get_input_entity('@skulazkiy')\n\n # sending message using telegram client\n await client.send_message(receiver, message, parse_mode='html')\n except Exception as e:\n print(e)\n\n return\n\n\nasync def StartChecker():\n import time\n import datetime\n\n client:TelegramClient = None\n\n try:\n client = await CreateTelegaApiClient()\n\n if not (client):\n print('Client not connected!')\n return\n\n while (True):\n resp:list = DoCheck()\n\n if resp:\n await NotifyMe(client=client, message=str(resp))\n return\n else:\n print(str(datetime.datetime.now()) + ': Nothing...')\n\n time.sleep(60)\n\n finally:\n if (client):\n await DestroyTelegaApiClient(client)\n\n return\n\n\nif __name__ == '__main__':\n import asyncio\n\n print('Started')\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.wait(\n [StartChecker()]\n )\n )\n\n print('Exited')\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 30.5, "blob_id": "7b3e7aa913d36f5a961ee993f21fd7f9ce17f4b9", "content_id": "f9a2cc916aa483611654fb734f0ef5d515f93625", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "permissive", "max_line_length": 43, "num_lines": 2, "path": "/README.md", "repo_name": "Childcity/TelegramNotifier", "src_encoding": "UTF-8", "text": "# TelegramNotifier\nOn some event notify me, using Telegram API\n" } ]
2
ScoRoc/dogcollector
https://github.com/ScoRoc/dogcollector
066e8098fbcde4a6687539790cfdb45dbffc26a1
1881f940b2955bdf7b79a20a8a30602bde12b2fb
db8553f258a500079aaeb12d66fd7ee69b285997
refs/heads/master
2020-03-08T06:31:28.758643
2018-04-03T22:04:00
2018-04-03T22:04:00
127,973,793
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7904762029647827, "alphanum_fraction": 0.7904762029647827, "avg_line_length": 20, "blob_id": "b6d51bc9d9586a80acc361115891afb15c1b3daf", "content_id": "e1f0198250dc78ffbda13be0b73b1a9c1ecadbf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/dogcollectorapp/apps.py", "repo_name": "ScoRoc/dogcollector", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass DogcollectorappConfig(AppConfig):\n name = 'dogcollectorapp'\n" }, { "alpha_fraction": 0.6114649772644043, "alphanum_fraction": 0.6146496534347534, "avg_line_length": 40.86666488647461, "blob_id": "90dbaed8326791854081e19d03db47e20d2fc261", "content_id": "2afdb490b86630298d5cbe7504818fbd42f29414", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/dogcollectorapp/urls.py", "repo_name": "ScoRoc/dogcollector", "src_encoding": "UTF-8", "text": "from django.urls import path, re_path\nfrom . import views\n\nurlpatterns = [\n # path( url/, view, kwargs(key word arguments), name)\n path('', views.index, name='index'),\n # re_path(r'^([0-9]+)/$', views.show, name='show'), # using RegEx\n path('<int:dog_id>/', views.show, name='show'),\n path('post_dog/', views.post_dog, name='post_dog'),\n path('user/<user_name>', views.profile, name='profile'),\n path('login/', views.login_view, name='login'),\n path('logout/', views.logout_view, name='logout'),\n path('signup/', views.signup, name='signup'),\n path('like_dog/', views.like_dog, name='like_dog')\n]\n" } ]
2
TheZorcerer/path.py
https://github.com/TheZorcerer/path.py
f116c7a21a49787bf30314a530654410f1fb1126
0ad280ee90264689c777ade6f3f1e9456499a8b4
7ca68c62d7c75e6ed526636361632641b51711ee
refs/heads/main
2023-01-19T03:20:17.096204
2020-12-02T14:59:59
2020-12-02T14:59:59
316,242,100
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6460674405097961, "alphanum_fraction": 0.6615168452262878, "avg_line_length": 32.85714340209961, "blob_id": "7e86b1ac673e6b83d91b7e2700aa416c7a2a821b", "content_id": "8fdab2c2260efeb3873daf65b7efebbf06c41316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 78, "num_lines": 21, "path": "/data.py", "repo_name": "TheZorcerer/path.py", "src_encoding": "UTF-8", "text": "import json\n\nclass Data(object):\n\tdef __init__(self):\n\t\twith open(\"data/gunsmith-weapons.json\") as f1:\n\t\t\twith open(\"data/gunsmith-attachments.json\") as f2:\n\t\t\t\tself.weapons_data = json.load(f1)\n\t\t\t\tself.attachments_data = json.load(f2)\n\t\t\t\tf1.close()\n\t\t\t\tf2.close()\n\t\tself.weapons = list()\n\t\tfor weapon in self.weapons_data:\n\t\t\tself.weapons.append((weapon['name'],weapon['id']))\n\t\tself.path_data = dict()\n\t\tfor n in range(len(self.weapons_data)):\n\t\t\tself.path_data[self.weapons[n][1]] = self.weapons_data[n]['path']\n\tdef search(self,name):\n\t\tfor weapon in self.weapons:\n\t\t\tif(weapon[0].lower() == name.lower() or weapon[1].lower() == name.lower()):\n\t\t\t\treturn weapon[0],self.path_data[weapon[1]]\n\t\treturn None\n\n" }, { "alpha_fraction": 0.5669291615486145, "alphanum_fraction": 0.5905511975288391, "avg_line_length": 20.16666603088379, "blob_id": "f03abc06a24a5a35f24162322b39c3b77dcc1654", "content_id": "733437b88cf4982ed8fa3d63664d39f1a08d033e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/init_bot.py", "repo_name": "TheZorcerer/path.py", "src_encoding": "UTF-8", "text": "f = open(\"token.txt\",\"w\")\nf.write(input(\"your token? 
\"))\nf.close()\nf2 = open(\"guild_prefs.json\",\"w\")\nf2.write(\"{}\")\nf2.close()\n" }, { "alpha_fraction": 0.6478161215782166, "alphanum_fraction": 0.6542528867721558, "avg_line_length": 29.20833396911621, "blob_id": "f98f6e5478c40b0a270ecb66543476790a70f834", "content_id": "7699a7326854f0dc492c51ffe5728ff94f420bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 108, "num_lines": 72, "path": "/command_handler.py", "repo_name": "TheZorcerer/path.py", "src_encoding": "UTF-8", "text": "import discord\nimport data\nimport bot_utils\n\nclass handler():\n\tdef __init__(self,client):\n\t\tself.client = client\n\t\tself.gun_data = data.Data()\n\n\tdef handle(self,message):\n\t\tprefix = self.client.guild_preferences[str(message.guild.id)][\"prefix\"]\n\t\tif(message.content[:len(prefix)] == prefix):\n\t\t\treturn self.handler(message,prefix)\n\n\n\tdef handler(self,message,prefix):\n\t\tdata = message.content.split(\" \")\n\t\tmodule = data[0][len(prefix):7].lower()\n\t\tif(module == \"server\"):\n\t\t\treturn self.server(message)\n\t\telif(module == \"gun\"):\n\t\t\treturn self.gun(message)\n\t\treturn None\n\n\tdef server(self,message):\n\t\tdata = message.content.split(\" \")\n\t\tif(str(message.author.id) == \"358869991459782666\"):\n\t\t\tif(data[1] == \"prefix\"):\n\t\t\t\tself.client.guild_preferences[str(message.guild.id)][\"prefix\"] = data[2]\n\t\t\t\treturn \"I have changed the prefix to \" + data[2]\n\t\t\telif(data[1] == \"allow\"):\n\t\t\t\tself.client.guild_preferences[str(message.guild.id)][\"allowed_channels\"].append(str(message.channel.id))\n\t\t\t\treturn \"added channel with id\" + str(message.channel.id)\n\t\telse:\n\t\t\treturn \"You aint tazer\"\n\n\tdef gun(self,message):\n\t\tif(str(message.channel.id) in self.client.guild_preferences[str(message.guild.id)][\"allowed_channels\"]):\n\t\t\tdata = message.content.split(\" 
\")\n\t\t\tif(data[1].lower() == \"help\"):\n\t\t\t\ttry:\n\t\t\t\t\tif(data[2]):\n\t\t\t\t\t\treturn bot_utils.weapon_help(self.gun_data.weapons,data[2])\n\t\t\t\texcept IndexError:\n\t\t\t\t\tpass\n\t\t\t\treturn bot_utils.weapon_help(self.gun_data.weapons,\"1\")\n\t\t\tweapon_data = self.gun_data.search(data[1])\n\t\t\tname,weapon_data = weapon_data[0],weapon_data[1]\n\t\t\tif(weapon_data == None):\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tfirerate = str(weapon_data['fr']) + \" rpm\"\n\t\t\t\texcept KeyError:\n\t\t\t\t\tfirerate = \"Not Available\"\n\t\t\t\ttry:\n\t\t\t\t\treld = str(weapon_data[\"reload\"]) + \"s\"\n\t\t\t\texcept KeyError:\n\t\t\t\t\treld = \"Not Available\"\n\t\t\t\ttry:\n\t\t\t\t\tads = str(weapon_data[\"ads\"]) + \" ms\"\n\t\t\t\texcept KeyError:\n\t\t\t\t\tads = \"Not Available\"\n\t\t\t\tmag = weapon_data[\"mag\"]\n\t\t\t\tmax_ammo = weapon_data[\"maxAmmo\"]\n\t\t\t\tembed = bot_utils.build_gun_embed(name,mag,max_ammo,reld,firerate,ads)\n\t\t\t\treturn embed\n\t\telse:\n\t\t\treturn \"Bot is not allowed in this channel\"\n\n\tdef gulag(self,message):\n\t\tpass\n" }, { "alpha_fraction": 0.6827622056007385, "alphanum_fraction": 0.7149532437324524, "avg_line_length": 40, "blob_id": "2270ecd350a0752c183c285dc4a754061f706bf9", "content_id": "8a1411b968b575aa200c3154444fcc5304a106f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1926, "license_type": "no_license", "max_line_length": 172, "num_lines": 47, "path": "/bot_utils.py", "repo_name": "TheZorcerer/path.py", "src_encoding": "UTF-8", "text": "import discord\nimport json\n\ndef check_guilds(client):\n\tf = open(\"guild_prefs.json\")\n\tguild_preferences = json.load(f)\n\tfor guild in client.guilds:\n\t\tif(str(guild.id) not in list(guild_preferences.keys())):\n\t\t\tprint(\"new guild with ID\",guild.id)\n\t\t\tguild_preferences[guild.id] = 
{\"allowed_channels\":[],\"on\":True,\"prefix\":\"=\"}\n\tclient.guild_preferences = guild_preferences\n\tf.close()\n\tf = open(\"guild_prefs.json\",\"w+\")\n\tjson.dump(guild_preferences,f)\n\tf.close()\n\treturn guild_preferences\n\ndef save_preferences(client):\n\twith open(\"guild_prefs.json\",\"w\") as f:\n\t\tjson.dump(client.guild_preferences,f)\n\t\tf.close()\n\tprint(\"saved it!\")\n\ndef build_gun_embed(name,mag,maxammo,reld,firerate,ads):\n\tembed = discord.Embed(title=name, description=\"Stats on the \"+name+\" courtesy path.exe and PatchyTheDog.\", color=0x00ff00)\n\tembed.set_thumbnail(url=\"https://cdn.discordapp.com/attachments/781241634822029312/781799759576170516/gath.png\")\n\tembed.add_field(name = \"Magazine Capacity\",value = mag,inline=False)\n\tembed.add_field(name = \"Reserve Ammo\",value = maxammo,inline=False)\n\tembed.add_field(name = \"Reload Time\",value = reld,inline=False)\n\tembed.add_field(name = \"Firerate\",value = firerate,inline=False)\n\tembed.add_field(name = \"ADS Time\",value = ads,inline=False)\n\tembed.set_footer(text = \"Made by TaZeR/zahran#5909\")\n\treturn embed\n\ndef weapon_help(weapons,page):\n\ttry:\n\t\tif(int(page)*10 > len(weapons)):\n\t\t\treturn \"Not a valid page\"\n\t\telse:\n\t\t\tpage = int(page)\n\texcept ValueError:\n\t\treturn \"Not a valid page\"\n\tembed = discord.Embed(title=\"All Weapons\",description=\"The list of all weapons and the associated id's. You can use the id to search for them by =gun <id>\",color=0x00ff00)\n\tfor n in range((page-1)*10,min((page)*10+1,len(weapons)+1)):\n\t\tembed.add_field(name = str(n+1)+\". 
\"+ weapons[n][0], value = \"ID: \"+weapons[n][1],inline=False)\n\tembed.set_footer(text = \"Use =gun help <page> for the next set of weapons\")\n\treturn embed" }, { "alpha_fraction": 0.7654321193695068, "alphanum_fraction": 0.7654321193695068, "avg_line_length": 39.5, "blob_id": "bfa4ebb3eb6050910e7e26d55f96e0d3ae6bb170", "content_id": "c9f9ddd628dc83e65a87e4eb35441860d598de5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 70, "num_lines": 2, "path": "/README.md", "repo_name": "TheZorcerer/path.py", "src_encoding": "UTF-8", "text": "# path.py\n A discord bot for general stats and data based on those from path.exe\n" }, { "alpha_fraction": 0.7437425255775452, "alphanum_fraction": 0.7449344396591187, "avg_line_length": 23, "blob_id": "966558239f09468080a50245f9483f427cde83d9", "content_id": "7b54afaabe557fb0fa5989d4d3abb905d622b0a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 839, "license_type": "no_license", "max_line_length": 70, "num_lines": 35, "path": "/path.py", "repo_name": "TheZorcerer/path.py", "src_encoding": "UTF-8", "text": "import json\nimport discord\nimport command_handler\nimport bot_utils\nimport signal\nimport sys\n\nclient = discord.Client()\nhandler = command_handler.handler(client)\n\ndef on_exit(signal, frame):\n\tbot_utils.save_preferences(client)\n\tprint(\"closing!\")\n\tsys.exit(0)\n\n\[email protected]\nasync def on_ready():\n\tprint(\"logged in as\"+str(client))\n\tawait client.change_presence(activity=discord.Game(name='=gun help'))\n\tguild_preferences = bot_utils.check_guilds(client)\n\tclient.guild_preferences = guild_preferences\n\[email protected]\nasync def on_message(message):\n\treply = handler.handle(message)\n\tif(reply != None):\n\t\tif(type(reply) == type(\"never gonna give you up\")):\n\t\t\tawait 
message.channel.send(reply)\n\t\telse:\n\t\t\tawait message.channel.send(embed=reply)\n\nwith open(\"token.txt\") as token:\n\tsignal.signal(signal.SIGINT, on_exit)\n\tclient.run(token.read())" } ]
6