bugged (string, 4-228k chars) | fixed (string, 0-96.3M chars) | __index_level_0__ (int64, 0-481k)
---|---|---|
def _get_key_value(self, keyval): # A - Z / a - z _char_key = {'a' : 38, 'b' : 56, 'c' : 54, 'd' : 40, 'e' : 26, 'f' : 41, 'g' : 42, 'h' : 43, 'i' : 31, 'j' : 44, 'k' : 45, 'l' : 46, 'm' : 58, 'n' : 57, 'o' : 32, 'p' : 33, 'q' : 24, 'r' : 27, 's' : 39, 't' : 28, 'u' : 30, 'v' : 55, 'w' : 25, 'x' : 53, 'y' : 29, 'z' : 52} # 0 - 9 _digit_key = {'0' : 19, '1' : 10, '2' : 11, '3' : 12, '4' : 13, '5' : 14, '6' : 15, '7' : 16, '8' : 17, '9' : 18} # Symbols _symbol_key_val = {'-' : 20, '=' : 21, '[' : 34, ']' : 35, ';' : 47, '\'' : 48, '`' : 49, '\\' : 51, ' :' : 59, '.' : 60, '/' : 61, ' ' : 65} _symbol_shift_key_val = {'!' : 10, '@' : 11, '#' : 12, '$' : 13, '%' : 14, '^' : 15, '&' : 16, '*' : 17, '(' : 18, ')' : 19, '_' : 20, '+' : 21, '{' : 34, '}' : 35, ':' : 47, '"' :48, '~' : 49, '|' : 51, '<' : 59, '>' : 60, '?' : 61}
|
def _get_key_value(self, keyval): # A - Z / a - z _char_key = {'a' : 38, 'b' : 56, 'c' : 54, 'd' : 40, 'e' : 26, 'f' : 41, 'g' : 42, 'h' : 43, 'i' : 31, 'j' : 44, 'k' : 45, 'l' : 46, 'm' : 58, 'n' : 57, 'o' : 32, 'p' : 33, 'q' : 24, 'r' : 27, 's' : 39, 't' : 28, 'u' : 30, 'v' : 55, 'w' : 25, 'x' : 53, 'y' : 29, 'z' : 52} # 0 - 9 _digit_key = {'0' : 19, '1' : 10, '2' : 11, '3' : 12, '4' : 13, '5' : 14, '6' : 15, '7' : 16, '8' : 17, '9' : 18} # Symbols _symbol_key_val = {'-' : 20, '=' : 21, '[' : 34, ']' : 35, ';' : 47, '\'' : 48, '`' : 49, '\\' : 51, ',' : 59, '.' : 60, '/' : 61, ' ' : 65} _symbol_shift_key_val = {'!' : 10, '@' : 11, '#' : 12, '$' : 13, '%' : 14, '^' : 15, '&' : 16, '*' : 17, '(' : 18, ')' : 19, '_' : 20, '+' : 21, '{' : 34, '}' : 35, ':' : 47, '"' :48, '~' : 49, '|' : 51, '<' : 59, '>' : 60, '?' : 61}
| 479,900 |
def selectitem(self, window_name, object_name, item_name): ''' Select combo box / layered pane item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
|
def selectitem(self, window_name, object_name, item_name): ''' Select combo box / layered pane item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
| 479,901 |
def selectindex(self, window_name, object_name, item_index): ''' Select combo box item / layered pane based on index @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_index: Item index to select @type object_name: integer
|
def selectindex(self, window_name, object_name, item_index): ''' Select combo box item / layered pane based on index @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_index: Item index to select @type object_name: integer
| 479,902 |
def getallitem(self, window_name, object_name): ''' Select combo box item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string
|
def getallitem(self, window_name, object_name): ''' Select combo box item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string
| 479,903 |
def getallitem(self, window_name, object_name): ''' Select combo box item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string
|
def getallitem(self, window_name, object_name): ''' Select combo box item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string
| 479,904 |
def verifyselect(self, window_name, object_name, item_name): ''' Verify the item selected in combo box @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
|
def verifyselect(self, window_name, object_name, item_name): ''' Verify the item selected in combo box @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
| 479,905 |
def verifyselect(self, window_name, object_name, item_name): ''' Verify the item selected in combo box @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
|
def verifyselect(self, window_name, object_name, item_name): ''' Verify the item selected in combo box @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
| 479,906 |
def geotag(request): """ accepts a block of text, extracts addresses, locations and places and geocodes them. """ # XXX this is very brutal and wacky looking... # it re-uses as much of the existing way of doing things # as possible without regard to time costs or instanity of # interface. Once this has a more clear form, a more # optimized way of attacking this could be devised if needed. text = request.REQUEST.get('q', '').strip() pre = '<geotagger:location>' post = '</geotagger:location>' text = tag_addresses(text, pre=pre, post=post) text = location_tagger(pre=pre, post=post)(text) text = place_tagger(pre=pre, post=post)(text) all_pat = re.compile('%s(.*?)%s' % (pre, post)) results = [] all_locations = [] for loc in all_pat.findall(text): try: all_locations.append(loc) results += _build_geocoder_results(loc) except DoesNotExist: pass response = {'locations': results, 'searched': all_locations} return HttpResponse(json.dumps(response), mimetype="application/json")
|
def geotag(request): """ accepts a block of text, extracts addresses, locations and places and geocodes them. """ # XXX this is very brutal and wacky looking... # it re-uses as much of the existing way of doing things # as possible without regard to time costs or instanity of # interface. Once this has a more clear form, a more # optimized way of attacking this could be devised if needed. text = request.REQUEST.get('q', '').strip() pre = '<geotagger:location>' post = '</geotagger:location>' text = tag_addresses(text, pre=pre, post=post) text = location_tagger(pre=pre, post=post)(text) text = place_tagger(pre=pre, post=post)(text) all_pat = re.compile('%s(.*?)%s' % (pre, post)) results = [] all_locations = [] for loc in all_pat.findall(text): try: all_locations.append(loc) results += _build_geocoder_results(loc) except DoesNotExist: pass response = {'locations': results, 'searched': all_locations} return HttpResponse(json.dumps(response, indent=2), mimetype="application/json")
| 479,907 |
def update_block_numbers(): Block.objects.exclude(right_city=SHORT_NAME.upper()).exclude(left_city=SHORT_NAME.upper()).delete() for b in Block.objects.all(): (from_num, to_num) = make_block_numbers(b.left_from_num, b.left_to_num, b.right_from_num, b.right_to_num) if b.from_num != from_num and b.to_num != to_num: b.from_num = from_num b.to_num = to_num b.save()
|
def update_block_numbers(): Block.objects.exclude(right_city=settings.SHORT_NAME.upper()).exclude(left_city=settings.SHORT_NAME.upper()).delete() for b in Block.objects.all(): (from_num, to_num) = make_block_numbers(b.left_from_num, b.left_to_num, b.right_from_num, b.right_to_num) if b.from_num != from_num and b.to_num != to_num: b.from_num = from_num b.to_num = to_num b.save()
| 479,908 |
def main(argv=None): url = 'http://calendar.boston.com/search?acat=&cat=&commit=Search&new=n&rss=1&search=true&sort=0&srad=20&srss=50&ssrss=5&st=event&st_select=any&svt=text&swhat=&swhen=today&swhere=&trim=1' schema = 'events' try: schema = Schema.objects.get(slug=schema) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema sys.exit(0) f = feedparser.parse(url) geocoder = SmartGeocoder() for e in f.entries: try: item = NewsItem.objects.get(title=e.title, description=e.description) except NewsItem.DoesNotExist: item = NewsItem() item.schema = schema item.title = e.title item.description = e.description item.url = e.link item.location_name = e['x-calconnect-street'] item.item_date = datetime.datetime.strptime(e.dtstart, "%Y-%m-%d %H:%M:%S +0000") item.pub_date = datetime.datetime(*e.updated_parsed[:6]) try: add = geocoder.geocode(item.location_name) item.location = add['point'] item.block = add['block'] except: pass item.save() print "Added: %s" % item.title
|
def main(argv=None): url = 'http://calendar.boston.com/search?acat=&cat=&commit=Search&new=n&rss=1&search=true&sort=0&srad=20&srss=50&ssrss=5&st=event&st_select=any&svt=text&swhat=&swhen=today&swhere=&trim=1' schema = 'events' try: schema = Schema.objects.get(slug=schema) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema sys.exit(0) f = feedparser.parse(url) geocoder = SmartGeocoder() for e in f.entries: try: item = NewsItem.objects.get(title=e.title, description=e.description) except NewsItem.DoesNotExist: item = NewsItem() item.schema = schema item.title = e.title item.description = e.description item.url = e.link item.item_date = datetime.datetime(*e.updated_parsed[:6]) item.pub_date = datetime.datetime(*e.updated_parsed[:6]) try: add = geocoder.geocode(item.location_name) item.location = add['point'] item.block = add['block'] except: pass item.save() print "Added: %s" % item.title
| 479,909 |
def main(argv=None): url = 'http://calendar.boston.com/search?acat=&cat=&commit=Search&new=n&rss=1&search=true&sort=0&srad=20&srss=50&ssrss=5&st=event&st_select=any&svt=text&swhat=&swhen=today&swhere=&trim=1' schema = 'events' try: schema = Schema.objects.get(slug=schema) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema sys.exit(0) f = feedparser.parse(url) geocoder = SmartGeocoder() for e in f.entries: try: item = NewsItem.objects.get(title=e.title, description=e.description) except NewsItem.DoesNotExist: item = NewsItem() item.schema = schema item.title = e.title item.description = e.description item.url = e.link item.location_name = e['x-calconnect-street'] item.item_date = datetime.datetime.strptime(e.dtstart, "%Y-%m-%d %H:%M:%S +0000") item.pub_date = datetime.datetime(*e.updated_parsed[:6]) try: add = geocoder.geocode(item.location_name) item.location = add['point'] item.block = add['block'] except: pass item.save() print "Added: %s" % item.title
|
def main(argv=None): url = 'http://calendar.boston.com/search?acat=&cat=&commit=Search&new=n&rss=1&search=true&sort=0&srad=20&srss=50&ssrss=5&st=event&st_select=any&svt=text&swhat=&swhen=today&swhere=&trim=1' schema = 'events' try: schema = Schema.objects.get(slug=schema) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema sys.exit(0) f = feedparser.parse(url) geocoder = SmartGeocoder() for e in f.entries: try: item = NewsItem.objects.get(title=e.title, description=e.description) except NewsItem.DoesNotExist: item = NewsItem() item.schema = schema item.title = e.title item.description = e.description item.url = e.link item.location_name = e['x-calconnect-street'] item.item_date = datetime.datetime.strptime(e.dtstart, "%Y-%m-%d %H:%M:%S +0000") item.pub_date = datetime.datetime(*e.updated_parsed[:6])
| 479,910 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,911 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,912 |
def save(self, old_record, list_record, detail_record): kwargs = self.pk_fields(list_record) summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
|
def save(self, old_record, list_record, detail_record): kwargs = self.pk_fields(list_record) summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
| 479,913 |
def save(self, old_record, list_record, detail_record): kwargs = self.pk_fields(list_record) summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
|
def save(self, old_record, list_record, detail_record): kwargs = self.pk_fields(list_record) summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
| 479,914 |
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
|
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
| 479,915 |
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
|
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
| 479,916 |
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
|
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
| 479,917 |
def __init__(self, shapefile, city=None, layer_id=0): ds = DataSource(shapefile) self.layer = ds[layer_id] self.city = city and city or Metro.objects.get_current().name self.fcc_pat = re.compile('^(' + '|'.join(VALID_FCC_PREFIXES) + ')\d$')
|
def __init__(self, shapefile, city=None, layer_id=0, encoding='utf8', verbose=False): self.verbose = verbose self.encoding = encoding ds = DataSource(shapefile) self.layer = ds[layer_id] self.city = city and city or Metro.objects.get_current().name self.fcc_pat = re.compile('^(' + '|'.join(VALID_FCC_PREFIXES) + ')\d$')
| 479,918 |
def install_aggdraw(options): """ workaround for broken aggdraw on certain platforms, may require additional fixes for 64 bit plaforms, unclear. """ os.chdir(options.env_root) sh('env CFLAGS=-fpermissive %s/bin/pip install aggdraw' % options.env_root)
|
def install_aggdraw(options): """ workaround for broken aggdraw on certain platforms, may require additional fixes for 64 bit plaforms, unclear. """ os.chdir(options.env_root) sh('env CFLAGS=-fpermissive %s/bin/pip install aggdraw' % options.env_root)
| 479,919 |
def get_tile(request, version, layername, z, x, y, extension='png'): 'Returns a map tile in the requested format' z, x, y = int(z), int(x), int(y) response = TileResponse(render_tile(layername, z, x, y, extension=extension)) return response(extension)
|
def get_tile(request, version, layername, z, x, y, extension='png'): 'Returns a map tile in the requested format' z, x, y = int(z), int(x), int(y) response = TileResponse(render_tile(layername, z, x, y, extension='png')) return response(extension)
| 479,920 |
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
|
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
| 479,921 |
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
|
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
| 479,922 |
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
|
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
| 479,923 |
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
|
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
| 479,924 |
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
|
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % options.app try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
| 479,925 |
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
|
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(user_settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
| 479,926 |
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
|
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[user_settings_module]
| 479,927 |
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(schema__id=schema.id, title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. point = entry.get('georss_point') or entry.get('point') if point: x, y = point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
|
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(schema__id=schema.id, url=entry.link) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. point = entry.get('georss_point') or entry.get('point') if point: x, y = point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
| 479,928 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") addcount = updatecount = 0 url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,929 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,930 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") addcount += 1 except NewsItem.MultipleObjectsReturned: logger.warn("Multiple entries matched title %r, event titles are not unique?" % title) continue url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,931 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,932 |
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
|
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
| 479,933 |
def natural_key(self): return self.schema.slug
|
def natural_key(self): return self.schema.slug
| 479,934 |
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. if 'point' in entry: x,y = entry.point.split(' ') elif 'georss_point' in entry: x,y = entry.georss_point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
|
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(schema__id=schema.id, title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. if 'point' in entry: x,y = entry.point.split(' ') elif 'georss_point' in entry: x,y = entry.georss_point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
| 479,935 |
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. if 'point' in entry: x,y = entry.point.split(' ') elif 'georss_point' in entry: x,y = entry.georss_point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
|
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. point = entry.get('georss_point') or entry.get('point') if point: x, y = point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
| 479,936 |
def auto(options): # determine the root of the virutal env options.env_root = os.path.abspath(os.environ.get('VIRTUAL_ENV', '.')) # XXX better test. if not os.path.exists(os.path.join(options.env_root, 'bin', 'paver')): print "It does not appear that your virutal environment is activated or that you are in its root." print "please activate your environment and try again." sys.exit(0) print "Using virtual env %s" % options.env_root
|
def auto(options): # determine the root of the virutal env options.env_root = os.path.abspath(os.environ.get('VIRTUAL_ENV', '.')) # XXX better test. if not os.path.exists(os.path.join(options.env_root, 'bin', 'paver')): print "It does not appear that your virutal environment is activated or that you are in its root." print "please activate your environment and try again." sys.exit(0) print "Using virtual env %s" % options.env_root
| 479,937 |
def get_place_info_for_request(request, *args, **kwargs): """ A utility function that abstracts getting commonly used location-related information: a place, its type, a queryset of intersecting NewsItems, a bbox, nearby locations, etc. """ info = dict(bbox=None, nearby_locations=[], location=None, is_block=False, block_radius=None, is_saved=False, pid='', #place_wkt = '', # Unused? cookies_to_set={}, ) saved_place_lookup={} newsitem_qs = kwargs.get('newsitem_qs') or NewsItem.objects.all() info['place'] = place = url_to_place(*args, **kwargs) nearby = Location.objects.filter(location_type__is_significant=True) nearby = nearby.select_related().exclude(id=place.id) nearby = nearby.order_by('location_type__id', 'name') if place.location is None: # No geometry. info['bbox'] = get_metro()['extent'] saved_place_lookup = {'location__id': place.id} info['newsitem_qs'] = newsitem_qs.filter( newsitemlocation__location__id=place.id) elif isinstance(place, Block): info['is_block'] = True xy_radius, block_radius, cookies_to_set = block_radius_value(request) search_buf = make_search_buffer(place.location.centroid, block_radius) info['nearby_locations'] = nearby.filter( location__bboverlaps=search_buf ) info['bbox'] = search_buf.extent saved_place_lookup = {'block__id': place.id} info['block_radius'] = block_radius info['cookies_to_set'] = cookies_to_set info['newsitem_qs'] = newsitem_qs.filter( location__bboverlaps=search_buf) info['pid'] = make_pid(place, block_radius) else: # If the location is a point, or very small, we want to expand # the area we care about via make_search_buffer(). But if # it's not, we probably want the extent of its geometry. # Let's just take the union to cover both cases. info['location'] = place saved_place_lookup = {'location__id': place.id} search_buf = make_search_buffer(place.location.centroid, 3) search_buf = search_buf.union(place.location) info['bbox'] = search_buf.extent nearby = nearby.filter(location__bboverlaps=search_buf) info['nearby_locations'] = nearby.exclude(id=place.id) info['newsitem_qs'] = newsitem_qs.filter( newsitemlocation__location__id=place.id) # TODO: place_wkt is unused? preserved from the old generic_place_page() #info['place_wkt'] = place.location.simplify(tolerance=0.001, # preserve_topology=True) info['pid'] = make_pid(place) # Determine whether this is a saved place. if not request.user.is_anonymous(): saved_place_lookup['user_id'] = request.user.id # TODO: request.user.id should not do a DB lookup info['is_saved'] = SavedPlace.objects.filter(**saved_place_lookup).count() return info
|
def get_place_info_for_request(request, *args, **kwargs): """ A utility function that abstracts getting commonly used location-related information: a place, its type, a queryset of intersecting NewsItems, a bbox, nearby locations, etc. """ info = dict(bbox=None, nearby_locations=[], location=None, is_block=False, block_radius=None, is_saved=False, pid='', #place_wkt = '', # Unused? cookies_to_set={}, ) saved_place_lookup={} newsitem_qs = kwargs.get('newsitem_qs') if newsitem_qs is None: newsitem_qs = NewsItem.objects.all() info['place'] = place = url_to_place(*args, **kwargs) nearby = Location.objects.filter(location_type__is_significant=True) nearby = nearby.select_related().exclude(id=place.id) nearby = nearby.order_by('location_type__id', 'name') if place.location is None: # No geometry. info['bbox'] = get_metro()['extent'] saved_place_lookup = {'location__id': place.id} info['newsitem_qs'] = newsitem_qs.filter( newsitemlocation__location__id=place.id) elif isinstance(place, Block): info['is_block'] = True xy_radius, block_radius, cookies_to_set = block_radius_value(request) search_buf = make_search_buffer(place.location.centroid, block_radius) info['nearby_locations'] = nearby.filter( location__bboverlaps=search_buf ) info['bbox'] = search_buf.extent saved_place_lookup = {'block__id': place.id} info['block_radius'] = block_radius info['cookies_to_set'] = cookies_to_set info['newsitem_qs'] = newsitem_qs.filter( location__bboverlaps=search_buf) info['pid'] = make_pid(place, block_radius) else: # If the location is a point, or very small, we want to expand # the area we care about via make_search_buffer(). But if # it's not, we probably want the extent of its geometry. # Let's just take the union to cover both cases. info['location'] = place saved_place_lookup = {'location__id': place.id} search_buf = make_search_buffer(place.location.centroid, 3) search_buf = search_buf.union(place.location) info['bbox'] = search_buf.extent nearby = nearby.filter(location__bboverlaps=search_buf) info['nearby_locations'] = nearby.exclude(id=place.id) info['newsitem_qs'] = newsitem_qs.filter( newsitemlocation__location__id=place.id) # TODO: place_wkt is unused? preserved from the old generic_place_page() #info['place_wkt'] = place.location.simplify(tolerance=0.001, # preserve_topology=True) info['pid'] = make_pid(place) # Determine whether this is a saved place. if not request.user.is_anonymous(): saved_place_lookup['user_id'] = request.user.id # TODO: request.user.id should not do a DB lookup info['is_saved'] = SavedPlace.objects.filter(**saved_place_lookup).count() return info
| 479,938 |
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(TypeError, make_pid, b)
|
def test_make_pid__block__default_radius(self): b = self._makeBlock() self.assertRaises(TypeError, make_pid, b)
| 479,939 |
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(TypeError, make_pid, b)
|
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(TypeError, make_pid, b)
| 479,940 |
def get_query_set(self): return super(SchemaManager, self).get_query_set().filter(is_public=True)
|
def get_query_set(self): return super(SchemaManager, self).get_query_set().filter(is_public=True)
| 479,941 |
def url(self): return '/locations/%s/' % self.slug
|
def url(self): return '/locations/%s/' % self.slug
| 479,942 |
def parse_pid(pid): """ Returns a tuple of (place, block_radius, xy_radius), where block_radius and xy_radius are None for Locations. PID examples: 'b:12;1' (block ID 12, 1-block radius) 'l:32' (location ID 32) """ try: place_type, place_id = pid.split(':') if place_type == 'b': place_id, block_radius = place_id.split('.') place_id = int(place_id) except (KeyError, ValueError): raise Http404('Invalid place') if place_type == 'b': try: xy_radius = BLOCK_RADIUS_CHOICES[block_radius] except KeyError: raise Http404('Invalid radius') return (get_object_or_404(Block, id=place_id), block_radius, xy_radius) elif place_type == 'l': return (get_object_or_404(Location, id=place_id), None, None) else: raise Http404
|
def parse_pid(pid): """ Returns a tuple of (place, block_radius, xy_radius), where block_radius and xy_radius are None for Locations. PID examples: 'b:12.1' (block ID 12, 1-block radius) 'l:32' (location ID 32) """ try: place_type, place_id = pid.split(':') if place_type == 'b': place_id, block_radius = place_id.split('.') place_id = int(place_id) except (KeyError, ValueError): raise Http404('Invalid place') if place_type == 'b': try: xy_radius = BLOCK_RADIUS_CHOICES[block_radius] except KeyError: raise Http404('Invalid radius') return (get_object_or_404(Block, id=place_id), block_radius, xy_radius) elif place_type == 'l': return (get_object_or_404(Location, id=place_id), None, None) else: raise Http404
| 479,943 |
def ajax_place_newsitems(request): """ JSON -- expects request.GET['pid'] and request.GET['s'] (a schema ID). """ try: s = Schema.public_objects.get(id=int(request.GET['s'])) except (KeyError, ValueError, Schema.DoesNotExist): raise Http404('Invalid Schema') place, block_radius, xy_radius = parse_pid(request.GET.get('pid', '')) if isinstance(place, Block): search_buffer = make_search_buffer(place.location.centroid, block_radius) newsitem_qs = NewsItem.objects.filter(location__bboverlaps=search_buffer) else: newsitem_qs = NewsItem.objects.filter(newsitemlocation__location__id=place.id) # Make the JSON output. Note that we have to call dumps() twice because the # bunches are a special case. ni_list = list(newsitem_qs.filter(schema__id=s.id).order_by('-item_date')[:50]) bunches = simplejson.dumps(cluster_newsitems(ni_list, 26), cls=ClusterJSON) id_list = simplejson.dumps([ni.id for ni in ni_list]) return HttpResponse('{"bunches": %s, "ids": %s}' % (bunches, id_list), mimetype="application/javascript")
|
def ajax_place_newsitems(request): """ JSON -- expects request.GET['pid'] (a location ID) and request.GET['s'] (a schema ID). Returns a JSON mapping containing {'bunches': {scale: [list of clusters]}, 'ids': [list of newsitem ids]} where clusters are represented as [[list of newsitem IDs], [center x, y]] NB: the list of all newsitem IDs should be the union of all IDs in all the clusters. """ try: s = Schema.public_objects.get(id=int(request.GET['s'])) except (KeyError, ValueError, Schema.DoesNotExist): raise Http404('Invalid Schema') place, block_radius, xy_radius = parse_pid(request.GET.get('pid', '')) if isinstance(place, Block): search_buffer = make_search_buffer(place.location.centroid, block_radius) newsitem_qs = NewsItem.objects.filter(location__bboverlaps=search_buffer) else: newsitem_qs = NewsItem.objects.filter(newsitemlocation__location__id=place.id) # Make the JSON output. Note that we have to call dumps() twice because the # bunches are a special case. ni_list = list(newsitem_qs.filter(schema__id=s.id).order_by('-item_date')[:50]) bunches = simplejson.dumps(cluster_newsitems(ni_list, 26), cls=ClusterJSON) id_list = simplejson.dumps([ni.id for ni in ni_list]) return HttpResponse('{"bunches": %s, "ids": %s}' % (bunches, id_list), mimetype="application/javascript")
| 479,944 |
def ajax_place_newsitems(request): """ JSON -- expects request.GET['pid'] and request.GET['s'] (a schema ID). """ try: s = Schema.public_objects.get(id=int(request.GET['s'])) except (KeyError, ValueError, Schema.DoesNotExist): raise Http404('Invalid Schema') place, block_radius, xy_radius = parse_pid(request.GET.get('pid', '')) if isinstance(place, Block): search_buffer = make_search_buffer(place.location.centroid, block_radius) newsitem_qs = NewsItem.objects.filter(location__bboverlaps=search_buffer) else: newsitem_qs = NewsItem.objects.filter(newsitemlocation__location__id=place.id) # Make the JSON output. Note that we have to call dumps() twice because the # bunches are a special case. ni_list = list(newsitem_qs.filter(schema__id=s.id).order_by('-item_date')[:50]) bunches = simplejson.dumps(cluster_newsitems(ni_list, 26), cls=ClusterJSON) id_list = simplejson.dumps([ni.id for ni in ni_list]) return HttpResponse('{"bunches": %s, "ids": %s}' % (bunches, id_list), mimetype="application/javascript")
|
def ajax_place_newsitems(request): """ JSON -- expects request.GET['pid'] and request.GET['s'] (a schema ID). """ try: s = Schema.public_objects.get(id=int(request.GET['s'])) except (KeyError, ValueError, Schema.DoesNotExist): raise Http404('Invalid Schema') pid = request.GET.get('pid', '') place, block_radius, xy_radius = parse_pid(pid) if isinstance(place, Block): search_buffer = make_search_buffer(place.location.centroid, block_radius) newsitem_qs = NewsItem.objects.filter(location__bboverlaps=search_buffer) else: newsitem_qs = NewsItem.objects.filter(newsitemlocation__location__id=place.id) # Make the JSON output. Note that we have to call dumps() twice because the # bunches are a special case. ni_list = list(newsitem_qs.filter(schema__id=s.id).order_by('-item_date')[:50]) bunches = simplejson.dumps(cluster_newsitems(ni_list, 26), cls=ClusterJSON) id_list = simplejson.dumps([ni.id for ni in ni_list]) return HttpResponse('{"bunches": %s, "ids": %s}' % (bunches, id_list), mimetype="application/javascript")
| 479,945 |
def sync_all(options): """Use django-admin to initialize all our databases. """ settings_mod = "%s.settings" % options.app settings = get_app_settings(options) for dbname in settings.DATABASE_SYNC_ORDER: sh("django-admin.py syncdb --settings=%s --database=%s --noinput" % (settings_mod, dbname)) for dbname in settings.DATABASES.keys(): if dbname not in settings.DATABASE_SYNC_ORDER: sh("django-admin.py syncdb --settings=%s --database=%s --noinput" % (settings_mod, dbname)) # Need workaround here for # http://developer.openblockproject.org/ticket/74 because geometry # columns don't exist yet at the time that Django loads an app's # custom sql. Maybe just re-run the sqlcustom stuff and ignore # errors?
|
def sync_all(options): """Use django-admin to initialize all our databases. """ settings_mod = "%s.settings" % options.app settings = get_app_settings(options) for dbname in settings.DATABASE_SYNC_ORDER: sh("django-admin.py syncdb --settings=%s --database=%s --noinput" % (settings_mod, dbname)) sh("django-admin.py dbshell --settings=%s < ../../ebpub/ebpub/db/sql/location.sql" % settings_mod) for dbname in settings.DATABASES.keys(): if dbname not in settings.DATABASE_SYNC_ORDER: sh("django-admin.py syncdb --settings=%s --database=%s --noinput" % (settings_mod, dbname)) # Need workaround here for # http://developer.openblockproject.org/ticket/74 because geometry # columns don't exist yet at the time that Django loads an app's # custom sql. sh("django-admin.py dbshell --settings=%s < ../../ebpub/ebpub/db/sql/location.sql" % settings_mod) Maybe just re-run the sqlcustom stuff and ignore # errors? sh("django-admin.py dbshell --settings=%s < ../../ebpub/ebpub/db/sql/location.sql" % settings_mod)
| 479,946 |
def list_pages(self): yield self.get_html(self.url)
|
def list_pages(self): yield self.get_html(self.url)
| 479,947 |
def existing_record(self, list_record): unique_fields = self.unique_fields(list_record) qs = NewsItem.objects.filter(schema__id=self.schema.id, **unique_fields) try: return qs[0] except IndexError: return None
|
def existing_record(self, list_record): qs = NewsItem.objects.filter(schema__id=self.schema.id) qs = qs.by_attribute(self.schema_fields['guid'], list_record['id']) try: return qs[0] except IndexError: return None
| 479,948 |
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
|
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
| 479,949 |
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
|
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
| 479,950 |
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
|
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
| 479,951 |
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
|
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
| 479,952 |
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
|
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
| 479,953 |
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
|
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
| 479,954 |
def unique_fields(self, list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely idenfity # an article. import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
|
def unique_fields(self, list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely idenfity # an article. import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
| 479,955 |
def unique_fields(self, list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely idenfity # an article. import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
|
def unique_fields(self, list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely idenfity # an article. import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
| 479,956 |
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(AssertionError, make_pid, b)
|
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(AssertionError, make_pid, b)
| 479,957 |
def existing_record(self, record): # TODO return None
|
def existing_record(self, record): # TODO return None
| 479,958 |
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
|
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
| 479,959 |
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
|
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
| 479,960 |
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
|
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
| 479,961 |
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
|
from ebdata.retrieval import log_debug def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
| 479,962 |
def item_date_url(self): return '/%s/by-date/%s/%s/%s/' % (self.schema.slug, self.item_date.year, self.item_date.month, self.item_date.day)
|
def item_date_url(self): return '/%s/by-date/%s/%s/%s/' % (self.schema.slug, self.item_date.year, self.item_date.month, self.item_date.day)
| 479,963 |
def preprocess_to_string(*args, **kw): """ like make_tree_and_preprocess() but returns a string. """ tree = make_tree_and_preprocess(*args, **kw) return tree.body.text.strip()
|
def preprocess_to_string(*args, **kw): """ like make_tree_and_preprocess() but returns a string. """ tree = make_tree_and_preprocess(*args, **kw) if tree.body.text: return tree.body.text.strip() else: return u''
| 479,964 |
def __unicode__(self): return u'User %s: %u' % (self.user_id, self.name())
|
def __unicode__(self): return u'User %s: %u' % (self.user_id, self.name())
| 479,965 |
def test_address_dir_northwest(self): # There was a typo in the regex for this, but mysteriously it still worked self.assertParses('123 Northwest Main St.', [('123 Northwest Main St.', '')])
|
def test_address_dir_northwest(self): # There was a typo in the regex for this, but mysteriously it still worked self.assertParses('123 Northwest Main St.', [('123 Northwest Main St.', '')])
| 479,966 |
def geo_example(request): import feedparser from ebdata.nlp.addresses import parse_addresses from ebpub.geocoder.base import AddressGeocoder feed_url = 'http://www.bpdnews.com/index.xml' feed = feedparser.parse(feed_url) geocoder = AddressGeocoder() geo_entries = [] for entry in feed.entries: addresses = parse_addresses(entry.description) point = None while not point: for address in addresses: try: location = geocoder.geocode(address[0]) point = location['point'] break except Exception: pass if not point: point = -1 if point and point is not -1: entry['point'] = point geo_entries.append(entry) print len(geo_entries) return render_to_response('db/geo_example.html', {'entries': geo_entries })
|
def geo_example(request): import feedparser from ebdata.nlp.addresses import parse_addresses from ebpub.geocoder.base import AddressGeocoder feed_url = 'http://www.bpdnews.com/index.xml' feed = feedparser.parse(feed_url) geocoder = AddressGeocoder() geo_entries = [] for entry in feed.entries: addresses = parse_addresses(entry.description) point = None while not point: for address in addresses: try: location = geocoder.geocode(address[0]) point = location['point'] break except Exception: pass if not point: point = -1 if point and point is not -1: entry['point'] = point geo_entries.append(entry) return render_to_response('db/geo_example.html', {'entries': geo_entries })
| 479,967 |
def list_pages(self): # Fetch the feed, paginating if necessary. # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues max_per_page = 200 max_pages = 10
|
def list_pages(self): # Fetch the feed, paginating if necessary. # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues max_per_page = 200 max_pages = 10
| 479,968 |
def save(self, old_record, list_record, detail_record): attributes = detail_record.pop('attributes', None) self.create_or_update(old_record, attributes, **detail_record)
|
def save(self, old_record, list_record, detail_record): attributes = detail_record.pop('attributes', None) self.create_or_update(old_record, attributes, **detail_record)
| 479,969 |
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
|
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
| 479,970 |
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
|
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
| 479,971 |
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
|
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
| 479,972 |
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
|
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 200 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
| 479,973 |
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
|
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
| 479,974 |
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
|
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): feed_url = FEED_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
| 479,975 |
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
|
def list_pages(self): # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues # paginate if necessary. max_per_page = 1000 max_pages = 10 # First, figure out how long it's been since the last scrape; # seeclickfix has a 'start' option in hours. # We'll discard microseconds and round up. # The idea is not to be precise, but to get everything we haven't # seen yet and not much that we have seen. delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
| 479,976 |
def existing_record(self, list_record): qs = NewsItem.objects.filter(schema__id=self.schema.id) qs = qs.by_attribute(self.schema_fields['guid'], list_record['id']) try: return qs[0] except IndexError: return None
|
def existing_record(self, cleaned_list_record): url = cleaned_list_record['id'].replace('https:', 'http:') qs = NewsItem.objects.filter(schema__id=self.schema.id, url=url) try: return qs[0] except IndexError: return None
| 479,977 |
def save(self, old_record, list_record, detail_record): if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
|
def save(self, old_record, list_record, detail_record): if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
| 479,978 |
def save(self, old_record, list_record, detail_record): if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
|
def save(self, old_record, list_record, detail_record): if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
| 479,979 |
def save(self, old_record, list_record, detail_record): if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
|
def save(self, old_record, list_record, detail_record): if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
| 479,980 |
def testBoatWindVersusWeather(self): settings = Settings() weather = Weather() weather.load(settings) wind = Wind(weather)
|
def testBoatWindVersusWeather(self): settings = Settings() weather = Weather() weather.load(settings) wind = Wind(weather)
| 479,981 |
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
|
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
| 479,982 |
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
|
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
| 479,983 |
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
|
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
| 479,984 |
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
|
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
| 479,985 |
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
|
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
| 479,986 |
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
|
def handle_tacking_and_gybing(self, heading, bearing): wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) track, waypoint = self.router.get_active_segment()
| 479,987 |
def prevent_beaching(self, heading, look_ahead = None): if look_ahead == None: look_ahead = 250 # We'll construct a future course line... boat_position = self.boat.position # ... project it ahead... sail_vector = PolarVector(heading, look_ahead) future_position = boat_position + sail_vector sail_line = (self.boat.position, sail_vector, future_position) # Check if the projected line hits land... if self.map.hit(sail_line): # ... and if so, tack or gybe away from it wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) self.__log("Tacked/gybed to avoid hitting land") return True, normalize_angle_2pi(heading + 2 * wind_angle)
|
def prevent_beaching(self, heading, look_ahead = None): if look_ahead == None: look_ahead = 250 # We'll construct a future course line... boat_position = self.boat.position # ... project it ahead... sail_vector = PolarVector(heading, look_ahead) future_position = boat_position + sail_vector sail_line = (self.boat.position, sail_vector, future_position) # Check if the projected line hits land... if self.map.hit(sail_line): # ... and if so, tack or gybe away from it wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) self.__log("Tacked/gybed to avoid hitting land") return True, normalize_angle_2pi(heading + 2 * wind_angle)
| 479,988 |
def prevent_beaching(self, heading, look_ahead = None): if look_ahead == None: look_ahead = 250 # We'll construct a future course line... boat_position = self.boat.position # ... project it ahead... sail_vector = PolarVector(heading, look_ahead) future_position = boat_position + sail_vector sail_line = (self.boat.position, sail_vector, future_position) # Check if the projected line hits land... if self.map.hit(sail_line): # ... and if so, tack or gybe away from it wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) self.__log("Tacked/gybed to avoid hitting land") return True, normalize_angle_2pi(heading + 2 * wind_angle)
|
def prevent_beaching(self, heading, look_ahead = None): if look_ahead == None: look_ahead = 250 # We'll construct a future course line... boat_position = self.boat.position # ... project it ahead... sail_vector = PolarVector(heading, look_ahead) future_position = boat_position + sail_vector sail_line = (self.boat.position, sail_vector, future_position) # Check if the projected line hits land... if self.map.hit(sail_line): # ... and if so, tack or gybe away from it wind = self.boat.condition.wind wind_angle = normalize_angle_pipi(wind[0] - heading) self.__log("Tacked/gybed to avoid hitting land") return True, normalize_angle_2pi(heading + 2 * wind_angle)
| 479,989 |
def readmeta(item,meta): for elem in meta: if elem.tag.find(dc): if elem.tag == '{'+dc+'}date': try: item.info.date = datetime.strptime(elem.text,"%Y-%m-%dT%H:%M:%S.%f") except ValueError, e: if str(e) == "'f' is a bad directive in format '%Y-%m-%dT%H:%M:%S.%f'": # Python 2.5 item.info.date = datetime.strptime(elem.text.split('.')[0],"%Y-%m-%dT%H:%M:%S") else: remain = str(e)[26:] if remain == 'Z': item.info.date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") else: date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") delta = remain.split(':') item.info.date = date - timedelta(hours=int(delta[0]),minutes=int(delta[1])) elif elem.tag == '{'+dc+'}type': if elem.attrib['{'+rdf+'}resource'] != 'http://purl.org/dc/dcmitype/Dataset': raise FileFormatError elif elem.tag == '{'+dc+'}format': if elem.text != 'application/swatchbook': raise FileFormatError elif '{'+xml+'}lang' in elem.attrib: exec("item.info."+elem.tag[(len(dc)+2):]+"_l10n[elem.attrib['{'+xml+'}lang']] = xmlunescape(elem.text)") else: exec("item.info."+elem.tag[(len(dc)+2):]+" = xmlunescape(elem.text)") elif elem.tag == '{'+dcterms+'}license' item.info.license = xmlunescape(elem.attrib['{'+rdf+'}resource'])
|
def readmeta(item,meta): for elem in meta: if elem.tag.find(dc): if elem.tag == '{'+dc+'}date': try: item.info.date = datetime.strptime(elem.text,"%Y-%m-%dT%H:%M:%S.%f") except ValueError, e: if str(e) == "'f' is a bad directive in format '%Y-%m-%dT%H:%M:%S.%f'": # Python 2.5 item.info.date = datetime.strptime(elem.text.split('.')[0],"%Y-%m-%dT%H:%M:%S") else: remain = str(e)[26:] if remain == 'Z': item.info.date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") else: date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") delta = remain.split(':') item.info.date = date - timedelta(hours=int(delta[0]),minutes=int(delta[1])) elif elem.tag == '{'+dc+'}type': if elem.attrib['{'+rdf+'}resource'] != 'http://purl.org/dc/dcmitype/Dataset': raise FileFormatError elif elem.tag == '{'+dc+'}format': if elem.text != 'application/swatchbook': raise FileFormatError elif '{'+xml+'}lang' in elem.attrib: exec("item.info."+elem.tag[(len(dc)+2):]+"_l10n[elem.attrib['{'+xml+'}lang']] = xmlunescape(elem.text)") else: exec("item.info."+elem.tag[(len(dc)+2):]+" = xmlunescape(elem.text)") elif elem.tag == '{'+dcterms+'}license': item.info.license = xmlunescape(elem.attrib['{'+rdf+'}resource'])
| 479,990 |
def write(swatchbook): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="http://purl.org/dc/elements/1.1/"\n xmlns:cc="http://creativecommons.org/ns#"\n xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">\n' xml += sbz.writemeta(swatchbook.info) xml += ' <swatches>\n' for id in swatchbook.swatches: if isinstance(swatchbook.swatches[id], Color): swatch = swatchbook.swatches[id] xml += ' <color' if 'spot' in swatch.usage: xml += ' spot="1"' xml += '>\n' xml += sbz.writemeta(swatch.info,2) for value in swatch.values: xml += ' <values model="'+value[0]+'"' if value[1]: xml += ' space="'+value[1]+'"' xml += '>'+' '.join(str(round(x,16)) for x in swatch.values[value])+'</values>\n' for extra in swatch.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if swatch.extra[extra]: xml += xmlescape(unicode(swatch.extra[extra])) xml += '</extra>\n' xml += ' </color>\n' xml += ' </swatches>\n' if len(swatchbook.book.items) > 0: xml += ' <book' for display in swatchbook.book.display: if swatchbook.book.display[display]: xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"' xml += '>\n' xml += unicode(sbz.writem(swatchbook.book.items),'utf-8') xml += ' </book>\n' xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in swatchbook.profiles: zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
|
def write(swatchbook): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="http://purl.org/dc/elements/1.1/"\n xmlns:cc="http://creativecommons.org/ns#"\n xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">\n' xml += sbz.writemeta(swatchbook.info) xml += ' <swatches>\n' for id in swatchbook.swatches: if isinstance(swatchbook.swatches[id], Color): swatch = swatchbook.swatches[id] xml += ' <color' if 'spot' in swatch.usage: xml += ' spot="1"' xml += '>\n' xml += sbz.writemeta(swatch.info,2) for value in swatch.values: xml += ' <values model="'+value[0]+'"' if value[1]: xml += ' space="'+value[1]+'"' xml += '>'+' '.join(str(round(x,16)) for x in swatch.values[value])+'</values>\n' for extra in swatch.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if swatch.extra[extra]: xml += xmlescape(unicode(swatch.extra[extra])) xml += '</extra>\n' xml += ' </color>\n' xml += ' </swatches>\n' if len(swatchbook.book.items) > 0: xml += ' <book' for display in swatchbook.book.display: if swatchbook.book.display[display]: xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"' xml += '>\n' xml += unicode(sbz.writem(swatchbook.book.items),'utf-8') xml += ' </book>\n' xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in swatchbook.profiles: zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
| 479,991 |
def write(book): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n' for info in book.info: if isinstance(book.info[info],dict): for lang in book.info[info]: if lang == 0: xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n' else: xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n' else: xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n' for display in book.display: xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n' xml += unicode(sbz.writem(book.items,0),'utf-8') xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in book.profiles: #TODO: check if exists zip.write(book.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
|
def write(book): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n' for info in book.info: if isinstance(book.info[info],dict): for lang in book.info[info]: if book.info[info][lang]: if lang == 0: xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n' else: xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n' else: xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n' for display in book.display: xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n' xml += unicode(sbz.writem(book.items,0),'utf-8') xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in book.profiles: #TODO: check if exists zip.write(book.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
| 479,992 |
def write(book): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n' for info in book.info: if isinstance(book.info[info],dict): for lang in book.info[info]: if lang == 0: xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n' else: xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n' else: xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n' for display in book.display: xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n' xml += unicode(sbz.writem(book.items,0),'utf-8') xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in book.profiles: #TODO: check if exists zip.write(book.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
|
def write(book): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n' for info in book.info: if isinstance(book.info[info],dict): for lang in book.info[info]: if lang == 0: xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n' else: xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n' else: if book.info[info]: xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n' for display in book.display: xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n' xml += unicode(sbz.writem(book.items,0),'utf-8') xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in book.profiles: #TODO: check if exists zip.write(book.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
| 479,993 |
def write(book): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n' for info in book.info: if isinstance(book.info[info],dict): for lang in book.info[info]: if lang == 0: xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n' else: xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n' else: xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n' for display in book.display: xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n' xml += unicode(sbz.writem(book.items,0),'utf-8') xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in book.profiles: #TODO: check if exists zip.write(book.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
|
def write(book): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n' for info in book.info: if isinstance(book.info[info],dict): for lang in book.info[info]: if lang == 0: xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n' else: xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n' else: xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n' for display in book.display: if book.display[display]: xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n' xml += unicode(sbz.writem(book.items,0),'utf-8') xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in book.profiles: #TODO: check if exists zip.write(book.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
| 479,994 |
def readmeta(item,meta): for elem in meta: if elem.tag.find(dc): if elem.tag == '{'+dc+'}date': try: item.info.date = datetime.strptime(elem.text,"%Y-%m-%dT%H:%M:%S.%f") except ValueError, e: if str(e) == "'f' is a bad directive in format '%Y-%m-%dT%H:%M:%S.%f'": # Python 2.5 item.info.date = datetime.strptime(elem.text.split('.')[0],"%Y-%m-%dT%H:%M:%S") else: remain = str(e)[26:] if remain == 'Z': item.info.date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") else: date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") delta = remain.split(':') item.info.date = date - timedelta(hours=int(delta[0]),minutes=int(delta[1])) elif elem.tag == '{'+dc+'}type': if elem.attrib['{'+rdf+'}resource'] != 'http://purl.org/dc/dcmitype/Dataset': raise FileFormatError elif elem.tag == '{'+dc+'}format': if elem.text != 'application/swatchbook': raise FileFormatError elif '{'+xml+'}lang' in elem.attrib: exec("item.info."+elem.tag[(len(dc)+2):]+"_l10n[elem.attrib['{'+xml+'}lang']] = xmlunescape(elem.text)") else: exec("item.info."+elem.tag[(len(dc)+2):]+" = xmlunescape(elem.text)") elif elem.tag.find(cc): exec("item.info."+elem.tag[(len(cc)+2):]+" = xmlunescape(elem.text)")
|
def readmeta(item,meta): for elem in meta: if elem.tag.find(dc): if elem.tag == '{'+dc+'}date': try: item.info.date = datetime.strptime(elem.text,"%Y-%m-%dT%H:%M:%S.%f") except ValueError, e: if str(e) == "'f' is a bad directive in format '%Y-%m-%dT%H:%M:%S.%f'": # Python 2.5 item.info.date = datetime.strptime(elem.text.split('.')[0],"%Y-%m-%dT%H:%M:%S") else: remain = str(e)[26:] if remain == 'Z': item.info.date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") else: date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f") delta = remain.split(':') item.info.date = date - timedelta(hours=int(delta[0]),minutes=int(delta[1])) elif elem.tag == '{'+dc+'}type': if elem.attrib['{'+rdf+'}resource'] != 'http://purl.org/dc/dcmitype/Dataset': raise FileFormatError elif elem.tag == '{'+dc+'}format': if elem.text != 'application/swatchbook': raise FileFormatError elif '{'+xml+'}lang' in elem.attrib: exec("item.info."+elem.tag[(len(dc)+2):]+"_l10n[elem.attrib['{'+xml+'}lang']] = xmlunescape(elem.text)") else: exec("item.info."+elem.tag[(len(dc)+2):]+" = xmlunescape(elem.text)") elif elem.tag == '{'+dcterms+'}license' item.info.license = xmlunescape(elem.attrib['{'+rdf+'}resource'])
| 479,995 |
def write(swatchbook): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:cc="'+cc+'"\n xmlns:rdf="'+rdf+'">\n' xml += sbz.writemeta(swatchbook.info) xml += ' <swatches>\n' for id in swatchbook.swatches: if isinstance(swatchbook.swatches[id], Color): swatch = swatchbook.swatches[id] xml += ' <color' if 'spot' in swatch.usage: xml += ' spot="1"' xml += '>\n' xml += sbz.writemeta(swatch.info,2) for value in swatch.values: xml += ' <values model="'+value[0]+'"' if value[1]: xml += ' space="'+value[1]+'"' xml += '>'+' '.join(str(round(x,16)) for x in swatch.values[value])+'</values>\n' for extra in swatch.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if swatch.extra[extra]: xml += xmlescape(unicode(swatch.extra[extra])) xml += '</extra>\n' xml += ' </color>\n' xml += ' </swatches>\n' if len(swatchbook.book.items) > 0: xml += ' <book' for display in swatchbook.book.display: if swatchbook.book.display[display]: xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"' xml += '>\n' xml += unicode(sbz.writem(swatchbook.book.items),'utf-8') xml += ' </book>\n' xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in swatchbook.profiles: zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
|
def write(swatchbook): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:dcterms="'+dcterms+'"\n xmlns:rdf="'+rdf+'">\n' xml += sbz.writemeta(swatchbook.info) xml += ' <swatches>\n' for id in swatchbook.swatches: if isinstance(swatchbook.swatches[id], Color): swatch = swatchbook.swatches[id] xml += ' <color' if 'spot' in swatch.usage: xml += ' spot="1"' xml += '>\n' xml += sbz.writemeta(swatch.info,2) for value in swatch.values: xml += ' <values model="'+value[0]+'"' if value[1]: xml += ' space="'+value[1]+'"' xml += '>'+' '.join(str(round(x,16)) for x in swatch.values[value])+'</values>\n' for extra in swatch.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if swatch.extra[extra]: xml += xmlescape(unicode(swatch.extra[extra])) xml += '</extra>\n' xml += ' </color>\n' xml += ' </swatches>\n' if len(swatchbook.book.items) > 0: xml += ' <book' for display in swatchbook.book.display: if swatchbook.book.display[display]: xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"' xml += '>\n' xml += unicode(sbz.writem(swatchbook.book.items),'utf-8') xml += ' </book>\n' xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in swatchbook.profiles: zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile) zip.close() tf.seek(0) return tf.read()
| 479,996 |
def writemeta(meta,offset=0): xml = u'' if offset == 0: xml += ' <dc:format>application/swatchbook</dc:format>\n <dc:type rdf:resource="http://purl.org/dc/dcmitype/Dataset" />\n' if meta.date: xml += ' '*(offset+2)+'<dc:date>'+meta.date.isoformat()+'Z</dc:date>\n' for dc in meta.dc: info = eval('meta.'+dc) if len(info) > 0: xml += ' '*(offset+2)+'<dc:'+dc+'>'+xmlescape(info)+'</dc:'+dc+'>\n' if meta.dc[dc][0]: info_l10n = eval('meta.'+dc+'_l10n') for lang in info_l10n: xml += ' '*(offset+2)+'<dc:'+dc+' xml:lang="'+lang+'">'+xmlescape(info_l10n[lang])+'</dc:'+dc+'>\n' if meta.license > '': xml += ' '*(offset+2)+'<cc:license rdf:resource="'+xmlescape(meta.license)+'" />\n' if xml > u'': return ' '*(offset+1)+'<metadata>\n'+xml+' '*(offset+1)+'</metadata>\n' else: return u''
|
def writemeta(meta,offset=0): xml = u'' if offset == 0: xml += ' <dc:format>application/swatchbook</dc:format>\n <dc:type rdf:resource="http://purl.org/dc/dcmitype/Dataset" />\n' if meta.date: xml += ' '*(offset+2)+'<dc:date>'+meta.date.isoformat()+'Z</dc:date>\n' for dc in meta.dc: info = eval('meta.'+dc) if len(info) > 0: xml += ' '*(offset+2)+'<dc:'+dc+'>'+xmlescape(info)+'</dc:'+dc+'>\n' if meta.dc[dc][0]: info_l10n = eval('meta.'+dc+'_l10n') for lang in info_l10n: xml += ' '*(offset+2)+'<dc:'+dc+' xml:lang="'+lang+'">'+xmlescape(info_l10n[lang])+'</dc:'+dc+'>\n' if meta.license > '': xml += ' '*(offset+2)+'<dcterms:license rdf:resource="'+xmlescape(meta.license)+'" />\n' if xml > u'': return ' '*(offset+1)+'<metadata>\n'+xml+' '*(offset+1)+'</metadata>\n' else: return u''
| 479,997 |
def readmaterial(material,swatchbook): if material.tag == 'color': sitem = Color(swatchbook) if 'usage' in material.attrib: sitem.usage = material.attrib['usage'].split(',') elif material.tag == 'pattern': sitem = Pattern(swatchbook) for elem in material: if elem.tag == 'values': values = map(eval,elem.text.split()) if 'space' in elem.attrib: sitem.values[(elem.attrib['model'],unicode(elem.attrib['space']))] = values else: sitem.values[(elem.attrib['model'],False)] = values elif elem.tag == 'metadata': sbz.readmeta(sitem,elem) elif elem.tag == 'extra': sitem.extra[xmlunescape(elem.attrib['type'])] = xmlunescape(elem.text) if sitem.info.identifier > '': id = sitem.info.identifier else: raise FileFormatError swatchbook.materials[id] = sitem
|
def readmaterial(material,swatchbook): if material.tag == 'color': sitem = Color(swatchbook) if 'usage' in material.attrib: sitem.usage = material.attrib['usage'].split(',') elif material.tag == 'pattern': sitem = Pattern(swatchbook) for elem in material: if elem.tag == 'values': values = map(eval,elem.text.split()) if 'space' in elem.attrib: sitem.values[(elem.attrib['model'],unicode(elem.attrib['space']))] = values else: sitem.values[(elem.attrib['model'],False)] = values elif elem.tag == 'metadata': sbz.readmeta(sitem,elem) elif elem.tag == 'extra': sitem.extra[xmlunescape(elem.attrib['type'])] = xmlunescape(elem.text) if sitem.info.identifier > '': id = sitem.info.identifier else: raise FileFormatError swatchbook.materials[id] = sitem
| 479,998 |
def write(swatchbook): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:dcterms="'+dcterms+'"\n xmlns:rdf="'+rdf+'">\n' xml += sbz.writemeta(swatchbook.info) xml += ' <materials>\n' for id in swatchbook.materials: material = swatchbook.materials[id] if isinstance(swatchbook.materials[id], Color): xml += ' <color' if len(material.usage) > 0: xml += ' usage="'+(','.join(material.usage))+'"' xml += '>\n' xml += sbz.writemeta(material.info,2) for value in material.values: xml += ' <values model="'+value[0]+'"' if value[1]: xml += ' space="'+value[1]+'"' xml += '>'+' '.join(str(round(x,16)) for x in material.values[value])+'</values>\n' for extra in material.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if material.extra[extra]: xml += xmlescape(unicode(material.extra[extra])) xml += '</extra>\n' xml += ' </color>\n' elif isinstance(swatchbook.materials[id], Pattern): xml += ' <pattern>\n' xml += sbz.writemeta(material.info,2) for extra in material.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if material.extra[extra]: xml += xmlescape(unicode(material.extra[extra])) xml += '</extra>\n' xml += ' </pattern>\n' xml += ' </materials>\n' if len(swatchbook.book.items) > 0: xml += ' <book' for display in swatchbook.book.display: if swatchbook.book.display[display]: xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"' xml += '>\n' xml += unicode(sbz.writem(swatchbook.book.items),'utf-8') xml += ' </book>\n' xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in swatchbook.profiles: zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile) def addfiles(dir): for s in sorted(os.listdir(dir)): file = os.path.join(dir,s) if os.path.isdir(file) and file not in ('.','..'): addfiles(file) else: zip.write(file,file[len(swatchbook.tmpdir):]) addfiles(swatchbook.tmpdir) zip.close() tf.seek(0) return tf.read()
|
def write(swatchbook): xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:dcterms="'+dcterms+'"\n xmlns:rdf="'+rdf+'">\n' xml += sbz.writemeta(swatchbook.info) xml += ' <materials>\n' for id in swatchbook.materials: material = swatchbook.materials[id] if isinstance(swatchbook.materials[id], Color): xml += ' <color' if len(material.usage) > 0: xml += ' usage="'+(','.join(material.usage))+'"' xml += '>\n' xml += sbz.writemeta(material.info,2) for value in material.values: xml += ' <values model="'+value[0]+'"' if value[1]: xml += ' space="'+value[1]+'"' xml += '>'+' '.join(str(round(x,16)) for x in material.values[value])+'</values>\n' for extra in material.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if material.extra[extra]: xml += xmlescape(unicode(material.extra[extra])) xml += '</extra>\n' xml += ' </color>\n' elif isinstance(swatchbook.materials[id], Pattern): xml += ' <pattern>\n' xml += sbz.writemeta(material.info,2) for extra in material.extra: xml += ' <extra type="'+xmlescape(extra)+'">' if material.extra[extra]: xml += xmlescape(unicode(material.extra[extra])) xml += '</extra>\n' xml += ' </pattern>\n' xml += ' </materials>\n' if len(swatchbook.book.items) > 0: xml += ' <book' for display in swatchbook.book.display: if swatchbook.book.display[display]: xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"' xml += '>\n' xml += unicode(sbz.writem(swatchbook.book.items),'utf-8') xml += ' </book>\n' xml += '</SwatchBook>\n' tf = open(tempfile.mkstemp()[1],"w+b") zip = ZipFile(tf,'w',ZIP_DEFLATED) zip.writestr('swatchbook.xml',xml.encode('utf-8')) for profile in swatchbook.profiles: zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile) def addfiles(dir): for s in sorted(os.listdir(dir)): file = os.path.join(dir,s) if os.path.isdir(file) and file not in ('.','..'): addfiles(file) else: zip.write(file,file[len(swatchbook.tmpdir):]) addfiles(swatchbook.tmpdir) zip.close() tf.seek(0) return tf.read()
| 479,999 |