# @name: resources.py
# @creation_date: 2022-02-23
# @license: The MIT License <https://opensource.org/licenses/MIT>
# @author: Simon Bowie <ad7588@coventry.ac.uk>
# @purpose: functions for resources
# @acknowledgements:
# isbntools: https://isbntools.readthedocs.io/en/latest/info.html
# regex for URLs: https://gist.github.com/gruber/249502

from flask import Blueprint, render_template, request, flash, redirect, url_for
from .models import Resource
from werkzeug.exceptions import abort
from . import db
from isbntools.app import *
import requests
import re

# function to retrieve data about a single resource from the database
def get_resource(resource_id):
    resource = Resource.query.filter_by(id=resource_id).first()
    if resource is None:
        abort(404)
    return resource
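
# Illustrative usage sketch: a view in a blueprint might call get_resource()
# and render the result. The blueprint name 'resources_blueprint' and the
# template 'resource.html' are assumptions for illustration only.
#
#   resources_blueprint = Blueprint('resources', __name__)
#
#   @resources_blueprint.route('/resources/<int:resource_id>')
#   def show_resource(resource_id):
#       resource = get_resource(resource_id)  # aborts with a 404 if not found
#       return render_template('resource.html', resource=resource)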

# function to delete a single resource
def delete_resource(resource_id):
    deletion = Resource.query.get(resource_id)
    db.session.delete(deletion)
    db.session.commit()
    flash('Successfully deleted!')
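
# Illustrative usage sketch: a POST route could call delete_resource() and then
# redirect; the endpoint name 'resources.index' is hypothetical.
#
#   @resources_blueprint.route('/resources/<int:resource_id>/delete', methods=['POST'])
#   def delete(resource_id):
#       delete_resource(resource_id)
#       return redirect(url_for('resources.index'))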

# function to get filters for a specific field
def get_filter_values(field, type):
    # get field values for filter
    field_filter = Resource.query.filter_by(type=type).with_entities(getattr(Resource, field))
    # turn SQLAlchemy result rows into a flat list
    field_filter = [i for i, in field_filter]
    # split each element on ' / ' (useful for scriptingLanguage only)
    field_filter = [y for x in field_filter for y in x.split(' / ')]
    # consolidate duplicate values
    field_filter = list(dict.fromkeys(field_filter))
    # filter None values from list
    field_filter = filter(None, field_filter)
    # sort list in alphabetical order
    field_filter = sorted(field_filter)
    return field_filter
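
# Illustrative usage sketch: the field name 'scriptingLanguage' and the type
# value 'tool' are assumed example values, not confirmed database contents.
#
#   languages = get_filter_values('scriptingLanguage', 'tool')
#   # e.g. ['JavaScript', 'PHP', 'Python'] with duplicates and None values removed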

# function to get book data including metadata and covers
def get_book_data(isbn):
    try:
        book = meta(isbn)
        description = {'desc': desc(isbn)}
        book.update(description)
        # get highest-resolution book cover possible
        openl_url = 'https://covers.openlibrary.org/b/isbn/' + book['ISBN-13'] + '-L.jpg?default=false'
        request = requests.get(openl_url)
        if request.status_code != 200:
            book.update(cover(isbn))
        else:
            book_cover = {'thumbnail': openl_url}
            book.update(book_cover)
        return book
    except Exception:
        # if the metadata or cover lookup fails, fall through and return None
        pass
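
# Illustrative usage sketch: because the except clause swallows lookup errors,
# callers should treat a None return as "no metadata found". The ISBN below is
# a placeholder, and the 'Title' key reflects isbntools' usual metadata fields
# rather than anything guaranteed by this module.
#
#   book = get_book_data('9781234567897')  # placeholder ISBN-13
#   if book is not None:
#       print(book.get('Title'), book.get('thumbnail'))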

# function to replace embedded URL strings with href links
def replace_urls(input):
    # Compile a regular expression to match URLs.
    # This regular expression is not exhaustive and may not match all possible URLs.
    # It is intended to be a starting point and can be refined and expanded as needed.
    url_regex = re.compile(r'((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:\'\".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))')
    # Find all URLs in the input string using the regular expression.
    # This returns an iterator of Match objects, each representing a single URL in the string.
    matches = url_regex.finditer(input)
    # Iterate over the matches and replace each URL with an HTML link.
    for match in matches:
        # Get the full URL from the Match object.
        url = match.group(0)
        # Create the HTML link by wrapping the URL in an <a> tag.
        # If the URL does not include a protocol (e.g. "http://" or "https://"),
        # then add "http://" as the default protocol.
        if not url.startswith('http'):
            link = f'<a href="http://{url}">{url}</a>'
        else:
            link = f'<a href="{url}">{url}</a>'
        # Replace the URL in the original string with the HTML link.
        input = input.replace(url, link)
    return input
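
# Illustrative usage sketch of the expected transformation:
#
#   replace_urls('See https://example.com for details')
#   # -> 'See <a href="https://example.com">https://example.com</a> for details'
#
#   replace_urls('Docs at example.org')
#   # -> 'Docs at <a href="http://example.org">example.org</a>'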