diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 36d9a73faa1aa347af5e4e234ee54133fc79e043..929597bb06752d34b6948634ddaf7d2d11d3001a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -49,6 +49,45 @@ black:
   script:
     - black --fast .
 
+# SOURCE: https://pypi.org/project/pylint-gitlab/
+pylint:
+  image: python:latest
+  stage: test
+  before_script:
+    - python -V
+    - mkdir -p public/badges public/lint
+    - echo undefined > public/badges/$CI_JOB_NAME.score
+    - pip install pylint-gitlab
+    - pip install -r requirements.txt
+  script:
+    - pylint  --exit-zero --output-format=text $(find -type f -name "*.py" ! -path "**/.venv/**") | tee /tmp/pylint.txt
+    - sed -n 's/^Your code has been rated at \([-0-9.]*\)\/.*/\1/p' /tmp/pylint.txt > public/badges/$CI_JOB_NAME.score
+    - pylint  --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter $(find -type f -name "*.py" ! -path "**/.venv/**") > codeclimate.json
+    - pylint  --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter $(find -type f -name "*.py" ! -path "**/.venv/**") > public/lint/index.html
+  after_script:
+    - anybadge --overwrite --label $CI_JOB_NAME --value=$(cat public/badges/$CI_JOB_NAME.score) --file=public/badges/$CI_JOB_NAME.svg 4=red 6=orange 8=yellow 10=green
+    - |
+      echo "Your score is: $(cat public/badges/$CI_JOB_NAME.score)"
+  artifacts:
+    paths:
+      - public
+    reports:
+      codequality: codeclimate.json
+    when: always
+
+pages:
+  stage: deploy
+  image: alpine:latest
+  script:
+    - echo
+  artifacts:
+    paths:
+      - public
+  only:
+    refs:
+      - master
+
+
 test:
 #  variables:
 #    DATABASE_URL: "postgresql://postgres:postgres@postgres:5432/$POSTGRES_DB"
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000000000000000000000000000000000000..7fb66343a471746257c8eaa4350d930d41d9e865
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,3 @@
+[MASTER]
+
+disable=line-too-long
diff --git a/unisportomat/course_scraper/course_scraper.py b/unisportomat/course_scraper/course_scraper.py
index 27ea1ce5d033d773a6725112d88e9b0d042b899c..e0671bea476f4c54ef3e6776692ae1bb0b85d8cb 100644
--- a/unisportomat/course_scraper/course_scraper.py
+++ b/unisportomat/course_scraper/course_scraper.py
@@ -1,7 +1,11 @@
+"""
+Implementation of a rudimentary scraping tool
+for http://www.buchsys.de for SWP UniSport-O-Mat.
+"""
+
 import requests
 from bs4 import BeautifulSoup
 
-
 def fetch_website(url):
     """
     Helper function to fetch the content of a website.
@@ -17,17 +21,21 @@ def fetch_website(url):
         # pinpoint the parser only to the section containing the course names and links
         return soup.find("dl", {"class": "bs_menu"}).find_all("a", href=True)
 
-    except requests.exceptions.RequestException as e:
-        print(e)
+    except requests.exceptions.RequestException as err:
+        print(err)
+        raise
 
 
 def scraping(site=None) -> dict:
     """
-    Returns a dictionary of the form {name: link}, containing the scraped content of https://www.buchsys.de/fu-berlin/angebote/aktueller_zeitraum/index.html, unless another URL is given as an argument.
+    Returns a dictionary of the form {name: link},
+    containing the scraped content of
+    https://www.buchsys.de/fu-berlin/angebote/aktueller_zeitraum/index.html,
+    unless another URL is given as an argument.
     """
     courses = {}
 
-    if site == None:
+    if site is None:
         site = "https://www.buchsys.de/fu-berlin/angebote/aktueller_zeitraum/"
 
     website = fetch_website(site)
diff --git a/unisportomat/course_scraper/test_course_scraper.py b/unisportomat/course_scraper/test_course_scraper.py
index 17df4369b844ce2890b81dae7fe42cb9f0621b9c..4bac400bef4d091c7455c7cdd91c125c46fd0340 100644
--- a/unisportomat/course_scraper/test_course_scraper.py
+++ b/unisportomat/course_scraper/test_course_scraper.py
@@ -1,10 +1,23 @@
+"""
+Test module for course_scraper.py.
+"""
 from django.test import TestCase
-from course_scraper import fetch_website, scraping
+from course_scraper import scraping  # fetch_website currently unused
 
 
 class ScraperTestCase(TestCase):
+    """
+    Basic tests for the scraping() function,
+    ensuring it returns usable course data.
+    """
     def test_returns_dict(self):
+        """
+        Testing return type of scraping().
+        """
         self.assertIsInstance(scraping(), dict)
 
     def test_dict_not_empty(self):
+        """
+        Testing if dict is not empty.
+        """
         self.assertTrue(len(scraping()) > 0)