Join the Swarm

If you're familiar with running an IPFS node, you can run the script below as a cron job to participate in loading podcast episodes into the IPFS network.

The script...

  1. Requests work from IPFSPodcasting.net.
  2. Receives a URL to download.
  3. Loads the audio into IPFS.
  4. Reports the hash to IPFSPodcasting.net.

IPFSPodcasting.net will also instruct your node to unpin an episode after traffic has diminished (typically 48 hours after the last podcast download).
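
Before the full script, here is the shape of the /Query response it works with. The field names are the ones the script reads; the values shown are placeholders, not real server output.

#Illustrative /Query response fields (placeholder values only)
#'message' : the script only checks whether this starts with 'No Work'
#'download' / 'filename' : URL to fetch and the name to store it under in IPFS ('' when there is nothing to download)
#'delete' : hash to unpin ('' when there is nothing to delete)
work = { 'message': '...', 'download': 'https://example.com/episode.mp3', 'filename': 'episode.mp3', 'delete': '' }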

#!/usr/bin/env python3
import subprocess
import json
import requests
import shutil
import time
import random
import logging

#Enter your email for support contact.
email = '[email protected]'
#Set download = False to stop downloading and only process deletions (you must wait for episodes to expire)
download = True

#Basic logging to ipfspodcastnode.log
logging.basicConfig(format="%(asctime)s : %(message)s", datefmt="%Y-%m-%d %H:%M:%S", filename="ipfspodcastnode.log", level=logging.INFO)

#Try to randomize requests so not all on the minute
wait = random.randint(1, 150)
logging.info('Sleeping ' + str(wait) + ' seconds...')
time.sleep(wait)

#Find ipfs, wget, wc
ipfspath = shutil.which('ipfs')
wgetpath = shutil.which('wget')
wcpath = shutil.which('wc')

#Get IPFS ID
ipid = subprocess.run(ipfspath + ' id', shell=True, capture_output=True, text=True)
ipfs = json.loads(ipid.stdout)
ipfs_id = ipfs['ID']

#Query ipfspodcasting.net for work
logging.info('Requesting work...')
payload = { 'ipfs_id': ipfs_id, 'email': email, 'version': 0.2, 'download': download }
response = requests.post("https://IPFSPodcasting.net/Query", data=payload)
work = json.loads(response.text)
logging.info('Response : ' + str(work))

if work['message'][0:7] != 'No Work':
  #Build the payload for reporting results
  payload = { 'ipfs_id': ipfs_id, 'email': email, 'version': 0.2 }
  if work['download'] != '' and work['filename'] != '':
    logging.info('Downloading ' + str(work['download']))
    #Download any "downloads" and Add to IPFS
    hash = subprocess.run(wgetpath + ' -q "' + work['download'] + '" -O - | ' + ipfspath + ' add -q -w --stdin-name "' + work['filename'] + '"', shell=True, capture_output=True, text=True)
    downhash = hash.stdout.strip().split('\n')
    #Get file size (for validation)
    size = subprocess.run(ipfspath + ' cat ' + downhash[0] + ' | ' + wcpath + ' -c', shell=True, capture_output=True, text=True)
    downsize = size.stdout.strip()
    logging.info('Added to IPFS ( hash : ' + str(downhash[0]) + ' length : ' + str(downsize) + ')')
    payload['downloaded'] = downhash[0] + '/' + downhash[1]
    payload['length'] = downsize

  if work['delete'] != '':
    #Delete any expired episodes
    logging.info('Unpinned old/expired hash (' + str(work['delete']) + ')')
    delete = subprocess.run(ipfspath + ' pin rm ' + work['delete'], shell=True, capture_output=True, text=True)
    payload['deleted'] = work['delete']

  #Report Results
  logging.info('Reporting results...')
  response = requests.post("https://ipfspodcasting.net/Response", data=payload)

else:
  logging.info('No work.')

Copy or download the script above (optionally edit line 11 to include your email address)...

Create a cron job to run the script every 5-10 minutes, using flock to prevent overlapping runs. The script writes its activity to an ipfspodcastnode.log file in the working directory so you can monitor what it's doing.

*/10 * * * * cd ~/ && /usr/bin/flock -n /tmp/ipfspodcastnode.lockfile ~/ipfspodcastnode.py
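
The entry above invokes the script directly, so make sure it is executable (chmod +x ~/ipfspodcastnode.py); alternatively, put your Python interpreter in front of the script path in the flock command.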

You can view participating nodes to find your node stats (using your IPFS ID).
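
If you need your IPFS ID for that lookup, the same 'ipfs id' call the script makes works as a quick one-off. A minimal sketch, assuming ipfs is on your PATH:

import json, shutil, subprocess

#Print this node's IPFS ID
out = subprocess.run([shutil.which('ipfs'), 'id'], capture_output=True, text=True)
print(json.loads(out.stdout)['ID'])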