diff --git a/controllers/data_controller/export_controller.go b/controllers/data_controller/export_controller.go
index 5b2c7a78293822a5b0f728dea2fed2d63b531a5f..b94f2f57ae0240dc3a51119fa73fe58a4f43193d 100644
--- a/controllers/data_controller/export_controller.go
+++ b/controllers/data_controller/export_controller.go
@@ -72,6 +72,13 @@ func StartUserExport(userId string, s3urls bool, includeData bool, log *logrus.E
 			return
 		}
 
+		exportDb := storage.GetDatabase().GetExportStore(ctx, log)
+		err = exportDb.InsertExport(exportId, userId)
+		if err != nil {
+			log.Error(err)
+			return
+		}
+
 		var currentTar *tar.Writer
 		var currentTarBytes bytes.Buffer
 		part := 0
@@ -94,12 +101,19 @@ func StartUserExport(userId string, s3urls bool, includeData bool, log *logrus.E
 
 			log.Info("Uploading compressed tar file")
 			buf := bytes.NewBuffer(gzipBytes.Bytes())
-			obj, err := ds.UploadFile(util.BufferToStream(buf), int64(buf.Len()), ctx, log)
+			size := int64(buf.Len())
+			obj, err := ds.UploadFile(util.BufferToStream(buf), size, ctx, log)
 			if err != nil {
 				return err
 			}
 			parts = append(parts, obj)
 
+			fname := fmt.Sprintf("export-part-%d.tgz", part)
+			err = exportDb.InsertExportPart(exportId, part, size, fname, ds.DatastoreId, obj.Location)
+			if err != nil {
+				return err
+			}
+
 			return nil
 		}
 
diff --git a/migrations/13_add_export_tables_down.sql b/migrations/13_add_export_tables_down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..52260564f15e87e80a93066572afbfacaac517e2
--- /dev/null
+++ b/migrations/13_add_export_tables_down.sql
@@ -0,0 +1,3 @@
+-- Mirror the up migration's IF NOT EXISTS: make the down migration idempotent
+-- so re-running it (or running it against a partially-applied schema) succeeds.
+DROP INDEX IF EXISTS export_parts_index;
+DROP TABLE IF EXISTS export_parts;
+DROP TABLE IF EXISTS exports;
diff --git a/migrations/13_add_export_tables_up.sql b/migrations/13_add_export_tables_up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a4c02105172e2d8c29ff48671e262831c87dd3ba
--- /dev/null
+++ b/migrations/13_add_export_tables_up.sql
@@ -0,0 +1,13 @@
+CREATE TABLE IF NOT EXISTS exports (
+    export_id TEXT PRIMARY KEY NOT NULL,
+    entity TEXT NOT NULL
+);
+CREATE TABLE IF NOT EXISTS export_parts (
+    export_id TEXT NOT NULL,
+    "index" INT NOT NULL, -- quoted: INDEX is an SQL keyword (reserved in some dialects)
+    size_bytes BIGINT NOT NULL,
+    file_name TEXT NOT NULL,
+    datastore_id TEXT NOT NULL,
+    location TEXT NOT NULL
+);
+CREATE UNIQUE INDEX IF NOT EXISTS export_parts_index ON export_parts (export_id, "index");
diff --git a/storage/storage.go b/storage/storage.go
index 3cfd462550f8760c7a502ba963dcd39737e2a930..b74908e0c3c61888e661af8defd5e3c91f7b5833 100644
--- a/storage/storage.go
+++ b/storage/storage.go
@@ -22,6 +22,7 @@ type repos struct {
 	thumbnailStore *stores.ThumbnailStoreFactory
 	urlStore       *stores.UrlStoreFactory
 	metadataStore  *stores.MetadataStoreFactory
+	exportStore    *stores.ExportStoreFactory
 }
 
 var dbInstance *Database
@@ -79,6 +80,10 @@ func OpenDatabase(connectionString string, maxConns int, maxIdleConns int) error
 	if d.repos.metadataStore, err = stores.InitMetadataStore(d.db); err != nil {
 		return err
 	}
+	logrus.Info("Setting up export DB store...")
+	if d.repos.exportStore, err = stores.InitExportStore(d.db); err != nil {
+		return err
+	}
 
 	// Run some tasks that should always be done on startup
 	if err = populateDatastores(d); err != nil {
@@ -107,3 +112,8 @@ func (d *Database) GetUrlStore(ctx context.Context, log *logrus.Entry) *stores.U
 func (d *Database) GetMetadataStore(ctx context.Context, log *logrus.Entry) *stores.MetadataStore {
 	return d.repos.metadataStore.Create(ctx, log)
 }
+
+// GetExportStore returns an export store scoped to the given request context and logger.
+func (d *Database) GetExportStore(ctx context.Context, log *logrus.Entry) *stores.ExportStore {
+	return d.repos.exportStore.Create(ctx, log)
+}
diff --git a/storage/stores/export_store.go b/storage/stores/export_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..3a4ba511b82e5c02a4226e55c08c8a0809e604a6
--- /dev/null
+++ b/storage/stores/export_store.go
@@ -0,0 +1,73 @@
+package stores
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/sirupsen/logrus"
+)
+
+// "index" is quoted in the SQL below because INDEX is a keyword in several SQL dialects.
+const insertExportMetadata = "INSERT INTO exports (export_id, entity) VALUES ($1, $2);"
+const insertExportPart = "INSERT INTO export_parts (export_id, \"index\", size_bytes, file_name, datastore_id, location) VALUES ($1, $2, $3, $4, $5, $6);"
+
+type exportStoreStatements struct {
+	insertExportMetadata *sql.Stmt
+	insertExportPart     *sql.Stmt
+}
+
+// ExportStoreFactory prepares the export statements once against the shared
+// database handle and creates request-scoped ExportStores from them.
+type ExportStoreFactory struct {
+	sqlDb *sql.DB
+	stmts *exportStoreStatements
+}
+
+// ExportStore is a request-scoped view over the export tables, carrying the
+// request's context and logger alongside the shared prepared statements.
+type ExportStore struct {
+	factory    *ExportStoreFactory // just for reference
+	ctx        context.Context
+	log        *logrus.Entry
+	statements *exportStoreStatements // copied from factory
+}
+
+// InitExportStore prepares all export statements up front so SQL errors
+// surface at startup rather than on first use.
+func InitExportStore(sqlDb *sql.DB) (*ExportStoreFactory, error) {
+	store := ExportStoreFactory{stmts: &exportStoreStatements{}}
+	var err error
+
+	store.sqlDb = sqlDb
+
+	if store.stmts.insertExportMetadata, err = store.sqlDb.Prepare(insertExportMetadata); err != nil {
+		return nil, err
+	}
+	if store.stmts.insertExportPart, err = store.sqlDb.Prepare(insertExportPart); err != nil {
+		return nil, err
+	}
+
+	return &store, nil
+}
+
+// Create returns an ExportStore bound to the given request context and logger.
+func (f *ExportStoreFactory) Create(ctx context.Context, entry *logrus.Entry) *ExportStore {
+	return &ExportStore{
+		factory:    f,
+		ctx:        ctx,
+		log:        entry,
+		statements: f.stmts, // we copy this intentionally
+	}
+}
+
+// InsertExport records a new export run owned by the given entity.
+func (s *ExportStore) InsertExport(exportId string, entity string) error {
+	_, err := s.statements.insertExportMetadata.ExecContext(s.ctx, exportId, entity)
+	return err
+}
+
+// InsertExportPart records one uploaded archive part belonging to an export.
+func (s *ExportStore) InsertExportPart(exportId string, index int, size int64, name string, datastoreId string, location string) error {
+	_, err := s.statements.insertExportPart.ExecContext(s.ctx, exportId, index, size, name, datastoreId, location)
+	return err
+}