diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4012c30e67914cdda3423b42ad096cecf4a39d20..76f502446053e760274d887c35c0d916c5413ff4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -96,6 +96,7 @@ path/server, for example, then you can simply update the path in the config for
 ### Removed
 
 * IPFS support has been removed due to maintenance burden.
+* Exports initiated through the admin API no longer support `?include_data=false`. Exports will always contain data.
 
 ### Changed
 
diff --git a/api/custom/exports.go b/api/custom/exports.go
index 2814ff6a543e9f97a61d4574ee09bea4ae1823ca..84be0e32f189c1194e93c360c6e202505ae3a2f3 100644
--- a/api/custom/exports.go
+++ b/api/custom/exports.go
@@ -9,14 +9,14 @@ import (
 	"github.com/turt2live/matrix-media-repo/api/_apimeta"
 	"github.com/turt2live/matrix-media-repo/api/_responses"
 	"github.com/turt2live/matrix-media-repo/api/_routers"
+	"github.com/turt2live/matrix-media-repo/database"
+	"github.com/turt2live/matrix-media-repo/datastores"
+	"github.com/turt2live/matrix-media-repo/tasks"
 
 	"github.com/dustin/go-humanize"
 	"github.com/sirupsen/logrus"
 	"github.com/turt2live/matrix-media-repo/common/rcontext"
-	"github.com/turt2live/matrix-media-repo/controllers/data_controller"
 	"github.com/turt2live/matrix-media-repo/matrix"
-	"github.com/turt2live/matrix-media-repo/storage"
-	"github.com/turt2live/matrix-media-repo/storage/datastore"
 	"github.com/turt2live/matrix-media-repo/templating"
 	"github.com/turt2live/matrix-media-repo/util"
 )
@@ -47,7 +47,6 @@ func ExportUserData(r *http.Request, rctx rcontext.RequestContext, user _apimeta
 		return _responses.AuthFailed()
 	}
 
-	includeData := r.URL.Query().Get("include_data") != "false"
 	s3urls := r.URL.Query().Get("s3_urls") != "false"
 
 	userId := _routers.GetParam("userId", r)
@@ -58,10 +57,9 @@ func ExportUserData(r *http.Request, rctx rcontext.RequestContext, user _apimeta
 
 	rctx = rctx.LogWithFields(logrus.Fields{
 		"exportUserId": userId,
-		"includeData":  includeData,
 		"s3urls":       s3urls,
 	})
-	task, exportId, err := data_controller.StartUserExport(userId, s3urls, includeData, rctx)
+	task, exportId, err := tasks.RunUserExport(rctx, userId, s3urls)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
@@ -69,7 +67,7 @@ func ExportUserData(r *http.Request, rctx rcontext.RequestContext, user _apimeta
 	}
 
 	return &_responses.DoNotCacheResponse{Payload: &ExportStarted{
-		TaskID:   task.ID,
+		TaskID:   task.TaskId,
 		ExportID: exportId,
 	}}
 }
@@ -84,7 +82,6 @@ func ExportServerData(r *http.Request, rctx rcontext.RequestContext, user _apime
 		return _responses.AuthFailed()
 	}
 
-	includeData := r.URL.Query().Get("include_data") != "false"
 	s3urls := r.URL.Query().Get("s3_urls") != "false"
 
 	serverName := _routers.GetParam("serverName", r)
@@ -109,10 +106,9 @@ func ExportServerData(r *http.Request, rctx rcontext.RequestContext, user _apime
 
 	rctx = rctx.LogWithFields(logrus.Fields{
 		"exportServerName": serverName,
-		"includeData":      includeData,
 		"s3urls":           s3urls,
 	})
-	task, exportId, err := data_controller.StartServerExport(serverName, s3urls, includeData, rctx)
+	task, exportId, err := tasks.RunServerExport(rctx, serverName, s3urls)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
@@ -120,7 +116,7 @@ func ExportServerData(r *http.Request, rctx rcontext.RequestContext, user _apime
 	}
 
 	return &_responses.DoNotCacheResponse{Payload: &ExportStarted{
-		TaskID:   task.ID,
+		TaskID:   task.TaskId,
 		ExportID: exportId,
 	}}
 }
@@ -140,16 +136,20 @@ func ViewExport(r *http.Request, rctx rcontext.RequestContext, user _apimeta.Use
 		"exportId": exportId,
 	})
 
-	exportDb := storage.GetDatabase().GetExportStore(rctx)
+	exportDb := database.GetInstance().Exports.Prepare(rctx)
+	partsDb := database.GetInstance().ExportParts.Prepare(rctx)
 
-	exportInfo, err := exportDb.GetExportMetadata(exportId)
+	entityId, err := exportDb.GetEntity(exportId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
-		return _responses.InternalServerError("failed to get metadata")
+		return _responses.InternalServerError("failed to get entity for export ID")
+	}
+	if entityId == "" {
+		return _responses.NotFoundError()
 	}
 
-	parts, err := exportDb.GetExportParts(exportId)
+	parts, err := partsDb.GetForExport(exportId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
@@ -164,14 +164,14 @@ func ViewExport(r *http.Request, rctx rcontext.RequestContext, user _apimeta.Use
 	}
 
 	model := &templating.ViewExportModel{
-		ExportID:    exportInfo.ExportID,
-		Entity:      exportInfo.Entity,
+		ExportID:    exportId,
+		Entity:      entityId,
 		ExportParts: make([]*templating.ViewExportPartModel, 0),
 	}
 	for _, p := range parts {
 		model.ExportParts = append(model.ExportParts, &templating.ViewExportPartModel{
-			ExportID:       exportInfo.ExportID,
-			Index:          p.Index,
+			ExportID:       exportId,
+			Index:          p.PartNum,
 			FileName:       p.FileName,
 			SizeBytes:      p.SizeBytes,
 			SizeBytesHuman: humanize.Bytes(uint64(p.SizeBytes)),
@@ -204,16 +204,17 @@ func GetExportMetadata(r *http.Request, rctx rcontext.RequestContext, user _apim
 		"exportId": exportId,
 	})
 
-	exportDb := storage.GetDatabase().GetExportStore(rctx)
+	exportDb := database.GetInstance().Exports.Prepare(rctx)
+	partsDb := database.GetInstance().ExportParts.Prepare(rctx)
 
-	exportInfo, err := exportDb.GetExportMetadata(exportId)
+	entityId, err := exportDb.GetEntity(exportId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
-		return _responses.InternalServerError("failed to get metadata")
+		return _responses.InternalServerError("failed to get entity for export ID")
 	}
 
-	parts, err := exportDb.GetExportParts(exportId)
+	parts, err := partsDb.GetForExport(exportId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
@@ -221,12 +222,12 @@ func GetExportMetadata(r *http.Request, rctx rcontext.RequestContext, user _apim
 	}
 
 	metadata := &ExportMetadata{
-		Entity: exportInfo.Entity,
+		Entity: entityId,
 		Parts:  make([]*ExportPartMetadata, 0),
 	}
 	for _, p := range parts {
 		metadata.Parts = append(metadata.Parts, &ExportPartMetadata{
-			Index:     p.Index,
+			Index:     p.PartNum,
 			SizeBytes: p.SizeBytes,
 			FileName:  p.FileName,
 		})
@@ -247,7 +248,7 @@ func DownloadExportPart(r *http.Request, rctx rcontext.RequestContext, user _api
 		_responses.BadRequest("invalid export ID")
 	}
 
-	partId, err := strconv.ParseInt(pid, 10, 64)
+	partId, err := strconv.Atoi(pid)
 	if err != nil {
 		rctx.Log.Error(err)
 		return _responses.BadRequest("invalid part index")
@@ -258,15 +259,24 @@ func DownloadExportPart(r *http.Request, rctx rcontext.RequestContext, user _api
 		"partId":   partId,
 	})
 
-	db := storage.GetDatabase().GetExportStore(rctx)
-	part, err := db.GetExportPart(exportId, int(partId))
+	partsDb := database.GetInstance().ExportParts.Prepare(rctx)
+	part, err := partsDb.Get(exportId, partId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
 		return _responses.InternalServerError("failed to get part")
 	}
 
-	s, err := datastore.DownloadStream(rctx, part.DatastoreID, part.Location)
+	if part == nil {
+		return _responses.NotFoundError()
+	}
+
+	dsConf, ok := datastores.Get(rctx, part.DatastoreId)
+	if !ok {
+		sentry.CaptureMessage("failed to locate datastore")
+		return _responses.InternalServerError("failed to locate datastore")
+	}
+	s, err := datastores.Download(rctx, dsConf, part.Location)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
@@ -274,7 +284,7 @@ func DownloadExportPart(r *http.Request, rctx rcontext.RequestContext, user _api
 	}
 
 	return &_responses.DownloadResponse{
-		ContentType:       "application/gzip",
+		ContentType:       "application/gzip", // TODO: We should be detecting type rather than assuming
 		SizeBytes:         part.SizeBytes,
 		Data:              s,
 		Filename:          part.FileName,
@@ -297,39 +307,39 @@ func DeleteExport(r *http.Request, rctx rcontext.RequestContext, user _apimeta.U
 		"exportId": exportId,
 	})
 
-	db := storage.GetDatabase().GetExportStore(rctx)
+	exportDb := database.GetInstance().Exports.Prepare(rctx)
+	partsDb := database.GetInstance().ExportParts.Prepare(rctx)
 
 	rctx.Log.Info("Getting information on which parts to delete")
-	parts, err := db.GetExportParts(exportId)
+	parts, err := partsDb.GetForExport(exportId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
-		return _responses.InternalServerError("failed to delete export")
+		return _responses.InternalServerError("failed to get export parts")
 	}
 
 	for _, part := range parts {
-		rctx.Log.Info("Locating datastore: " + part.DatastoreID)
-		ds, err := datastore.LocateDatastore(rctx, part.DatastoreID)
+		rctx.Log.Debugf("Deleting object '%s' from datastore '%s'", part.Location, part.DatastoreId)
+		err = datastores.RemoveWithDsId(rctx, part.DatastoreId, part.Location)
 		if err != nil {
 			rctx.Log.Error(err)
 			sentry.CaptureException(err)
-			return _responses.InternalServerError("failed to delete export")
-		}
-
-		rctx.Log.Info("Deleting object: " + part.Location)
-		err = ds.DeleteObject(part.Location)
-		if err != nil {
-			rctx.Log.Warn(err)
-			sentry.CaptureException(err)
+			return _responses.InternalServerError("failed to delete export part")
 		}
 	}
 
-	rctx.Log.Info("Purging export from database")
-	err = db.DeleteExportAndParts(exportId)
+	rctx.Log.Debug("Purging export from database")
+	err = partsDb.DeleteForExport(exportId)
+	if err != nil {
+		rctx.Log.Error(err)
+		sentry.CaptureException(err)
+		return _responses.InternalServerError("failed to delete export parts")
+	}
+	err = exportDb.Delete(exportId)
 	if err != nil {
 		rctx.Log.Error(err)
 		sentry.CaptureException(err)
-		return _responses.InternalServerError("failed to delete export")
+		return _responses.InternalServerError("failed to delete export record")
 	}
 
 	return _responses.EmptyResponse{}
diff --git a/database/db.go b/database/db.go
index f88a8c8c70eecdb3fdcc36a19a308e7d2bb48f8f..9de18c04d37924c6b9a05bba518fa8a6d4c4bf9d 100644
--- a/database/db.go
+++ b/database/db.go
@@ -27,6 +27,8 @@ type Database struct {
 	UrlPreviews     *urlPreviewsTableStatements
 	MediaAttributes *mediaAttributesTableStatements
 	Tasks           *tasksTableStatements
+	Exports         *exportsTableStatements
+	ExportParts     *exportPartsTableStatements
 }
 
 var instance *Database
@@ -116,6 +118,12 @@ func openDatabase(connectionString string, maxConns int, maxIdleConns int) error
 	if d.Tasks, err = prepareTasksTables(d.conn); err != nil {
 		return errors.New("failed to create tasks table accessor: " + err.Error())
 	}
+	if d.Exports, err = prepareExportsTables(d.conn); err != nil {
+		return errors.New("failed to create exports table accessor: " + err.Error())
+	}
+	if d.ExportParts, err = prepareExportPartsTables(d.conn); err != nil {
+		return errors.New("failed to create export parts table accessor: " + err.Error())
+	}
 
 	instance = d
 	return nil
diff --git a/database/table_export_parts.go b/database/table_export_parts.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6ecf447717aaa32e099c9f979316a66ae57f657
--- /dev/null
+++ b/database/table_export_parts.go
@@ -0,0 +1,106 @@
+package database
+
+import (
+	"database/sql"
+	"errors"
+
+	"github.com/turt2live/matrix-media-repo/common/rcontext"
+)
+
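+// DbExportPart is a single archive file belonging to an export; large exports are split
+// into multiple parts, each stored as its own object in a datastore.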
+type DbExportPart struct {
+	ExportId    string
+	PartNum     int
+	SizeBytes   int64
+	FileName    string
+	DatastoreId string
+	Location    string
+}
+
+const insertExportPart = "INSERT INTO export_parts (export_id, index, size_bytes, file_name, datastore_id, location) VALUES ($1, $2, $3, $4, $5, $6);"
+const deleteExportPartsById = "DELETE FROM export_parts WHERE export_id = $1;"
+const selectExportPartsById = "SELECT export_id, index, size_bytes, file_name, datastore_id, location FROM export_parts WHERE export_id = $1;"
+const selectExportPartById = "SELECT export_id, index, size_bytes, file_name, datastore_id, location FROM export_parts WHERE export_id = $1 AND index = $2;"
+
+type exportPartsTableStatements struct {
+	insertExportPart      *sql.Stmt
+	deleteExportPartsById *sql.Stmt
+	selectExportPartsById *sql.Stmt
+	selectExportPartById  *sql.Stmt
+}
+
+type exportPartsTableWithContext struct {
+	statements *exportPartsTableStatements
+	ctx        rcontext.RequestContext
+}
+
+func prepareExportPartsTables(db *sql.DB) (*exportPartsTableStatements, error) {
+	var err error
+	var stmts = &exportPartsTableStatements{}
+
+	if stmts.insertExportPart, err = db.Prepare(insertExportPart); err != nil {
+		return nil, errors.New("error preparing insertExportPart: " + err.Error())
+	}
+	if stmts.deleteExportPartsById, err = db.Prepare(deleteExportPartsById); err != nil {
+		return nil, errors.New("error preparing deleteExportPartsById: " + err.Error())
+	}
+	if stmts.selectExportPartsById, err = db.Prepare(selectExportPartsById); err != nil {
+		return nil, errors.New("error preparing selectExportPartsById: " + err.Error())
+	}
+	if stmts.selectExportPartById, err = db.Prepare(selectExportPartById); err != nil {
+		return nil, errors.New("error preparing selectExportPartById: " + err.Error())
+	}
+
+	return stmts, nil
+}
+
+func (s *exportPartsTableStatements) Prepare(ctx rcontext.RequestContext) *exportPartsTableWithContext {
+	return &exportPartsTableWithContext{
+		statements: s,
+		ctx:        ctx,
+	}
+}
+
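+// GetForExport returns all parts recorded for the given export ID; an unknown export yields an empty slice.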
+func (s *exportPartsTableWithContext) GetForExport(exportId string) ([]*DbExportPart, error) {
+	results := make([]*DbExportPart, 0)
+	rows, err := s.statements.selectExportPartsById.QueryContext(s.ctx, exportId)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return results, nil
+		}
+		return nil, err
+	}
+	defer rows.Close()
+	for rows.Next() {
+		val := &DbExportPart{}
+		if err = rows.Scan(&val.ExportId, &val.PartNum, &val.SizeBytes, &val.FileName, &val.DatastoreId, &val.Location); err != nil {
+			return nil, err
+		}
+		results = append(results, val)
+	}
+	return results, nil
+}
+
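+// Get returns the part with the given number, or nil (with a nil error) if no such part exists.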
+func (s *exportPartsTableWithContext) Get(exportId string, partNum int) (*DbExportPart, error) {
+	row := s.statements.selectExportPartById.QueryRowContext(s.ctx, exportId, partNum)
+	val := &DbExportPart{}
+	err := row.Scan(&val.ExportId, &val.PartNum, &val.SizeBytes, &val.FileName, &val.DatastoreId, &val.Location)
+	if err == sql.ErrNoRows {
+		err = nil
+		val = nil
+	}
+	return val, err
+}
+
+func (s *exportPartsTableWithContext) Insert(part *DbExportPart) error {
+	_, err := s.statements.insertExportPart.ExecContext(s.ctx, part.ExportId, part.PartNum, part.SizeBytes, part.FileName, part.DatastoreId, part.Location)
+	return err
+}
+
+func (s *exportPartsTableWithContext) DeleteForExport(exportId string) error {
+	_, err := s.statements.deleteExportPartsById.ExecContext(s.ctx, exportId)
+	return err
+}
diff --git a/database/table_exports.go b/database/table_exports.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea306f305d1e6dbb91232268b2c9c4e57f4d8808
--- /dev/null
+++ b/database/table_exports.go
@@ -0,0 +1,70 @@
+package database
+
+import (
+	"database/sql"
+	"errors"
+
+	"github.com/turt2live/matrix-media-repo/common/rcontext"
+)
+
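+// The exports table maps an export ID to the entity (user ID or server name) the export was created for.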
+const insertExport = "INSERT INTO exports (export_id, entity) VALUES ($1, $2);"
+const selectExportEntity = "SELECT entity FROM exports WHERE export_id = $1;"
+const deleteExport = "DELETE FROM exports WHERE export_id = $1;"
+
+type exportsTableStatements struct {
+	insertExport       *sql.Stmt
+	selectExportEntity *sql.Stmt
+	deleteExport       *sql.Stmt
+}
+
+type exportsTableWithContext struct {
+	statements *exportsTableStatements
+	ctx        rcontext.RequestContext
+}
+
+func prepareExportsTables(db *sql.DB) (*exportsTableStatements, error) {
+	var err error
+	var stmts = &exportsTableStatements{}
+
+	if stmts.insertExport, err = db.Prepare(insertExport); err != nil {
+		return nil, errors.New("error preparing insertExport: " + err.Error())
+	}
+	if stmts.selectExportEntity, err = db.Prepare(selectExportEntity); err != nil {
+		return nil, errors.New("error preparing selectExportEntity: " + err.Error())
+	}
+	if stmts.deleteExport, err = db.Prepare(deleteExport); err != nil {
+		return nil, errors.New("error preparing deleteExport: " + err.Error())
+	}
+
+	return stmts, nil
+}
+
+func (s *exportsTableStatements) Prepare(ctx rcontext.RequestContext) *exportsTableWithContext {
+	return &exportsTableWithContext{
+		statements: s,
+		ctx:        ctx,
+	}
+}
+
+func (s *exportsTableWithContext) Insert(exportId string, entity string) error {
+	_, err := s.statements.insertExport.ExecContext(s.ctx, exportId, entity)
+	return err
+}
+
+func (s *exportsTableWithContext) Delete(exportId string) error {
+	_, err := s.statements.deleteExport.ExecContext(s.ctx, exportId)
+	return err
+}
+
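+// GetEntity returns the entity for an export ID, or an empty string (with a nil error) if the export is unknown.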
+func (s *exportsTableWithContext) GetEntity(exportId string) (string, error) {
+	row := s.statements.selectExportEntity.QueryRowContext(s.ctx, exportId)
+	val := ""
+	err := row.Scan(&val)
+	if err == sql.ErrNoRows {
+		err = nil
+		val = ""
+	}
+	return val, err
+}
diff --git a/docs/admin.md b/docs/admin.md
index 842bb80b0adf2bd9ce3870495c2f60c469b7f605..efe9f0ede608d1ecf378a601307d1dcf726d2c92 100644
--- a/docs/admin.md
+++ b/docs/admin.md
@@ -395,9 +395,9 @@ Exports (and therefore imports) are currently done on a per-user basis. This is
 
 #### Exporting data for a user
 
-URL: `POST /_matrix/media/unstable/admin/user/<user ID>/export?include_data=true&s3_urls=true`
+URL: `POST /_matrix/media/unstable/admin/user/<user ID>/export?s3_urls=true`
 
-Both query params are optional, and their default values are shown. If `include_data` is false, only metadata will be returned by the export. `s3_urls`, when true, includes the s3 URL to the media in the metadata if one is available.
+The query param is optional, and its default value is shown. `s3_urls`, when true, includes the s3 URL to the media in the metadata if one is available.
 
 The response is a task ID and export ID to put into the 'view export' URL:
 
@@ -414,7 +414,7 @@ The response is a task ID and export ID to put into the 'view export' URL:
 
 #### Exporting data for a domain
 
-URL: `POST /_matrix/media/unstable/admin/server/<server name>/export?include_data=true&s3_urls=true`
+URL: `POST /_matrix/media/unstable/admin/server/<server name>/export?s3_urls=true`
 
 Response is the same as the user export endpoint above. The `<server name>` does not need to be configured in the repo - it will export data it has on a remote server if you ask it to.
 
@@ -472,7 +472,7 @@ The response is an empty JSON object if successful.
 
 Once an export has been completed it can be imported back into the media repo. Files that are already known to the repo will not be overwritten - it'll use its known copy first.
 
-**Note**: Imports happen in memory, which can balloon quickly depending on how you exported your data. Although you can import data without s3 it is recommended that you only import from archives generated with `include_data=false`.
+**Note**: Imports happen in memory, which can balloon quickly depending on how you exported your data.
 
 **Note**: Only repository administrators can perform imports, regardless of who they are for.
 
diff --git a/tasks/exec.go b/tasks/exec.go
index a8eab10c85d3c1b9b16d582358888bdf2db12dbe..f4b17759b61d16066f3bbf8be745d364be5a2d60 100644
--- a/tasks/exec.go
+++ b/tasks/exec.go
@@ -63,6 +63,8 @@ func beginTask(task *database.DbTask) {
 	runnerCtx := rcontext.Initial().LogWithFields(logrus.Fields{"task_id": task.TaskId})
 	if task.Name == string(TaskDatastoreMigrate) {
 		go task_runner.DatastoreMigrate(runnerCtx, task)
+	} else if task.Name == string(TaskExportData) {
+		go task_runner.ExportData(runnerCtx, task)
 	} else {
 		m := fmt.Sprintf("Received unknown task to run %s (ID: %d)", task.Name, task.TaskId)
 		logrus.Warn(m)
diff --git a/tasks/schedule.go b/tasks/schedule.go
index 9821a7cb4e252ff52b2925c29a19ad637258cfab..9e4aed8474c9fff8d9a736beab7571edfdd8c510 100644
--- a/tasks/schedule.go
+++ b/tasks/schedule.go
@@ -20,6 +20,7 @@ type RecurringTaskName string
 
 const (
 	TaskDatastoreMigrate TaskName = "storage_migration"
+	TaskExportData       TaskName = "export_data"
 )
 const (
 	RecurringTaskPurgeThumbnails  RecurringTaskName = "recurring_purge_thumbnails"
@@ -122,3 +123,30 @@
 		BeforeTs:   beforeTs,
 	})
 }
+
+func RunUserExport(ctx rcontext.RequestContext, userId string, includeS3Urls bool) (*database.DbTask, string, error) {
+	return runExport(ctx, task_runner.ExportDataParams{
+		UserId:        userId,
+		IncludeS3Urls: includeS3Urls,
+		//ExportId:      "", // populated by runExport
+	})
+}
+
+func RunServerExport(ctx rcontext.RequestContext, serverName string, includeS3Urls bool) (*database.DbTask, string, error) {
+	return runExport(ctx, task_runner.ExportDataParams{
+		ServerName:    serverName,
+		IncludeS3Urls: includeS3Urls,
+		//ExportId:      "", // populated by runExport
+	})
+}
+
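+// runExport generates a unique export ID, stamps it into the task params, and schedules the export task.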
+func runExport(ctx rcontext.RequestContext, paramsTemplate task_runner.ExportDataParams) (*database.DbTask, string, error) {
+	exportId, err := ids.NewUniqueId()
+	if err != nil {
+		return nil, "", err
+	}
+	paramsTemplate.ExportId = exportId
+	task, err := scheduleTask(ctx, TaskExportData, paramsTemplate)
+	return task, exportId, err
+}
diff --git a/tasks/task_runner/export_data.go b/tasks/task_runner/export_data.go
new file mode 100644
index 0000000000000000000000000000000000000000..3e9574c676f18973142ca31e7e2a3876b5919b31
--- /dev/null
+++ b/tasks/task_runner/export_data.go
@@ -0,0 +1,103 @@
+package task_runner
+
+import (
+	"io"
+
+	"github.com/getsentry/sentry-go"
+	"github.com/turt2live/matrix-media-repo/archival"
+	"github.com/turt2live/matrix-media-repo/common/rcontext"
+	"github.com/turt2live/matrix-media-repo/database"
+	"github.com/turt2live/matrix-media-repo/datastores"
+)
+
+type ExportDataParams struct {
+	UserId        string `json:"user_id,omitempty"`
+	ServerName    string `json:"server_name,omitempty"`
+	ExportId      string `json:"export_id"`
+	IncludeS3Urls bool   `json:"include_s3_urls"`
+}
+
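+// ExportData executes an export_data task: it validates the requested entity, records the
+// export, and streams the entity's media into one or more archive parts.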
+func ExportData(ctx rcontext.RequestContext, task *database.DbTask) {
+	defer markDone(ctx, task)
+
+	params := ExportDataParams{}
+	if err := task.Params.ApplyTo(&params); err != nil {
+		ctx.Log.Error("Error decoding params: ", err)
+		sentry.CaptureException(err)
+		return
+	}
+
+	if params.ExportId == "" {
+		ctx.Log.Error("No export ID provided")
+		sentry.CaptureMessage("No export ID provided")
+		return
+	}
+
+	exportDb := database.GetInstance().Exports.Prepare(ctx)
+	if existingEntity, err := exportDb.GetEntity(params.ExportId); err != nil {
+		ctx.Log.Error("Error checking export ID: ", err)
+		sentry.CaptureException(err)
+		return
+	} else if existingEntity != "" {
+		ctx.Log.Error("Export ID already in use")
+		sentry.CaptureMessage("Export ID already in use")
+		return
+	}
+
+	entityId := params.UserId
+	if entityId != "" && entityId[0] != '@' {
+		ctx.Log.Error("Invalid user ID")
+		sentry.CaptureMessage("Invalid user ID")
+		return
+	} else if entityId == "" {
+		entityId = params.ServerName
+	}
+	if entityId == "" {
+		ctx.Log.Error("No entity provided")
+		sentry.CaptureMessage("No entity provided")
+		return
+	}
+
+	if err := exportDb.Insert(params.ExportId, entityId); err != nil {
+		ctx.Log.Error("Error persisting export ID: ", err)
+		sentry.CaptureException(err)
+		return
+	}
+
+	partsDb := database.GetInstance().ExportParts.Prepare(ctx)
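+	// persistPart buffers each archive part produced by the export, uploads it to a datastore
+	// of the archives kind, and records its location in the export_parts table.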
+	persistPart := func(partNum int, fileName string, data io.ReadCloser) error {
+		dsConf, err := datastores.Pick(ctx, datastores.ArchivesKind)
+		if err != nil {
+			return err
+		}
+		sha256hash, sizeBytes, reader, err := datastores.BufferTemp(dsConf, data)
+		if err != nil {
+			return err
+		}
+		dsLocation, err := datastores.Upload(ctx, dsConf, reader, sizeBytes, "application/octet-stream", sha256hash)
+		if err != nil {
+			return err
+		}
+		if err = partsDb.Insert(&database.DbExportPart{
+			ExportId:    params.ExportId,
+			PartNum:     partNum,
+			SizeBytes:   sizeBytes,
+			FileName:    fileName,
+			DatastoreId: dsConf.Id,
+			Location:    dsLocation,
+		}); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if err := archival.ExportEntityData(ctx, params.ExportId, entityId, params.IncludeS3Urls, persistPart); err != nil {
+		ctx.Log.Error("Error during export: ", err)
+		sentry.CaptureException(err)
+		return
+	}
+}