Commit 24730ec1 authored by Travis Ralston

Rewrite archiver for export_synapse_for_import

parent b7160248
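In short: the buffer-backed V2ArchiveExport and its V2ArchiveWriter interface below are replaced by a streaming v2archive.ArchiveWriter that hands each finished part to a PartPersister callback. A minimal sketch of the new call shape, assuming the post-commit package layout; the export ID, entity, part size, and output directory are placeholder values:

package main

import (
	"io"
	"os"
	"path"

	"github.com/turt2live/matrix-media-repo/archival/v2archive"
	"github.com/turt2live/matrix-media-repo/common/rcontext"
)

func main() {
	ctx := rcontext.InitialNoConfig()

	// The PartPersister callback receives each finished part as a stream;
	// this sketch just writes parts to /tmp/export (a placeholder directory).
	archiver, err := v2archive.NewWriter(ctx, "exportId", "example.org", 104857600,
		func(part int, fileName string, data io.ReadCloser) error {
			defer data.Close()
			f, err := os.Create(path.Join("/tmp/export", fileName))
			if err != nil {
				return err
			}
			defer f.Close()
			_, err = io.Copy(f, data)
			return err
		})
	if err != nil {
		panic(err)
	}

	// ... call archiver.AppendMedia(file, v2archive.MediaInfo{...}) per record ...

	if err := archiver.Finish(); err != nil {
		panic(err)
	}
}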
package archival
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"time"
"github.com/dustin/go-humanize"
"github.com/gabriel-vasile/mimetype"
"github.com/sirupsen/logrus"
"github.com/turt2live/matrix-media-repo/common/rcontext"
"github.com/turt2live/matrix-media-repo/templating"
"github.com/turt2live/matrix-media-repo/util"
"github.com/turt2live/matrix-media-repo/util/stream_util"
)
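// V2ArchiveWriter is implemented by consumers that persist each completed archive part.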
type V2ArchiveWriter interface {
WritePart(part int, fileName string, archive io.Reader, size int64) error
}
type V2ArchiveExport struct {
exportId string
entity string
indexModel *templating.ExportIndexModel
writer V2ArchiveWriter
mediaManifest map[string]*V2ManifestRecord
partSize int64
ctx rcontext.RequestContext
// state variables
currentPart int
currentTar *tar.Writer
currentTarBytes *bytes.Buffer
currentSize int64
writingManifest bool
}
func NewV2Export(exportId string, entity string, partSize int64, writer V2ArchiveWriter, ctx rcontext.RequestContext) (*V2ArchiveExport, error) {
ctx = ctx.LogWithFields(logrus.Fields{
"v2_export-id": exportId,
"v2_export-entity": entity,
"v2_export-partSize": partSize,
})
archiver := &V2ArchiveExport{
exportId: exportId,
entity: entity,
writer: writer,
partSize: partSize,
ctx: ctx,
indexModel: &templating.ExportIndexModel{
Entity: entity,
ExportID: exportId,
Media: make([]*templating.ExportIndexMediaModel, 0),
},
mediaManifest: make(map[string]*V2ManifestRecord),
currentPart: 0,
}
ctx.Log.Info("Preparing first tar file...")
err := archiver.newTar()
return archiver, err
}
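// newTar persists the current tar part (if one exists) and starts a fresh in-memory buffer for the next part.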
func (e *V2ArchiveExport) newTar() error {
if e.currentPart > 0 {
e.ctx.Log.Info("Persisting complete tar file...")
if err := e.persistTar(); err != nil {
return err
}
}
e.ctx.Log.Info("Starting new tar file...")
e.currentTarBytes = &bytes.Buffer{}
e.currentTar = tar.NewWriter(e.currentTarBytes)
e.currentPart = e.currentPart + 1
e.currentSize = 0
return nil
}
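// persistTar gzips the buffered tar and hands the result to the writer as export-part-N.tgz (or export-manifest.tgz).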
func (e *V2ArchiveExport) persistTar() error {
_ = e.currentTar.Close()
e.ctx.Log.Info("Compressing tar file...")
gzipBytes := &bytes.Buffer{}
archiver := gzip.NewWriter(gzipBytes)
archiver.Name = fmt.Sprintf("export-part-%d.tar", e.currentPart)
if e.writingManifest {
archiver.Name = "export-manifest.tar"
}
if _, err := io.Copy(archiver, stream_util.ClonedBufReader(*e.currentTarBytes)); err != nil {
return err
}
_ = archiver.Close()
e.ctx.Log.Info("Writing compressed tar")
name := fmt.Sprintf("export-part-%d.tgz", e.currentPart)
if e.writingManifest {
name = "export-manifest.tgz"
}
return e.writer.WritePart(e.currentPart, name, gzipBytes, int64(len(gzipBytes.Bytes())))
}
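// putFile writes one entry into the current tar and tracks the bytes written toward the part size.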
func (e *V2ArchiveExport) putFile(buf *bytes.Buffer, name string, creationTime time.Time) (int64, error) {
length := int64(len(buf.Bytes()))
header := &tar.Header{
Name: name,
Size: length,
Mode: int64(0644),
ModTime: creationTime,
}
if err := e.currentTar.WriteHeader(header); err != nil {
return 0, err
}
i, err := io.Copy(e.currentTar, buf)
if err != nil {
return 0, err
}
e.currentSize += i
return length, nil
}
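// AppendMedia buffers the entire file into memory, adds it to the current tar, records manifest and index entries, and rotates to a new part once partSize is reached.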
func (e *V2ArchiveExport) AppendMedia(origin string, mediaId string, originalName string, contentType string, creationTime time.Time, file io.Reader, sha256 string, s3Url string, userId string) error {
// buffer the entire file into memory
buf := &bytes.Buffer{}
if _, err := io.Copy(buf, file); err != nil {
return err
}
mime := mimetype.Detect(buf.Bytes())
internalName := fmt.Sprintf("%s__%s%s", origin, mediaId, mime.Extension())
length, err := e.putFile(buf, internalName, creationTime)
if err != nil {
return err
}
mxc := fmt.Sprintf("mxc://%s/%s", origin, mediaId)
e.mediaManifest[mxc] = &V2ManifestRecord{
ArchivedName: internalName,
FileName: originalName,
SizeBytes: length,
ContentType: contentType,
S3Url: s3Url,
Sha256: sha256,
Origin: origin,
MediaId: mediaId,
CreatedTs: creationTime.UnixNano() / 1000000,
Uploader: userId,
}
e.indexModel.Media = append(e.indexModel.Media, &templating.ExportIndexMediaModel{
ExportID: e.exportId,
ArchivedName: internalName,
FileName: originalName,
SizeBytes: length,
SizeBytesHuman: humanize.Bytes(uint64(length)),
Origin: origin,
MediaID: mediaId,
Sha256Hash: sha256,
ContentType: contentType,
UploadTs: creationTime.UnixNano() / 1000000,
UploadDateHuman: creationTime.Format(time.UnixDate),
Uploader: userId,
})
if e.currentSize >= e.partSize {
e.ctx.Log.Info("Rotating tar...")
return e.newTar()
}
return nil
}
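// Finish rotates to a final media part, then writes manifest.json and the rendered index.html into a dedicated manifest part.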
func (e *V2ArchiveExport) Finish() error {
if err := e.newTar(); err != nil {
return err
}
e.ctx.Log.Info("Writing manifest...")
e.writingManifest = true
defer (func() { e.writingManifest = false })()
manifest := &V2Manifest{
Version: 2,
EntityId: e.entity,
CreatedTs: util.NowMillis(),
Media: e.mediaManifest,
}
b, err := json.Marshal(manifest)
if err != nil {
e.writingManifest = false
return err
}
if _, err := e.putFile(bytes.NewBuffer(b), "manifest.json", time.Now()); err != nil {
return err
}
e.ctx.Log.Info("Writing index...")
t, err := templating.GetTemplate("export_index")
if err != nil {
return err
}
html := bytes.Buffer{}
if err := t.Execute(&html, e.indexModel); err != nil {
return err
}
if _, err := e.putFile(bytes.NewBuffer(html.Bytes()), "index.html", time.Now()); err != nil {
return err
}
e.ctx.Log.Info("Writing manifest tar...")
return e.persistTar()
}
package archival
import (
"io"
"os"
"path"
)
type V2ArchiveDiskWriter struct {
directory string
}
func NewV2ArchiveDiskWriter(directory string) *V2ArchiveDiskWriter {
return &V2ArchiveDiskWriter{directory: directory}
}
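// WritePart writes a completed part to a file in the target directory; the part number and size arguments are unused here.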
func (w V2ArchiveDiskWriter) WritePart(part int, fileName string, archive io.Reader, size int64) error {
f, err := os.Create(path.Join(w.directory, fileName))
if err != nil {
return err
}
if _, err := io.Copy(f, archive); err != nil {
return err
}
return f.Close()
}
The manifest types move from package archival into the new v2archive package, dropping their V2 prefixes:

-package archival
+package v2archive

-type V2ManifestRecord struct {
+type Manifest struct {
+	Version   int                        `json:"version"`
+	EntityId  string                     `json:"entity_id"`
+	CreatedTs int64                      `json:"created_ts"`
+	Media     map[string]*ManifestRecord `json:"media"`
+}
+
+type ManifestRecord struct {
 	FileName     string `json:"name"`
 	ArchivedName string `json:"file_name"`
 	SizeBytes    int64  `json:"size_bytes"`
@@ -12,10 +19,3 @@ type V2ManifestRecord struct {
 	CreatedTs int64  `json:"created_ts"`
 	Uploader  string `json:"uploader"`
 }
-
-type V2Manifest struct {
-	Version   int                          `json:"version"`
-	EntityId  string                       `json:"entity_id"`
-	CreatedTs int64                        `json:"created_ts"`
-	Media     map[string]*V2ManifestRecord `json:"media"`
-}
package v2archive
import (
"archive/tar"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"html"
"io"
"os"
"time"
"github.com/dustin/go-humanize"
"github.com/gabriel-vasile/mimetype"
"github.com/sirupsen/logrus"
"github.com/turt2live/matrix-media-repo/common/rcontext"
"github.com/turt2live/matrix-media-repo/templating"
"github.com/turt2live/matrix-media-repo/util"
"github.com/turt2live/matrix-media-repo/util/readers"
)
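// MediaInfo carries the metadata for one media record being archived.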
type MediaInfo struct {
Origin string
MediaId string
FileName string
ContentType string
CreationTs int64
S3Url string
UserId string
}
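// PartPersister persists one completed archive part; it is expected to close data when done.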
type PartPersister func(part int, fileName string, data io.ReadCloser) error
type ArchiveWriter struct {
ctx rcontext.RequestContext
exportId string
entity string
index *templating.ExportIndexModel
mediaManifest map[string]*ManifestRecord
partSize int64
writeFn PartPersister
// state machine variables
currentPart int
currentTar *tar.Writer
currentTempFile *os.File
currentSize int64
writingManifest bool
}
func NewWriter(ctx rcontext.RequestContext, exportId string, entity string, partSize int64, writeFn PartPersister) (*ArchiveWriter, error) {
ctx = ctx.LogWithFields(logrus.Fields{
"v2archive-id": exportId,
"v2archive-entity": entity,
})
archiver := &ArchiveWriter{
ctx: ctx,
exportId: exportId,
entity: entity,
index: &templating.ExportIndexModel{
ExportID: exportId,
Entity: entity,
Media: make([]*templating.ExportIndexMediaModel, 0),
},
mediaManifest: make(map[string]*ManifestRecord),
partSize: partSize,
writeFn: writeFn,
currentPart: 0,
}
err := archiver.beginTar()
return archiver, err
}
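// rotateTar persists the current part (if any) and begins the next one.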
func (w *ArchiveWriter) rotateTar() error {
if w.currentPart > 0 {
if err := w.writeTar(); err != nil {
return err
}
}
return w.beginTar()
}
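// beginTar resets the part size counter and opens a new temp file to back the next tar part.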
func (w *ArchiveWriter) beginTar() error {
w.currentSize = 0
w.currentPart = w.currentPart + 1
file, err := os.CreateTemp(os.TempDir(), "mmr-archive")
if err != nil {
return err
}
w.currentTempFile = file
w.currentTar = tar.NewWriter(file)
return nil
}
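// writeTar closes the current tar, then streams the temp file through gzip and an io.Pipe so the part reaches the PartPersister without being buffered in memory.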
func (w *ArchiveWriter) writeTar() error {
_ = w.currentTar.Close()
tempFilePath := w.currentTempFile.Name()
if err := w.currentTempFile.Close(); err != nil {
return err
}
f, err := os.Open(tempFilePath)
if err != nil {
return err
}
pr, pw := io.Pipe()
archiver := gzip.NewWriter(pw)
fname := fmt.Sprintf("export-part-%d", w.currentPart)
if w.writingManifest {
fname = "export-manifest"
}
archiver.Name = fname + ".tar"
w.ctx.Log.Debug("Writing tar file to gzip container: ", archiver.Name)
go func() {
_, err := io.Copy(archiver, f)
if err != nil {
_ = pw.CloseWithError(err)
} else {
err = archiver.Close()
if err != nil {
_ = pw.CloseWithError(err)
} else {
_ = pw.Close()
}
}
}()
closerStack := readers.NewCancelCloser(pr, func() {
_ = readers.NewTempFileCloser("", f.Name(), f).Close()
})
return w.writeFn(w.currentPart, fname+".tgz", closerStack)
}
// AppendMedia adds one media file to the archive and returns its SHA-256 hash, rotating to a new part once the size threshold is reached.
func (w *ArchiveWriter) AppendMedia(file io.ReadCloser, info MediaInfo) (string, error) {
defer file.Close()
br := readers.NewBufferReadsReader(file)
mime, err := mimetype.DetectReader(br)
if err != nil {
return "", err
}
internalName := fmt.Sprintf("%s__%s%s", info.Origin, info.MediaId, mime.Extension())
createTime := util.FromMillis(info.CreationTs)
size, sha256hash, err := w.putFile(br.GetRewoundReader(), internalName, createTime)
if err != nil {
return "", err
}
w.mediaManifest[util.MxcUri(info.Origin, info.MediaId)] = &ManifestRecord{
FileName: info.FileName,
ArchivedName: internalName,
SizeBytes: size,
ContentType: info.ContentType,
S3Url: info.S3Url,
Sha256: sha256hash,
Origin: info.Origin,
MediaId: info.MediaId,
CreatedTs: info.CreationTs,
Uploader: info.UserId,
}
w.index.Media = append(w.index.Media, &templating.ExportIndexMediaModel{
ExportID: w.exportId,
ArchivedName: internalName,
FileName: html.EscapeString(info.FileName),
Origin: info.Origin,
MediaID: info.MediaId,
SizeBytes: size,
SizeBytesHuman: humanize.Bytes(uint64(size)),
UploadTs: info.CreationTs,
UploadDateHuman: createTime.UTC().Format(time.UnixDate),
Sha256Hash: sha256hash,
ContentType: info.ContentType,
Uploader: info.UserId,
})
if w.currentSize >= w.partSize {
return sha256hash, w.rotateTar()
}
return sha256hash, nil
}
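// putFile spools the reader to a temp file first (tar headers need the size up front), then copies it into the tar while hashing with SHA-256. It returns the bytes written and the hex-encoded hash.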
func (w *ArchiveWriter) putFile(r io.Reader, name string, creationTime time.Time) (int64, string, error) {
f, err := os.CreateTemp(os.TempDir(), "mmr-archive-put")
if err != nil {
return 0, "", err
}
defer func() {
f.Close()
os.Remove(f.Name())
}()
i1, err := io.Copy(f, r)
if err != nil {
return 0, "", err
}
if _, err = f.Seek(0, io.SeekStart); err != nil {
return 0, "", err
}
hasher := sha256.New()
header := &tar.Header{
Name: name,
Mode: int64(0644),
ModTime: creationTime,
Size: i1,
}
if err := w.currentTar.WriteHeader(header); err != nil {
return 0, "", err
}
mw := io.MultiWriter(hasher, w.currentTar)
i2, err := io.Copy(mw, f)
if err != nil {
return 0, "", err
}
w.currentSize = w.currentSize + i2
if i1 != i2 {
w.ctx.Log.Warnf("Size mismatch! Expected %d bytes but wrote %d instead", i1, i2)
}
return i2, hex.EncodeToString(hasher.Sum(nil)), nil
}
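// Finish flushes the last media part, then streams manifest.json and the rendered index.html into a separate manifest part.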
func (w *ArchiveWriter) Finish() error {
if err := w.rotateTar(); err != nil {
return err
}
w.writingManifest = true
defer func() { w.writingManifest = false }()
manifest := &Manifest{
Version: 2,
EntityId: w.entity,
CreatedTs: util.NowMillis(),
Media: w.mediaManifest,
}
pr, pw := io.Pipe()
jenc := json.NewEncoder(pw)
go func() {
if err := jenc.Encode(manifest); err != nil {
_ = pw.CloseWithError(err)
} else {
_ = pw.Close()
}
}()
if _, _, err := w.putFile(pr, "manifest.json", time.Now()); err != nil {
return err
}
t, err := templating.GetTemplate("export_index")
if err != nil {
return err
}
pr, pw = io.Pipe()
go func() {
if err := t.Execute(pw, w.index); err != nil {
_ = pw.CloseWithError(err)
} else {
_ = pw.Close()
}
}()
if _, _, err := w.putFile(pr, "index.html", time.Now()); err != nil {
return err
}
return w.writeTar()
}
The export_synapse_for_import command is rewired to the new writer. The in-memory buffering and the separate SHA-256 pass disappear, since the writer now hashes files as they are archived:

 package main

 import (
-	"bytes"
 	"flag"
 	"fmt"
 	"io"
@@ -10,16 +9,14 @@ import (
 	"strconv"
 	"strings"

-	"github.com/sirupsen/logrus"
-	"github.com/turt2live/matrix-media-repo/archival"
+	"github.com/turt2live/matrix-media-repo/archival/v2archive"
 	"github.com/turt2live/matrix-media-repo/common/assets"
 	"github.com/turt2live/matrix-media-repo/common/config"
 	"github.com/turt2live/matrix-media-repo/common/logging"
 	"github.com/turt2live/matrix-media-repo/common/rcontext"
-	"github.com/turt2live/matrix-media-repo/common/runtime"
+	"github.com/turt2live/matrix-media-repo/common/version"
 	"github.com/turt2live/matrix-media-repo/homeserver_interop/synapse"
 	"github.com/turt2live/matrix-media-repo/util"
-	"github.com/turt2live/matrix-media-repo/util/stream_util"

 	"golang.org/x/crypto/ssh/terminal"
 )
@@ -35,8 +32,12 @@ func main() {
 	importPath := flag.String("mediaDirectory", "./media_store", "The media_store_path for Synapse")
 	partSizeBytes := flag.Int64("partSize", 104857600, "The number of bytes (roughly) to split the export files into.")
 	skipMissing := flag.Bool("skipMissing", false, "If a media file can't be found, skip it.")
+	debug := flag.Bool("debug", false, "Enables debug logging.")
+	prettyLog := flag.Bool("prettyLog", false, "Enables pretty logging (colours).")
 	flag.Parse()

+	version.SetDefaults()
+	version.Print(true)
+
 	assets.SetupTemplates(*templatesPath)
 	_ = os.MkdirAll(*exportPath, 0755)
@@ -59,39 +60,51 @@ func main() {
 		realPsqlPassword = *postgresPassword
 	}

-	err := logging.Setup(
-		config.Get().General.LogDirectory,
-		config.Get().General.LogColors,
-		config.Get().General.JsonLogs,
-		config.Get().General.LogLevel,
-	)
-	if err != nil {
+	level := "info"
+	if *debug {
+		level = "debug"
+	}
+	if err := logging.Setup(
+		"-",
+		*prettyLog,
+		false,
+		level,
+	); err != nil {
 		panic(err)
 	}

-	logrus.Info("Setting up for importing...")
-	runtime.CheckIdGenerator()
+	ctx := rcontext.InitialNoConfig()

 	connectionString := "postgres://" + *postgresUsername + ":" + realPsqlPassword + "@" + *postgresHost + ":" + strconv.Itoa(*postgresPort) + "/" + *postgresDatabase + "?sslmode=disable"

-	logrus.Info("Connecting to synapse database...")
+	ctx.Log.Debug("Connecting to synapse database...")
 	synDb, err := synapse.OpenDatabase(connectionString)
 	if err != nil {
 		panic(err)
 	}

-	logrus.Info("Fetching all local media records from synapse...")
+	ctx.Log.Info("Fetching all local media records from Synapse...")
 	records, err := synDb.GetAllMedia()
 	if err != nil {
 		panic(err)
 	}

-	logrus.Info(fmt.Sprintf("Exporting %d media records", len(records)))
+	ctx.Log.Info(fmt.Sprintf("Exporting %d media records", len(records)))

-	writer := archival.NewV2ArchiveDiskWriter(*exportPath)
-	exporter, err := archival.NewV2Export("OOB", *serverName, *partSizeBytes, writer, rcontext.Initial())
+	archiver, err := v2archive.NewWriter(ctx, "OOB", *serverName, *partSizeBytes, func(part int, fileName string, data io.ReadCloser) error {
+		defer data.Close()
+		f, errf := os.Create(path.Join(*exportPath, fileName))
+		if errf != nil {
+			return errf
+		}
+		_, errf = io.Copy(f, data)
+		if errf != nil {
+			return errf
+		}
+		return nil
+	})
 	if err != nil {
-		logrus.Fatal(err)
+		ctx.Log.Fatal(err)
 	}

 	missing := make([]string, 0)
@@ -103,9 +116,9 @@ func main() {
 		// For a URL MediaID 2020-08-17_AABBCCDD:
 		// $importPath/url_cache/2020-08-17/AABBCCDD

-		mxc := fmt.Sprintf("mxc://%s/%s", *serverName, r.MediaId)
-		logrus.Info("Copying " + mxc)
+		mxc := util.MxcUri(*serverName, r.MediaId)
+		ctx.Log.Info("Copying " + mxc)

 		filePath := path.Join(*importPath, "local_content", r.MediaId[0:2], r.MediaId[2:4], r.MediaId[4:])
 		if r.UrlCache != "" {
@@ -115,36 +128,34 @@ func main() {
 		f, err := os.Open(filePath)
 		if os.IsNotExist(err) && *skipMissing {
-			logrus.Warn("File does not appear to exist, skipping: " + filePath)
+			ctx.Log.Warn("File does not appear to exist, skipping: " + filePath)
 			missing = append(missing, filePath)
 			continue
 		}
 		if err != nil {
-			logrus.Fatal(err)
-		}
-
-		d := &bytes.Buffer{}
-		_, _ = io.Copy(d, f)
-		_ = f.Close()
-
-		temp := bytes.NewBuffer(d.Bytes())
-		sha256, err := stream_util.GetSha256HashOfStream(io.NopCloser(temp))
-		if err != nil {
-			logrus.Fatal(err)
+			ctx.Log.Fatal(err)
 		}

-		err = exporter.AppendMedia(*serverName, r.MediaId, r.UploadName, r.ContentType, util.FromMillis(r.CreatedTs), d, sha256, "", r.UserId)
+		_, err = archiver.AppendMedia(f, v2archive.MediaInfo{
+			Origin:      *serverName,
+			MediaId:     r.MediaId,
+			FileName:    r.UploadName,
+			ContentType: r.ContentType,
+			CreationTs:  r.CreatedTs,
+			S3Url:       "",
+			UserId:      r.UserId,
+		})
 		if err != nil {
-			logrus.Fatal(err)
+			ctx.Log.Fatal(err)
 		}
 	}

-	err = exporter.Finish()
+	err = archiver.Finish()
 	if err != nil {
-		logrus.Fatal(err)
+		ctx.Log.Fatal(err)
 	}

-	logrus.Info("Done export - cleaning up...")
+	ctx.Log.Info("Done export - cleaning up...")

 	// Clean up
 	assets.Cleanup()
@@ -152,9 +163,9 @@ func main() {
 	// Report missing files
 	if len(missing) > 0 {
 		for _, m := range missing {
-			logrus.Warn("Was not able to find " + m)
+			ctx.Log.Warn("Was not able to find " + m)
 		}
 	}

-	logrus.Info("Export completed")
+	ctx.Log.Info("Export completed")
 }
Elsewhere, another command's main() (shown collapsed in the diff) drops a leftover debug line:

@@ -90,8 +90,6 @@ func main() {
 	logrus.Info("Starting up...")
 	runtime.RunStartupSequence()

-	logrus.Debug("Setting up for importing...")
-
 	connectionString := "postgres://" + *postgresUsername + ":" + realPsqlPassword + "@" + *postgresHost + ":" + strconv.Itoa(*postgresPort) + "/" + *postgresDatabase + "?sslmode=disable"

 	csApiUrl := *baseUrl
 	if csApiUrl[len(csApiUrl)-1:] == "/" {
...
Finally, rcontext gains a config-free constructor, and Initial()'s marker log field changes:

@@ -11,7 +11,7 @@ import (
 func Initial() RequestContext {
 	return RequestContext{
 		Context: context.Background(),
-		Log:     logrus.WithFields(logrus.Fields{"nocontext": true}),
+		Log:     logrus.WithFields(logrus.Fields{"internal_flag": 1}),
 		Config: config.DomainRepoConfig{
 			MinimumRepoConfig: config.Get().MinimumRepoConfig,
 			Downloads:         config.Get().Downloads.DownloadsConfig,
@@ -22,6 +22,15 @@ func Initial() RequestContext {
 	}.populate()
 }

+func InitialNoConfig() RequestContext {
+	return RequestContext{
+		Context: context.Background(),
+		Log:     logrus.WithFields(logrus.Fields{"internal_flag": 2}),
+		Config:  config.DomainRepoConfig{},
+		Request: nil,
+	}.populate()
+}
+
 type RequestContext struct {
 	context.Context
...