perf(drivers): fs operations and cache (#4965)

* perf(baidu_photo): multi-thread upload

* perf(baidu_netdisk): multi-thread upload and cache optimization

* fix: LimitWriter

* fix(weiyun): only one login is allowed

* feat(189pc): multi-thread upload

* feat(baidu_netdisk): multi-thread upload

* feat(baidu_photo): multi-thread upload

* feat(weiyun): multi-thread upload

* perf(aliyundrive_open): optimize upload code and cache

* fix(weiyun): invalid directory ID

* fix(baidu_netdisk): modified time

* fix(baidu_netdisk, baidu_photo): upload slice error

* perf(baidu_netdisk): cancel unnecessary retries

* fix(limitWriter): must return a non-nil error if it returns n < len(p)

* fix(aliyundrive_open): use only one of Name and Filename

* perf(mopan): multi-thread upload
Author: foxxorcat
Date: 2023-08-09 16:13:09 +08:00 (committed by GitHub)
Parent: 9d45718e5f
Commit: df6b306fce
22 changed files with 650 additions and 354 deletions

pkg/errgroup/errgroup.go (new file, 93 lines)

@@ -0,0 +1,93 @@
package errgroup

import (
    "context"
    "fmt"
    "sync"
    "sync/atomic"

    "github.com/avast/retry-go"
)

type token struct{}

type Group struct {
    cancel  func(error)
    ctx     context.Context
    opts    []retry.Option
    success uint64
    wg      sync.WaitGroup
    sem     chan token
}

func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {
    ctx, cancel := context.WithCancelCause(ctx)
    return (&Group{cancel: cancel, ctx: ctx, opts: retryOpts}).SetLimit(limit), ctx
}

func (g *Group) done() {
    if g.sem != nil {
        <-g.sem
    }
    g.wg.Done()
    atomic.AddUint64(&g.success, 1)
}

func (g *Group) Wait() error {
    g.wg.Wait()
    return context.Cause(g.ctx)
}

func (g *Group) Go(f func(ctx context.Context) error) {
    if g.sem != nil {
        g.sem <- token{}
    }

    g.wg.Add(1)
    go func() {
        defer g.done()
        if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
            g.cancel(err)
        }
    }()
}

func (g *Group) TryGo(f func(ctx context.Context) error) bool {
    if g.sem != nil {
        select {
        case g.sem <- token{}:
        default:
            return false
        }
    }

    g.wg.Add(1)
    go func() {
        defer g.done()
        if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
            g.cancel(err)
        }
    }()
    return true
}

func (g *Group) SetLimit(n int) *Group {
    if len(g.sem) != 0 {
        panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
    }
    if n > 0 {
        g.sem = make(chan token, n)
    } else {
        g.sem = nil
    }
    return g
}

func (g *Group) Success() uint64 {
    return atomic.LoadUint64(&g.success)
}

func (g *Group) Err() error {
    return context.Cause(g.ctx)
}
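For orientation, errgroup.Group is essentially a bounded errgroup with built-in retries: Go blocks once "limit" tasks are in flight, each task is retried according to the retry-go options passed to NewGroupWithContext, and the first task that still fails after its retries cancels the shared context, whose cause Wait and Err report. Below is a minimal usage sketch in the style of the drivers' multi-thread upload loops; the module path and the uploadPart helper are illustrative assumptions, not part of this commit.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/avast/retry-go"

    "github.com/alist-org/alist/v3/pkg/errgroup" // assumed module path
)

// uploadPart is a hypothetical stand-in for uploading one slice of a file.
func uploadPart(ctx context.Context, index int) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case <-time.After(10 * time.Millisecond):
        return nil
    }
}

func main() {
    // At most 3 slices in flight; each slice is retried up to 3 times with backoff.
    group, uploadCtx := errgroup.NewGroupWithContext(context.Background(), 3,
        retry.Attempts(3),
        retry.DelayType(retry.BackOffDelay))

    for i := 0; i < 8; i++ {
        if uploadCtx.Err() != nil { // stop scheduling once a slice has failed for good
            break
        }
        idx := i
        group.Go(func(ctx context.Context) error {
            return uploadPart(ctx, idx)
        })
    }

    // Wait blocks for all started tasks and returns the cancellation cause, if any.
    if err := group.Wait(); err != nil {
        fmt.Println("upload failed:", err)
        return
    }
    fmt.Println("finished tasks:", group.Success())
}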


@@ -4,9 +4,10 @@ import (
     "bytes"
     "context"
     "fmt"
-    log "github.com/sirupsen/logrus"
     "io"
     "time"
+
+    log "github.com/sirupsen/logrus"
 )

 // here is some syntaxic sugar inspired by the Tomas Senart's video,
@@ -47,31 +48,22 @@ func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, p

 type limitWriter struct {
     w     io.Writer
-    count int64
     limit int64
 }

-func (l limitWriter) Write(p []byte) (n int, err error) {
-    wn := int(l.limit - l.count)
-    if wn > len(p) {
-        wn = len(p)
-    }
-    if wn > 0 {
-        if n, err = l.w.Write(p[:wn]); err != nil {
-            return
-        }
-        if n < wn {
-            err = io.ErrShortWrite
+func (l *limitWriter) Write(p []byte) (n int, err error) {
+    if l.limit > 0 {
+        if int64(len(p)) > l.limit {
+            p = p[:l.limit]
         }
+        l.limit -= int64(len(p))
+        _, err = l.w.Write(p)
     }
-    if err == nil {
-        n = len(p)
-    }
-    return
+    return len(p), err
 }

-func LimitWriter(w io.Writer, size int64) io.Writer {
-    return &limitWriter{w: w, limit: size}
+func LimitWriter(w io.Writer, limit int64) io.Writer {
+    return &limitWriter{w: w, limit: limit}
 }

 type ReadCloser struct {
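Per the io.Writer contract, Write must return a non-nil error whenever it reports n < len(p) (the point of the fix(limitWriter) commit above). The rewritten limitWriter sidesteps this by always reporting the full length: it forwards at most limit bytes to the underlying writer and silently discards the rest, so callers such as io.Copy keep writing without a short-write error once the limit is reached. A standalone sketch of the new behavior follows; the type is re-declared locally purely for illustration.

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"
)

// limitWriter mirrors the new implementation above: it forwards at most
// limit bytes to w, drops the rest, and always reports the full length,
// so io.Copy never observes a short write.
type limitWriter struct {
    w     io.Writer
    limit int64
}

func (l *limitWriter) Write(p []byte) (n int, err error) {
    if l.limit > 0 {
        if int64(len(p)) > l.limit {
            p = p[:l.limit]
        }
        l.limit -= int64(len(p))
        _, err = l.w.Write(p)
    }
    return len(p), err
}

func LimitWriter(w io.Writer, limit int64) io.Writer {
    return &limitWriter{w: w, limit: limit}
}

func main() {
    var buf bytes.Buffer
    // Only the first 5 bytes reach buf; io.Copy still reports 11 bytes and a nil error.
    n, err := io.Copy(LimitWriter(&buf, 5), strings.NewReader("hello world"))
    fmt.Println(n, err, buf.String()) // 11 <nil> hello
}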