forked from CalebQ42/squashfs
-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Added data.FullReader and moved to low level library
Added ability to get readers from Base
- Loading branch information
Showing
4 changed files
with
172 additions
and
16 deletions.
There are no files selected for viewing
This file was deleted.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,153 @@ | ||
package data | ||
|
||
import ( | ||
"encoding/binary" | ||
"errors" | ||
"io" | ||
"sync" | ||
|
||
"github.com/CalebQ42/squashfs/internal/decompress" | ||
"github.com/CalebQ42/squashfs/internal/toreader" | ||
) | ||
|
||
// FragReaderConstructor builds a *Reader for a file's fragment data, given
// the archive's io.ReaderAt and its decompressor. It is supplied via
// FullReader.AddFrag and invoked at the end of FullReader.WriteTo.
type FragReaderConstructor func(io.ReaderAt, decompress.Decompressor) (*Reader, error)
|
||
// FullReader reads and decompresses all of a file's data blocks, writing the
// result via WriteTo. Blocks are processed concurrently in batches of up to
// goroutineLimit, then written out in order.
type FullReader struct {
	r    io.ReaderAt             // underlying archive
	d    decompress.Decompressor // decompressor for compressed blocks
	frag FragReaderConstructor   // optional fragment reader (set via AddFrag); nil if the file has no fragment
	// retPool recycles retValue structs passed between process goroutines
	// and WriteTo.
	retPool *sync.Pool
	// sizes holds each block's on-disk size; bit 24 set means the block is
	// stored uncompressed (squashfs convention).
	sizes []uint32
	// initialOffset is the absolute offset of the first data block within r.
	initialOffset int64
	// goroutineLimit caps concurrent block reads (default 10).
	goroutineLimit uint16
}
|
||
func NewFullReader(r io.ReaderAt, initialOffset int64, d decompress.Decompressor, sizes []uint32) *FullReader { | ||
return &FullReader{ | ||
r: r, | ||
d: d, | ||
sizes: sizes, | ||
initialOffset: initialOffset, | ||
goroutineLimit: 10, | ||
retPool: &sync.Pool{ | ||
New: func() any { | ||
return &retValue{} | ||
}, | ||
}, | ||
} | ||
} | ||
|
||
// AddFrag registers the constructor used to read the file's tail-end
// fragment. WriteTo appends the fragment's data after all full blocks.
func (r *FullReader) AddFrag(frag FragReaderConstructor) {
	r.frag = frag
}
|
||
func (r *FullReader) SetGoroutineLimit(limit uint16) { | ||
r.goroutineLimit = limit | ||
} | ||
|
||
// retValue carries one block's result from a process goroutine back to
// WriteTo. Instances are recycled through FullReader.retPool.
type retValue struct {
	err   error  // read or decompression error, if any
	data  []byte // the block's (decompressed) contents
	index uint64 // block index, used to restore write order
}
|
||
func (r *FullReader) process(index uint64, fileOffset uint64, retChan chan *retValue) { | ||
ret := r.retPool.Get().(*retValue) | ||
ret.index = index | ||
realSize := r.sizes[index] &^ (1 << 24) | ||
ret.data = make([]byte, realSize) | ||
ret.err = binary.Read(toreader.NewReader(r.r, int64(r.initialOffset)+int64(fileOffset)), binary.LittleEndian, &ret.data) | ||
if r.sizes[index] == realSize { | ||
ret.data, ret.err = r.d.Decompress(ret.data) | ||
} | ||
retChan <- ret | ||
} | ||
|
||
func (r *FullReader) WriteTo(w io.Writer) (int64, error) { | ||
var curIndex uint64 | ||
var curOffset uint64 | ||
var toProcess uint16 | ||
var wrote int64 | ||
cache := make(map[uint64]*retValue) | ||
var errCache []error | ||
retChan := make(chan *retValue, r.goroutineLimit) | ||
for i := uint64(0); i < uint64(len(r.sizes))/uint64(r.goroutineLimit); i++ { | ||
toProcess = uint16(len(r.sizes)) - (uint16(i) * r.goroutineLimit) | ||
if toProcess > r.goroutineLimit { | ||
toProcess = r.goroutineLimit | ||
} | ||
// Start all the goroutines | ||
for j := uint16(0); j < toProcess; j++ { | ||
go r.process((i*uint64(r.goroutineLimit))+uint64(j), curOffset, retChan) | ||
curOffset += uint64(r.sizes[(i*uint64(r.goroutineLimit))+uint64(j)]) &^ (1 << 24) | ||
} | ||
// Then consume the results on retChan | ||
for j := uint16(0); j < toProcess; j++ { | ||
res := <-retChan | ||
// If there's an error, we don't care about the results. | ||
if res.err != nil { | ||
errCache = append(errCache, res.err) | ||
if len(cache) > 0 { | ||
clear(cache) | ||
} | ||
continue | ||
} | ||
// If there has been an error previously, we don't care about the results. | ||
// We still want to wait for all the goroutines to prevent resources being wasted. | ||
if len(errCache) > 0 { | ||
continue | ||
} | ||
// If we don't need the data yet, we cache it and move on | ||
if res.index != curIndex { | ||
cache[res.index] = res | ||
continue | ||
} | ||
// If we do need the data, we write it | ||
wr, err := w.Write(res.data) | ||
wrote += int64(wr) | ||
if err != nil { | ||
errCache = append(errCache, err) | ||
if len(cache) > 0 { | ||
clear(cache) | ||
} | ||
continue | ||
} | ||
r.retPool.Put(res) | ||
curIndex++ | ||
// Now we recursively try to clear the cache | ||
for len(cache) > 0 { | ||
res, ok := cache[curIndex] | ||
if !ok { | ||
break | ||
} | ||
wr, err := w.Write(res.data) | ||
wrote += int64(wr) | ||
if err != nil { | ||
errCache = append(errCache, err) | ||
if len(cache) > 0 { | ||
clear(cache) | ||
} | ||
break | ||
} | ||
delete(cache, curIndex) | ||
r.retPool.Put(res) | ||
curIndex++ | ||
} | ||
} | ||
if len(errCache) > 0 { | ||
return wrote, errors.Join(errCache...) | ||
} | ||
} | ||
if r.frag != nil { | ||
rdr, err := r.frag(r.r, r.d) | ||
if err != nil { | ||
return wrote, err | ||
} | ||
wr, err := io.Copy(w, rdr) | ||
wrote += wr | ||
if err != nil { | ||
return wrote, err | ||
} | ||
} | ||
return wrote, nil | ||
} |
File renamed without changes.