Skip to content

Commit

Permalink
Initial work
Browse files Browse the repository at this point in the history
Create Reader
Pulled back in Inode decoding and superblock
New Data and Metadata readers
Added getting of id, fragment, and export table data lazily
Added README to squashfs/squashfs
  • Loading branch information
CalebQ42 committed Dec 23, 2023
1 parent d4d1b2c commit 707391b
Show file tree
Hide file tree
Showing 23 changed files with 860 additions and 2 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

[![PkgGoDev](https://pkg.go.dev/badge/github.com/CalebQ42/squashfs)](https://pkg.go.dev/github.com/CalebQ42/squashfs) [![Go Report Card](https://goreportcard.com/badge/github.com/CalebQ42/squashfs)](https://goreportcard.com/report/github.com/CalebQ42/squashfs)

A PURE Go library to read squashfs. There is currently no plans to add archive creation support as it will almost always be better to just call `mksquashfs`. I could see some possible use cases, but probably won't spend time on it unless it's requested (open a discussion fi you want this feature).
A PURE Go library to read squashfs. There is currently no plans to add archive creation support as it will almost always be better to just call `mksquashfs`. I could see some possible use cases, but probably won't spend time on it unless it's requested (open a discussion if you want this feature).

Currently has support for reading squashfs files and extracting files and folders.

Expand All @@ -15,7 +15,7 @@ Thanks also to [distri's squashfs library](https://github.com/distr1/distri/tree

* No Xattr parsing. This is simply because I haven't done any research on it and how to apply these in a pure go way.
* Socket files are not extracted.
* From my research, it seems like a socket file would be useless if it could be created.
* From my research, it seems like a socket file would be useless if it could be created. They are still exposed when fuse mounted.
* Fifo files are ignored on `darwin`

## Issues
Expand Down
11 changes: 11 additions & 0 deletions go.mod
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
module github.com/CalebQ42/squashfs

go 1.21.5

require (
	github.com/klauspost/compress v1.17.4
	github.com/pierrec/lz4/v4 v4.1.19
	github.com/rasky/go-lzo v0.0.0-20200203143853-96a758eda86e
	github.com/therootcompany/xz v1.0.1
	github.com/ulikunitz/xz v0.5.11
)
10 changes: 10 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/pierrec/lz4/v4 v4.1.19 h1:tYLzDnjDXh9qIxSTKHwXwOYmm9d887Y7Y1ZkyXYHAN4=
github.com/pierrec/lz4/v4 v4.1.19/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/rasky/go-lzo v0.0.0-20200203143853-96a758eda86e h1:dCWirM5F3wMY+cmRda/B1BiPsFtmzXqV9b0hLWtVBMs=
github.com/rasky/go-lzo v0.0.0-20200203143853-96a758eda86e/go.mod h1:9leZcVcItj6m9/CfHY5Em/iBrCz7js8LcRQGTKEEv2M=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
73 changes: 73 additions & 0 deletions internal/data/reader.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
package data

import (
"encoding/binary"
"io"

"github.com/CalebQ42/squashfs/internal/decompress"
)

// Reader streams a file's contents out of a squashfs archive as a
// sequence of (possibly compressed) data blocks, optionally followed by
// a tail fragment attached via AddFrag.
type Reader struct {
	// r supplies the raw on-disk block bytes, positioned at the file's
	// first data block.
	r io.Reader
	// d decompresses blocks that are stored compressed.
	d decompress.Decompressor
	// frag, when non-nil, yields the file's tail-end fragment data.
	frag io.Reader
	// sizes holds the on-disk size word for each data block.
	sizes []uint32
	// dat is the current fully-decompressed block being read from.
	dat []byte
	// curOffset is the read position within dat.
	// NOTE(review): uint16 caps this at 65535, but a squashfs data block
	// can be larger when the archive's block size exceeds 64 KiB —
	// confirm and consider widening to int.
	curOffset uint16
	// curIndex is the index of the next block to load from sizes.
	curIndex uint64
}

// NewReader constructs a data Reader over r, decompressing blocks with
// d. sizes holds the on-disk size word for each data block. The error
// return is reserved for future validation and is currently always nil.
func NewReader(r io.Reader, d decompress.Decompressor, sizes []uint32) (*Reader, error) {
	rdr := &Reader{r: r, d: d, sizes: sizes}
	return rdr, nil
}

// AddFrag attaches the reader for this file's tail-end fragment data;
// it is consumed by Read once the regular data blocks are exhausted
// (see advance).
func (r *Reader) AddFrag(fragRdr io.Reader) {
	r.frag = fragRdr
}

// advance loads the next data block into r.dat and resets the read
// offset, returning io.EOF once every block (and the fragment, if one
// was attached via AddFrag) has been consumed.
//
// Data block sizes in squashfs are uint32 words whose bit 24
// (0x1000000) flags the block as stored uncompressed; the low bits are
// the on-disk size. (Metadata blocks differ: uint16 with bit 15 — the
// original code used the 0x8000 metadata mask here by mistake.)
func (r *Reader) advance() error {
	const uncompressedBit = uint32(1) << 24
	r.curOffset = 0
	defer func() { r.curIndex++ }()
	lastIdx := len(r.sizes) - 1
	if lastIdx < 0 {
		// No block sizes at all: any data must live in the fragment.
		// (Previously this underflowed uint64(len)-1 and panicked on
		// r.sizes[0].)
		lastIdx = 0
	}
	idx := int(r.curIndex)
	if idx > lastIdx || (idx == lastIdx && r.frag == nil) {
		// NOTE(review): with no fragment attached, the final entry of
		// r.sizes is never read — this preserves the original dispatch;
		// confirm against the caller that the last entry is a fragment
		// placeholder rather than a real block.
		return io.EOF
	}
	if idx == lastIdx {
		// The fragment holds the file's tail.
		var err error
		r.dat, err = io.ReadAll(r.frag)
		return err
	}
	onDisk := r.sizes[r.curIndex]
	realSize := onDisk &^ uncompressedBit
	r.dat = make([]byte, realSize)
	// Pass the slice itself (not a pointer to it) so binary.Read takes
	// its []byte fast path, which is a plain io.ReadFull.
	if err := binary.Read(r.r, binary.LittleEndian, r.dat); err != nil {
		return err
	}
	if onDisk&uncompressedBit != 0 {
		return nil // stored uncompressed; use the raw bytes
	}
	var err error
	r.dat, err = r.d.Decompress(r.dat)
	return err
}

// Read implements io.Reader, copying out of the current data block and
// transparently advancing to the next block (or the fragment) as each
// one is exhausted. It returns io.EOF once all data is consumed.
func (r *Reader) Read(b []byte) (int, error) {
	total := 0
	for total < len(b) {
		if int(r.curOffset) >= len(r.dat) {
			// Current block exhausted; load the next one. The comparison
			// is done in int: the original `uint16(len(r.dat))` silently
			// truncated for blocks larger than 64 KiB.
			if err := r.advance(); err != nil {
				return total, err
			}
		}
		n := min(len(b)-total, len(r.dat)-int(r.curOffset))
		copy(b[total:], r.dat[int(r.curOffset):int(r.curOffset)+n])
		// NOTE(review): curOffset is uint16 but squashfs data blocks can
		// exceed 64 KiB; widening the struct field to int is the complete
		// fix — it cannot be changed from this method alone.
		r.curOffset += uint16(n)
		total += n
	}
	return total, nil
}
5 changes: 5 additions & 0 deletions internal/decompress/decompress.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
package decompress

// Decompressor decompresses one full squashfs block that has already
// been read into memory, returning the decompressed bytes.
type Decompressor interface {
	Decompress([]byte) ([]byte, error)
}
15 changes: 15 additions & 0 deletions internal/decompress/lz4.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
package decompress

import (
"bytes"
"io"

"github.com/pierrec/lz4/v4"
)

// Lz4 decompresses lz4-compressed squashfs blocks.
type Lz4 struct{}

// Decompress inflates the entire lz4 stream held in data into memory.
// NOTE(review): this decodes the lz4 *frame* format; verify against the
// squashfs spec whether blocks are written with the raw lz4 block API
// instead.
func (l Lz4) Decompress(data []byte) ([]byte, error) {
	src := bytes.NewReader(data)
	return io.ReadAll(lz4.NewReader(src))
}
18 changes: 18 additions & 0 deletions internal/decompress/lzma.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
package decompress

import (
"bytes"
"io"

"github.com/ulikunitz/xz/lzma"
)

// Lzma decompresses legacy lzma-compressed squashfs blocks.
type Lzma struct{}

// Decompress inflates the entire lzma stream held in data into memory.
func (l Lzma) Decompress(data []byte) ([]byte, error) {
	src := bytes.NewReader(data)
	rdr, err := lzma.NewReader(src)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(rdr)
}
13 changes: 13 additions & 0 deletions internal/decompress/lzo.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
package decompress

import (
"bytes"

"github.com/rasky/go-lzo"
)

// Lzo decompresses lzo1x-compressed squashfs blocks.
type Lzo struct{}

// Decompress inflates data with LZO1X. The decompressed size is not
// known up front, so 0 is passed to let the decoder run until the input
// is fully consumed.
func (l Lzo) Decompress(data []byte) ([]byte, error) {
	src := bytes.NewReader(data)
	return lzo.Decompress1X(src, len(data), 0)
}
18 changes: 18 additions & 0 deletions internal/decompress/xz.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
package decompress

import (
"bytes"
"io"

"github.com/therootcompany/xz"
)

// Xz decompresses xz-compressed squashfs blocks.
type Xz struct{}

// Decompress inflates the entire xz stream held in data into memory.
// The 0 passed to NewReader selects the reader's default buffer size.
func (x Xz) Decompress(data []byte) ([]byte, error) {
	src := bytes.NewReader(data)
	rdr, err := xz.NewReader(src, 0)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(rdr)
}
18 changes: 18 additions & 0 deletions internal/decompress/zlib.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
package decompress

import (
"bytes"
"compress/zlib"
"io"
)

type Zlib struct{}

func (z Zlib) Decompress(data []byte) ([]byte, error) {
rdr, err := zlib.NewReader(bytes.NewReader(data))
if err != nil {
return nil, err
}
defer rdr.Close()
return io.ReadAll(rdr)
}
19 changes: 19 additions & 0 deletions internal/decompress/zstd.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
package decompress

import (
"bytes"
"io"

"github.com/klauspost/compress/zstd"
)

// Zstd decompresses zstd-compressed squashfs blocks.
type Zstd struct{}

// Decompress inflates the zstd frame held in data into memory.
//
// Since the whole input is already in memory, it uses the decoder's
// documented buffer-to-buffer path (NewReader(nil) + DecodeAll), which
// avoids the goroutines and stream bookkeeping of the io.Reader path.
func (z Zstd) Decompress(data []byte) ([]byte, error) {
	rdr, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	// Close releases the decoder's worker resources.
	defer rdr.Close()
	return rdr.DecodeAll(data, nil)
}
62 changes: 62 additions & 0 deletions internal/metadata/reader.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
package metadata

import (
"encoding/binary"
"io"

"github.com/CalebQ42/squashfs/internal/decompress"
)

// Reader streams squashfs metadata, which is stored as a chain of
// 8 KiB (or smaller) blocks, each prefixed with a uint16 size header
// and individually compressed.
type Reader struct {
	// r supplies the raw metadata bytes, positioned at a block header.
	r io.Reader
	// d decompresses blocks that are stored compressed.
	d decompress.Decompressor
	// dat is the current fully-decompressed metadata block.
	dat []byte
	// curOffset is the read position within dat (metadata blocks fit in
	// a uint16 offset).
	curOffset uint16
}

// NewReader wraps r in a metadata Reader that decompresses blocks with d.
func NewReader(r io.Reader, d decompress.Decompressor) *Reader {
	rdr := new(Reader)
	rdr.r = r
	rdr.d = d
	return rdr
}

// advance reads the next metadata block into r.dat and resets the read
// offset.
//
// Each metadata block is prefixed with a uint16 header: the low 15 bits
// are the on-disk size and bit 15 (0x8000) flags the block as stored
// uncompressed.
func (r *Reader) advance() error {
	r.curOffset = 0
	var size uint16
	err := binary.Read(r.r, binary.LittleEndian, &size)
	if err != nil {
		return err
	}
	realSize := size &^ 0x8000
	r.dat = make([]byte, realSize)
	// Pass the slice itself (not a pointer to it) so binary.Read takes
	// its []byte fast path (a plain io.ReadFull) instead of the slow
	// reflection path triggered by *[]byte.
	err = binary.Read(r.r, binary.LittleEndian, r.dat)
	if err != nil {
		return err
	}
	if size != realSize {
		// Flag bit was set: the block is stored uncompressed.
		return nil
	}
	r.dat, err = r.d.Decompress(r.dat)
	return err
}

// Read implements io.Reader, copying out of the current metadata block
// and transparently advancing to the next block as each is exhausted.
func (r *Reader) Read(b []byte) (int, error) {
	total := 0
	for total < len(b) {
		if r.curOffset >= uint16(len(r.dat)) {
			if err := r.advance(); err != nil {
				return total, err
			}
		}
		n := min(len(b)-total, len(r.dat)-int(r.curOffset))
		copy(b[total:total+n], r.dat[r.curOffset:int(r.curOffset)+n])
		r.curOffset += uint16(n)
		total += n
	}
	return total, nil
}
21 changes: 21 additions & 0 deletions internal/toreader/toreader.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
package toreader

import "io"

type Reader struct {
r io.ReaderAt
offset int64
}

func NewReader(r io.ReaderAt, start int64) *Reader {
return &Reader{
r: r,
offset: start,
}
}

func (r *Reader) Read(b []byte) (int, error) {
n, err := r.r.ReadAt(b, r.offset)
r.offset += int64(n)
return n, err
}
3 changes: 3 additions & 0 deletions squashfs/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Lower-Level Squashfs

This library is a lower-level version of the main [squashfs](https://github.com/CalebQ42/squashfs) library that doesn't try to be easy to use and exposes a lot of information that is not necessary for most use cases.
7 changes: 7 additions & 0 deletions squashfs/fragment.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
package squashfs

// fragEntry mirrors an on-disk squashfs fragment table entry: the
// fragment block's starting offset, its on-disk size word, and an
// unused padding field.
//
// NOTE(review): the fields are unexported; if this struct is decoded
// via encoding/binary reflection elsewhere, unexported fields cannot be
// set — confirm how instances are populated.
type fragEntry struct {
	start uint64
	size uint32
	_ uint32
}
17 changes: 17 additions & 0 deletions squashfs/inode.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
package squashfs

import (
"github.com/CalebQ42/squashfs/internal/metadata"
"github.com/CalebQ42/squashfs/internal/toreader"
"github.com/CalebQ42/squashfs/squashfs/inode"
)

// inodeFromRef decodes the inode addressed by a 48-bit inode reference:
// the upper bits hold the metadata block's offset from the inode table
// start, the low 16 bits the byte offset inside that decompressed block.
func (r *Reader) inodeFromRef(ref uint64) (*inode.Inode, error) {
	blockStart := r.sup.InodeTableStart + (ref >> 16)
	innerOffset := ref & 0xFFFF
	rdr := metadata.NewReader(toreader.NewReader(r.r, int64(blockStart)), r.d)
	// Discard bytes up to the inode's position within the block.
	if _, err := rdr.Read(make([]byte, innerOffset)); err != nil {
		return nil, err
	}
	return inode.Read(rdr, r.sup.BlockSize)
}
Loading

0 comments on commit 707391b

Please sign in to comment.