Compare commits

...

11 commits

Author   SHA1        Message                                                              Date
Evan Su  b8d21a84ac  Merge bbf250be37 into f0bfe3ba03                                     2025-04-18 21:16:06 +00:00
Evan Su  bbf250be37  Update default.yml (CodeQL / Analyze (push) check cancelled)         2025-04-18 17:15:58 -04:00
Evan Su  268055804e  two os.Removes conflict, allow them to err (deniability and split)   2025-04-18 17:03:09 -04:00
Evan Su  0cc109bc6b  handle more errors                                                   2025-04-18 16:48:29 -04:00
Evan Su  b0c2943bb0  handle more errors for keyfile code                                  2025-04-18 16:29:42 -04:00
Evan Su  1b55200b84  handle more errors in deniability decrypt block                      2025-04-18 16:13:12 -04:00
Evan Su  d0e4e71b97  handle more errors in recombine code                                 2025-04-18 15:59:04 -04:00
Evan Su  7a28e2b273  handle more errors in temp zip code                                  2025-04-18 15:54:26 -04:00
Evan Su  22a1118f01  catch temporary encrypted zip errors                                 2025-04-18 15:42:42 -04:00
Evan Su  ca0a74f99d  handle more errors                                                   2025-04-18 15:33:17 -04:00
Evan Su  a0a7f430e4  missing an err != nil check                                          2025-04-18 15:04:28 -04:00
2 changed files with 434 additions and 120 deletions
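Taken together, these commits apply one recurring pattern: calls such as fin.Read, fout.Write, and rand.Read that previously discarded their return values are now checked for both an error and a short read/write, with unrecoverable cases turned into panics. The sketch below is a minimal, stand-alone illustration of that pattern, not code taken from Picocrypt itself; the file name and messages are invented for the example.

```go
package main

import (
	"crypto/rand"
	"errors"
	"fmt"
	"os"
)

// readFull reads exactly len(buf) bytes from f, mirroring the
// "err != nil || n != len(buf)" checks added throughout this diff.
func readFull(f *os.File, buf []byte) error {
	n, err := f.Read(buf)
	if err != nil || n != len(buf) {
		return fmt.Errorf("short read: got %d of %d bytes: %v", n, len(buf), err)
	}
	return nil
}

func main() {
	// crypto/rand failures are treated as fatal, as in the diff below.
	salt := make([]byte, 16)
	if n, err := rand.Read(salt); err != nil || n != 16 {
		panic(errors.New("fatal crypto/rand error"))
	}

	// Hypothetical input file, used only for this example.
	fin, err := os.Open("example.bin")
	if err != nil {
		panic(err)
	}
	defer fin.Close()

	header := make([]byte, 15)
	if err := readFull(fin, header); err != nil {
		fmt.Println("failed to read 15 bytes from file:", err)
		return
	}
	fmt.Printf("header: %x\n", header)
}
```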

View file

@@ -33,6 +33,14 @@ body:
 attributes:
 value: |
 Usually these issues are not directly caused by Picocrypt's code. If you're on Windows, see [here](https://github.com/Picocrypt/Picocrypt/issues/91). If you're on Linux, install some packages and try again (see [here](https://github.com/Picocrypt/Picocrypt/tree/main/src#1-prerequisites)). Picocrypt only targets Windows 11, Ubuntu 24/Debian 12, and macOS 15 or later, so *do not create an issue if your OS is older than those; that is your problem, not mine*. If none of the points above help, create the issue and in a separate comment, provide details about the environment you're running in (like OS, DE, etc.). **Do not ping me initially.** Let the issue sit for at least *5 days* to allow other users to potentially help you resolve the issue. If after 5 days, you haven't figured things out, then you may ping me (@HACKERALERT).
+- type: markdown
+attributes:
+value: |
+### Picocrypt is crashing
+- type: markdown
+attributes:
+value: |
+This is almost always caused by input/output files being in locations where you don't have the correct read/write permissions. Try working within your user/home folder only and copy to/from other places to see if that resolves the crash. If not, run Picocrypt from the command line (e.g. `Picocrypt.exe` or `./Picocrypt`) so you can read the crash message. If you still can't fix the crash, create an issue and ping me (@HACKERALERT).
 - type: markdown
 attributes:
 value: |

View file

@@ -131,7 +131,6 @@ var startLabel = "Start"
 var mainStatus = "Ready"
 var mainStatusColor = WHITE
 var popupStatus string
-var usingTempZip bool
 var requiredFreeSpace int64
 // Progress variables
@@ -605,7 +604,7 @@ func draw() {
 panic(errors.New("fatal crypto/rand error"))
 }
 n, err := fout.Write(data)
-if n != 32 {
+if err != nil || n != 32 {
 fout.Close()
 panic(errors.New("failed to write full keyfile"))
 }
@@ -787,6 +786,7 @@ func draw() {
 outputFile = file
 mainStatus = "Ready"
 mainStatusColor = WHITE
+giu.Update()
 }).Build()
 giu.Tooltip("Save the output with a custom name and path").Build()
 }),
@@ -855,7 +855,7 @@ func onDrop(names []string) {
 duplicate = true
 }
 }
-stat, _ := os.Stat(i)
+stat, statErr := os.Stat(i)
 fin, err := os.Open(i)
 if err == nil {
 fin.Close()
@@ -866,7 +866,7 @@ func onDrop(names []string) {
 giu.Update()
 return
 }
-if !duplicate && !stat.IsDir() {
+if !duplicate && statErr == nil && !stat.IsDir() {
 tmp = append(tmp, i)
 }
 }
@@ -897,6 +897,7 @@ func onDrop(names []string) {
 if err != nil {
 mainStatus = "Failed to stat dropped item"
 mainStatusColor = RED
+giu.Update()
 return
 }
@@ -965,27 +966,51 @@ func onDrop(names []string) {
 if err != nil {
 resetUI()
 accessDenied("Read")
+giu.Update()
 return
 }
 // Check if version can be read from header
 tmp := make([]byte, 15)
-fin.Read(tmp)
+if n, err := fin.Read(tmp); err != nil || n != 15 {
+fin.Close()
+mainStatus = "Failed to read 15 bytes from file"
+mainStatusColor = RED
+giu.Update()
+return
+}
 tmp, err = rsDecode(rs5, tmp)
-if valid, _ := regexp.Match(`^v\d\.\d{2}`, tmp); !valid || err != nil {
+if valid, _ := regexp.Match(`^v\d\.\d{2}`, tmp); err != nil || !valid {
 // Volume has plausible deniability
 deniability = true
 mainStatus = "Can't read header, assuming volume is deniable"
 fin.Close()
+giu.Update()
 } else {
 // Read comments from file and check for corruption
 tmp = make([]byte, 15)
-fin.Read(tmp)
+if n, err := fin.Read(tmp); err != nil || n != 15 {
+fin.Close()
+mainStatus = "Failed to read 15 bytes from file"
+mainStatusColor = RED
+giu.Update()
+return
+}
 tmp, err = rsDecode(rs5, tmp)
 if err == nil {
-commentsLength, _ := strconv.Atoi(string(tmp))
+commentsLength, err := strconv.Atoi(string(tmp))
+if err != nil {
+comments = "Comment length is corrupted"
+giu.Update()
+} else {
 tmp = make([]byte, commentsLength*3)
-fin.Read(tmp)
+if n, err := fin.Read(tmp); err != nil || n != commentsLength*3 {
+fin.Close()
+mainStatus = "Failed to read comments from file"
+mainStatusColor = RED
+giu.Update()
+return
+}
 comments = ""
 for i := 0; i < commentsLength*3; i += 3 {
 t, err := rsDecode(rs1, tmp[i:i+3])
@@ -995,18 +1020,30 @@ func onDrop(names []string) {
 }
 comments += string(t)
 }
+giu.Update()
+}
 } else {
 comments = "Comments are corrupted"
+giu.Update()
 }
 // Read flags from file and check for corruption
 flags := make([]byte, 15)
-fin.Read(flags)
+if n, err := fin.Read(flags); err != nil || n != 15 {
 fin.Close()
+mainStatus = "Failed to read 15 bytes from file"
+mainStatusColor = RED
+giu.Update()
+return
+}
+if err := fin.Close(); err != nil {
+panic(err)
+}
 flags, err = rsDecode(rs5, flags)
 if err != nil {
 mainStatus = "The volume header is damaged"
 mainStatusColor = RED
+giu.Update()
 return
 }
@@ -1020,6 +1057,7 @@ func onDrop(names []string) {
 if flags[2] == 1 {
 keyfileOrdered = true
 }
+giu.Update()
 }
 } else { // One file was dropped for encryption
 mode = "encrypt"
@@ -1027,6 +1065,7 @@ func onDrop(names []string) {
 startLabel = "Encrypt"
 inputFile = names[0]
 outputFile = names[0] + ".pcv"
+giu.Update()
 }
 // Add the file
@@ -1035,6 +1074,7 @@ func onDrop(names []string) {
 if !isSplit {
 compressTotal += stat.Size()
 }
+giu.Update()
 }
 } else { // There are multiple dropped items
 mode = "encrypt"
@@ -1042,7 +1082,14 @@ func onDrop(names []string) {
 // Go through each dropped item and add to corresponding slices
 for _, name := range names {
-stat, _ := os.Stat(name)
+stat, err := os.Stat(name)
+if err != nil {
+resetUI()
+mainStatus = "Failed to stat dropped items"
+mainStatusColor = RED
+giu.Update()
+return
+}
 if stat.IsDir() {
 folders++
 onlyFolders = append(onlyFolders, name)
@@ -1078,17 +1125,31 @@ func onDrop(names []string) {
 // Set the input and output paths
 inputFile = filepath.Join(filepath.Dir(names[0]), "encrypted-"+strconv.Itoa(int(time.Now().Unix()))) + ".zip"
 outputFile = inputFile + ".pcv"
-usingTempZip = true
+giu.Update()
 }
 // Recursively add all files in 'onlyFolders' to 'allFiles'
 go func() {
 oldInputLabel := inputLabel
 for _, name := range onlyFolders {
-filepath.Walk(name, func(path string, _ os.FileInfo, _ error) error {
+if filepath.Walk(name, func(path string, _ os.FileInfo, err error) error {
+if err != nil {
+resetUI()
+mainStatus = "Failed to walk through dropped items"
+mainStatusColor = RED
+giu.Update()
+return err
+}
 stat, err := os.Stat(path)
+if err != nil {
+resetUI()
+mainStatus = "Failed to walk through dropped items"
+mainStatusColor = RED
+giu.Update()
+return err
+}
 // If 'path' is a valid file path, add to 'allFiles'
-if err == nil && !stat.IsDir() {
+if !stat.IsDir() {
 allFiles = append(allFiles, path)
 compressTotal += stat.Size()
 requiredFreeSpace += stat.Size()
@@ -1096,7 +1157,13 @@ func onDrop(names []string) {
 giu.Update()
 }
 return nil
-})
+}) != nil {
+resetUI()
+mainStatus = "Failed to walk through dropped items"
+mainStatusColor = RED
+giu.Update()
+return
+}
 }
 inputLabel = fmt.Sprintf("%s (%s)", oldInputLabel, sizeify(compressTotal))
 scanning = false
@@ -1127,12 +1194,24 @@ func work() {
 var tempZipCipherW *chacha20.Cipher
 var tempZipCipherR *chacha20.Cipher
 var tempZipInUse bool = false
-func() {
+func() { // enclose to keep out of parent scope
 key, nonce := make([]byte, 32), make([]byte, 12)
-rand.Read(key)
-rand.Read(nonce)
-tempZipCipherW, _ = chacha20.NewUnauthenticatedCipher(key, nonce)
-tempZipCipherR, _ = chacha20.NewUnauthenticatedCipher(key, nonce)
+if n, err := rand.Read(key); err != nil || n != 32 {
+panic(errors.New("fatal crypto/rand error"))
+}
+if n, err := rand.Read(nonce); err != nil || n != 12 {
+panic(errors.New("fatal crypto/rand error"))
+}
+if bytes.Equal(key, make([]byte, 32)) || bytes.Equal(nonce, make([]byte, 12)) {
+panic(errors.New("fatal crypto/rand error")) // this should never happen but be safe
+}
+var errW error
+var errR error
+tempZipCipherW, errW = chacha20.NewUnauthenticatedCipher(key, nonce)
+tempZipCipherR, errR = chacha20.NewUnauthenticatedCipher(key, nonce)
+if errW != nil || errR != nil {
+panic(errors.New("fatal chacha20 init error"))
+}
 }()
 // Combine/compress all files into a .zip file if needed
@@ -1174,9 +1253,24 @@ func work() {
 // Create file info header (size, last modified, etc.)
 stat, err := os.Stat(path)
 if err != nil {
-continue // Skip temporary and inaccessible files
+writer.Close()
+file.Close()
+os.Remove(inputFile)
+resetUI()
+mainStatus = "Failed to stat input files"
+mainStatusColor = RED
+return
+}
+header, err := zip.FileInfoHeader(stat)
+if err != nil {
+writer.Close()
+file.Close()
+os.Remove(inputFile)
+resetUI()
+mainStatus = "Failed to create zip.FileInfoHeader"
+mainStatusColor = RED
+return
 }
-header, _ := zip.FileInfoHeader(stat)
 header.Name = strings.TrimPrefix(path, rootDir)
 header.Name = filepath.ToSlash(header.Name)
 header.Name = strings.TrimPrefix(header.Name, "/")
@@ -1188,7 +1282,16 @@ func work() {
 }
 // Open the file for reading
-entry, _ := writer.CreateHeader(header)
+entry, err := writer.CreateHeader(header)
+if err != nil {
+writer.Close()
+file.Close()
+os.Remove(inputFile)
+resetUI()
+mainStatus = "Failed to writer.CreateHeader"
+mainStatusColor = RED
+return
+}
 fin, err := os.Open(path)
 if err != nil {
 writer.Close()
@@ -1219,8 +1322,12 @@ func work() {
 return
 }
 }
-writer.Close()
-file.Close()
+if err := writer.Close(); err != nil {
+panic(err)
+}
+if err := file.Close(); err != nil {
+panic(err)
+}
 }
 // Recombine a split file if necessary
@@ -1256,7 +1363,7 @@ func work() {
 // Merge all chunks into one file
 startTime := time.Now()
-for i := 0; i < totalFiles; i++ {
+for i := range totalFiles {
 fin, err := os.Open(fmt.Sprintf("%s.%d", inputFile, i))
 if err != nil {
 fout.Close()
@@ -1280,10 +1387,11 @@ func work() {
 break
 }
 data = data[:read]
-_, err = fout.Write(data)
+var n int
+n, err = fout.Write(data)
 done += read
-if err != nil {
+if err != nil || n != len(data) {
 insufficientSpace(fin, fout)
 os.Remove(outputFile + ".pcv")
 return
@@ -1295,9 +1403,13 @@ func work() {
 popupStatus = fmt.Sprintf("Recombining at %.2f MiB/s (ETA: %s)", speed, eta)
 giu.Update()
 }
-fin.Close()
+if err := fin.Close(); err != nil {
+panic(err)
+}
 }
-fout.Close()
+if err := fout.Close(); err != nil {
+panic(err)
+}
 inputFileOld = inputFile
 inputFile = outputFile + ".pcv"
 }
@@ -1311,26 +1423,44 @@ func work() {
 giu.Update()
 // Get size of volume for showing progress
-stat, _ := os.Stat(inputFile)
+stat, err := os.Stat(inputFile)
+if err != nil {
+// we already read from inputFile successfully in onDrop
+// so it is very unlikely this err != nil, we can just panic
+panic(err)
+}
 total := stat.Size()
 // Rename input volume to free up the filename
-fin, _ := os.Open(inputFile)
+fin, err := os.Open(inputFile)
+if err != nil {
+panic(err)
+}
 for strings.HasSuffix(inputFile, ".tmp") {
 inputFile = strings.TrimSuffix(inputFile, ".tmp")
 }
 inputFile += ".tmp"
-fout, _ := os.Create(inputFile)
+fout, err := os.Create(inputFile)
+if err != nil {
+panic(err)
+}
 // Get the Argon2 salt and XChaCha20 nonce from input volume
 salt := make([]byte, 16)
 nonce := make([]byte, 24)
-fin.Read(salt)
-fin.Read(nonce)
+if n, err := fin.Read(salt); err != nil || n != 16 {
+panic(errors.New("failed to read 16 bytes from file"))
+}
+if n, err := fin.Read(nonce); err != nil || n != 24 {
+panic(errors.New("failed to read 24 bytes from file"))
+}
 // Generate key and XChaCha20
 key := argon2.IDKey([]byte(password), salt, 4, 1<<20, 4, 32)
-chacha, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
+chacha, err := chacha20.NewUnauthenticatedCipher(key, nonce)
+if err != nil {
+panic(err)
+}
 // Decrypt the entire volume
 done, counter := 0, 0
@@ -1343,7 +1473,11 @@ func work() {
 src = src[:size]
 dst := make([]byte, len(src))
 chacha.XORKeyStream(dst, src)
-fout.Write(dst)
+if n, err := fout.Write(dst); err != nil || n != len(dst) {
+fout.Close()
+os.Remove(fout.Name())
+panic(errors.New("failed to write dst"))
+}
 // Update stats
 done += size
@@ -1354,23 +1488,39 @@ func work() {
 // Change nonce after 60 GiB to prevent overflow
 if counter >= 60*GiB {
 tmp := sha3.New256()
-tmp.Write(nonce)
+if n, err := tmp.Write(nonce); err != nil || n != len(nonce) {
+panic(errors.New("failed to write nonce to tmp during rekeying"))
+}
 nonce = tmp.Sum(nil)[:24]
-chacha, _ = chacha20.NewUnauthenticatedCipher(key, nonce)
+chacha, err = chacha20.NewUnauthenticatedCipher(key, nonce)
+if err != nil {
+panic(err)
+}
 counter = 0
 }
 }
-fin.Close()
-fout.Close()
+if err := fin.Close(); err != nil {
+panic(err)
+}
+if err := fout.Close(); err != nil {
+panic(err)
+}
 // Check if the version can be read from the volume
-fin, _ = os.Open(inputFile)
+fin, err = os.Open(inputFile)
+if err != nil {
+panic(err)
+}
 tmp := make([]byte, 15)
-fin.Read(tmp)
-fin.Close()
-tmp, err := rsDecode(rs5, tmp)
-if valid, _ := regexp.Match(`^v1\.\d{2}`, tmp); !valid || err != nil {
+if n, err := fin.Read(tmp); err != nil || n != 15 {
+panic(errors.New("failed to read 15 bytes from file"))
+}
+if err := fin.Close(); err != nil {
+panic(err)
+}
+tmp, err = rsDecode(rs5, tmp)
+if valid, _ := regexp.Match(`^v1\.\d{2}`, tmp); err != nil || !valid {
 os.Remove(inputFile)
 inputFile = strings.TrimSuffix(inputFile, ".tmp")
 broken(nil, nil, "Password is incorrect or the file is not a volume", true)
@@ -1387,7 +1537,12 @@ func work() {
 giu.Update()
 // Subtract the header size from the total size if decrypting
-stat, _ := os.Stat(inputFile)
+stat, err := os.Stat(inputFile)
+if err != nil {
+resetUI()
+accessDenied("Read")
+return
+}
 total := stat.Size()
 if mode == "decrypt" {
 total -= 789
@@ -1492,6 +1647,18 @@ func work() {
 if _, err := rand.Read(nonce); err != nil {
 panic(err)
 }
+if bytes.Equal(salt, make([]byte, 16)) {
+panic(errors.New("fatal crypto/rand error"))
+}
+if bytes.Equal(hkdfSalt, make([]byte, 32)) {
+panic(errors.New("fatal crypto/rand error"))
+}
+if bytes.Equal(serpentIV, make([]byte, 16)) {
+panic(errors.New("fatal crypto/rand error"))
+}
+if bytes.Equal(nonce, make([]byte, 24)) {
+panic(errors.New("fatal crypto/rand error"))
+}
 // Encode values with Reed-Solomon and write to file
 _, errs[4] = fout.Write(rsEncode(rs16, salt))
@@ -1614,6 +1781,9 @@ func work() {
 32,
 )
 }
+if bytes.Equal(key, make([]byte, 32)) {
+panic(errors.New("fatal crypto/argon2 error"))
+}
 // If keyfiles are being used
 if len(keyfiles) > 0 || keyfile {
@@ -1622,7 +1792,10 @@ func work() {
 var keyfileTotal int64
 for _, path := range keyfiles {
-stat, _ := os.Stat(path)
+stat, err := os.Stat(path)
+if err != nil {
+panic(err) // we already checked os.Stat in onDrop
+}
 keyfileTotal += stat.Size()
 }
@@ -1632,7 +1805,10 @@ func work() {
 // For each keyfile...
 for _, path := range keyfiles {
-fin, _ := os.Open(path)
+fin, err := os.Open(path)
+if err != nil {
+panic(err)
+}
 for { // Read in chunks of 1 MiB
 data := make([]byte, MiB)
 size, err := fin.Read(data)
@@ -1640,27 +1816,36 @@ func work() {
 break
 }
 data = data[:size]
-tmp.Write(data) // Hash the data
+if _, err := tmp.Write(data); err != nil { // Hash the data
+panic(err)
+}
 // Update progress
 keyfileDone += size
 progress = float32(keyfileDone) / float32(keyfileTotal)
 giu.Update()
 }
-fin.Close()
+if err := fin.Close(); err != nil {
+panic(err)
+}
 }
 keyfileKey = tmp.Sum(nil) // Get the SHA3-256
 // Store a hash of 'keyfileKey' for comparison
 tmp = sha3.New256()
-tmp.Write(keyfileKey)
+if _, err := tmp.Write(keyfileKey); err != nil {
+panic(err)
+}
 keyfileHash = tmp.Sum(nil)
 } else { // If order doesn't matter, hash individually and combine
 var keyfileDone int
 // For each keyfile...
 for _, path := range keyfiles {
-fin, _ := os.Open(path)
+fin, err := os.Open(path)
+if err != nil {
+panic(err)
+}
 tmp := sha3.New256()
 for { // Read in chunks of 1 MiB
 data := make([]byte, MiB)
@@ -1669,14 +1854,18 @@ func work() {
 break
 }
 data = data[:size]
-tmp.Write(data) // Hash the data
+if _, err := tmp.Write(data); err != nil { // Hash the data
+panic(err)
+}
 // Update progress
 keyfileDone += size
 progress = float32(keyfileDone) / float32(keyfileTotal)
 giu.Update()
 }
-fin.Close()
+if err := fin.Close(); err != nil {
+panic(err)
+}
 sum := tmp.Sum(nil) // Get the SHA3-256
@@ -1692,7 +1881,9 @@ func work() {
 // Store a hash of 'keyfileKey' for comparison
 tmp := sha3.New256()
-tmp.Write(keyfileKey)
+if _, err := tmp.Write(keyfileKey); err != nil {
+panic(err)
+}
 keyfileHash = tmp.Sum(nil)
 }
 }
@@ -1702,7 +1893,9 @@ func work() {
 // Hash the encryption key for comparison when decrypting
 tmp := sha3.New512()
-tmp.Write(key)
+if _, err := tmp.Write(key); err != nil {
+panic(err)
+}
 keyHash = tmp.Sum(nil)
 // Validate the password and/or keyfiles
@@ -1776,23 +1969,36 @@ func work() {
 }
 done, counter := 0, 0
-chacha, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
+chacha, err := chacha20.NewUnauthenticatedCipher(key, nonce)
+if err != nil {
+panic(err)
+}
 // Use HKDF-SHA3 to generate a subkey for the MAC
 var mac hash.Hash
 subkey := make([]byte, 32)
 hkdf := hkdf.New(sha3.New256, key, hkdfSalt, nil)
-hkdf.Read(subkey)
+if n, err := hkdf.Read(subkey); err != nil || n != 32 {
+panic(errors.New("fatal hkdf.Read error"))
+}
 if paranoid {
 mac = hmac.New(sha3.New512, subkey) // HMAC-SHA3
 } else {
-mac, _ = blake2b.New512(subkey) // Keyed BLAKE2b
+mac, err = blake2b.New512(subkey) // Keyed BLAKE2b
+if err != nil {
+panic(err)
+}
 }
 // Generate another subkey for use as Serpent's key
 serpentKey := make([]byte, 32)
-hkdf.Read(serpentKey)
-s, _ := serpent.NewCipher(serpentKey)
+if n, err := hkdf.Read(serpentKey); err != nil || n != 32 {
+panic(errors.New("fatal hkdf.Read error"))
+}
+s, err := serpent.NewCipher(serpentKey)
+if err != nil {
+panic(err)
+}
 serpent := cipher.NewCTR(s, serpentIV)
 // Start the main encryption process
@@ -1840,7 +2046,9 @@ func work() {
 }
 chacha.XORKeyStream(dst, src)
-mac.Write(dst)
+if _, err := mac.Write(dst); err != nil {
+panic(err)
+}
 if reedsolo {
 copy(src, dst)
@@ -1894,7 +2102,7 @@ func work() {
 } else {
 // Decode the full chunks
 chunks := len(dst)/136 - 1
-for i := 0; i < chunks; i++ {
+for i := range chunks {
 tmp, err := rsDecode(rs128, dst[i*136:(i+1)*136])
 if err != nil {
 if keep {
@@ -1929,7 +2137,9 @@ func work() {
 dst = make([]byte, len(src))
 }
-mac.Write(src)
+if _, err := mac.Write(src); err != nil {
+panic(err)
+}
 chacha.XORKeyStream(dst, src)
 if paranoid {
@@ -1971,12 +2181,19 @@ func work() {
 if counter >= 60*GiB {
 // ChaCha20
 nonce = make([]byte, 24)
-hkdf.Read(nonce)
-chacha, _ = chacha20.NewUnauthenticatedCipher(key, nonce)
+if n, err := hkdf.Read(nonce); err != nil || n != 24 {
+panic(errors.New("fatal hkdf.Read error"))
+}
+chacha, err = chacha20.NewUnauthenticatedCipher(key, nonce)
+if err != nil {
+panic(err)
+}
 // Serpent
 serpentIV = make([]byte, 16)
-hkdf.Read(serpentIV)
+if n, err := hkdf.Read(serpentIV); err != nil || n != 16 {
+panic(errors.New("fatal hkdf.Read error"))
+}
 serpent = cipher.NewCTR(s, serpentIV)
 // Reset counter to 0
@@ -1993,10 +2210,18 @@ func work() {
 giu.Update()
 // Seek back to header and write important values
-fout.Seek(int64(309+len(comments)*3), 0)
-fout.Write(rsEncode(rs64, keyHash))
-fout.Write(rsEncode(rs32, keyfileHash))
-fout.Write(rsEncode(rs64, mac.Sum(nil)))
+if _, err := fout.Seek(int64(309+len(comments)*3), 0); err != nil {
+panic(err)
+}
+if _, err := fout.Write(rsEncode(rs64, keyHash)); err != nil {
+panic(err)
+}
+if _, err := fout.Write(rsEncode(rs32, keyfileHash)); err != nil {
+panic(err)
+}
+if _, err := fout.Write(rsEncode(rs64, mac.Sum(nil))); err != nil {
+panic(err)
+}
 } else {
 popupStatus = "Comparing values..."
 giu.Update()
@@ -2021,10 +2246,16 @@ func work() {
 }
 }
-fin.Close()
-fout.Close()
+if err := fin.Close(); err != nil {
+panic(err)
+}
+if err := fout.Close(); err != nil {
+panic(err)
+}
-os.Rename(outputFile+".incomplete", outputFile)
+if err := os.Rename(outputFile+".incomplete", outputFile); err != nil {
+panic(err)
+}
 // Add plausible deniability
 if mode == "encrypt" && deniability {
@@ -2033,29 +2264,51 @@ func work() {
 giu.Update()
 // Get size of volume for showing progress
-stat, _ := os.Stat(outputFile)
+stat, err := os.Stat(outputFile)
+if err != nil {
+panic(err)
+}
 total := stat.Size()
 // Rename the output volume to free up the filename
 os.Rename(outputFile, outputFile+".tmp")
-fin, _ := os.Open(outputFile + ".tmp")
-fout, _ := os.Create(outputFile + ".incomplete")
+fin, err := os.Open(outputFile + ".tmp")
+if err != nil {
+panic(err)
+}
+fout, err := os.Create(outputFile + ".incomplete")
+if err != nil {
+panic(err)
+}
 // Use a random Argon2 salt and XChaCha20 nonce
 salt := make([]byte, 16)
 nonce := make([]byte, 24)
-if _, err := rand.Read(salt); err != nil {
+if n, err := rand.Read(salt); err != nil || n != 16 {
+panic(errors.New("fatal crypto/rand error"))
+}
+if n, err := rand.Read(nonce); err != nil || n != 24 {
+panic(errors.New("fatal crypto/rand error"))
+}
+if bytes.Equal(salt, make([]byte, 16)) || bytes.Equal(nonce, make([]byte, 24)) {
+panic(errors.New("fatal crypto/rand error"))
+}
+if _, err := fout.Write(salt); err != nil {
 panic(err)
 }
-if _, err := rand.Read(nonce); err != nil {
+if _, err := fout.Write(nonce); err != nil {
 panic(err)
 }
-fout.Write(salt)
-fout.Write(nonce)
 // Generate key and XChaCha20
 key := argon2.IDKey([]byte(password), salt, 4, 1<<20, 4, 32)
-chacha, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
+if bytes.Equal(key, make([]byte, 32)) {
+panic(errors.New("fatal crypto/argon2 error"))
+}
+chacha, err := chacha20.NewUnauthenticatedCipher(key, nonce)
+if err != nil {
+panic(err)
+}
 // Encrypt the entire volume
 done, counter := 0, 0
@@ -2068,7 +2321,9 @@ func work() {
 src = src[:size]
 dst := make([]byte, len(src))
 chacha.XORKeyStream(dst, src)
-fout.Write(dst)
+if _, err := fout.Write(dst); err != nil {
+panic(err)
+}
 // Update stats
 done += size
@@ -2079,17 +2334,30 @@ func work() {
 // Change nonce after 60 GiB to prevent overflow
 if counter >= 60*GiB {
 tmp := sha3.New256()
-tmp.Write(nonce)
+if _, err := tmp.Write(nonce); err != nil {
+panic(err)
+}
 nonce = tmp.Sum(nil)[:24]
-chacha, _ = chacha20.NewUnauthenticatedCipher(key, nonce)
+chacha, err = chacha20.NewUnauthenticatedCipher(key, nonce)
+if err != nil {
+panic(err)
+}
 counter = 0
 }
 }
-fin.Close()
-fout.Close()
-os.Remove(fin.Name())
-os.Rename(outputFile+".incomplete", outputFile)
+if err := fin.Close(); err != nil {
+panic(err)
+}
+if err := fout.Close(); err != nil {
+panic(err)
+}
+if err := os.Remove(fin.Name()); err != nil {
+panic(err)
+}
+if err := os.Rename(outputFile+".incomplete", outputFile); err != nil {
+panic(err)
+}
 canCancel = true
 giu.Update()
 }
@@ -2097,11 +2365,17 @@ func work() {
 // Split the file into chunks
 if split {
 var splitted []string
-stat, _ := os.Stat(outputFile)
+stat, err := os.Stat(outputFile)
+if err != nil {
+panic(err)
+}
 size := stat.Size()
 finishedFiles := 0
 finishedBytes := 0
-chunkSize, _ := strconv.Atoi(splitSize)
+chunkSize, err := strconv.Atoi(splitSize)
+if err != nil {
+panic(err)
+}
 // Calculate chunk size
 if splitSelected == 0 {
@@ -2122,17 +2396,25 @@ func work() {
 giu.Update()
 // Open the volume for reading
-fin, _ := os.Open(outputFile)
+fin, err := os.Open(outputFile)
+if err != nil {
+panic(err)
+}
 // Delete existing chunks to prevent mixed chunks
-names, _ := filepath.Glob(outputFile + ".*")
+names, err := filepath.Glob(outputFile + ".*")
+if err != nil {
+panic(err)
+}
 for _, i := range names {
-os.Remove(i)
+if err := os.Remove(i); err != nil {
+panic(err)
+}
 }
 // Start the splitting process
 startTime := time.Now()
-for i := 0; i < chunks; i++ {
+for i := range chunks {
 // Make the chunk
 fout, _ := os.Create(fmt.Sprintf("%s.%d.incomplete", outputFile, i))
 done := 0
@@ -2186,7 +2468,9 @@ func work() {
 popupStatus = fmt.Sprintf("Splitting at %.2f MiB/s (ETA: %s)", speed, eta)
 giu.Update()
 }
-fout.Close()
+if err := fout.Close(); err != nil {
+panic(err)
+}
 // Update stats
 finishedFiles++
@@ -2198,11 +2482,20 @@ func work() {
 giu.Update()
 }
-fin.Close()
-os.Remove(outputFile)
-names, _ = filepath.Glob(outputFile + ".*.incomplete")
+if err := fin.Close(); err != nil {
+panic(err)
+}
+if err := os.Remove(outputFile); err != nil {
+panic(err)
+}
+names, err = filepath.Glob(outputFile + ".*.incomplete")
+if err != nil {
+panic(err)
+}
 for _, i := range names {
-os.Rename(i, strings.TrimSuffix(i, ".incomplete"))
+if err := os.Rename(i, strings.TrimSuffix(i, ".incomplete")); err != nil {
+panic(err)
+}
 }
 }
@@ -2213,7 +2506,9 @@ func work() {
 // Delete temporary files used during encryption and decryption
 if recombine || len(allFiles) > 1 || len(onlyFolders) > 0 || compress {
-os.Remove(inputFile)
+if err := os.Remove(inputFile); err != nil {
+panic(err)
+}
 if deniability {
 os.Remove(strings.TrimSuffix(inputFile, ".tmp"))
 }
@@ -2232,21 +2527,31 @@ func work() {
 if err != nil {
 break
 }
-os.Remove(fmt.Sprintf("%s.%d", inputFileOld, i))
+if err := os.Remove(fmt.Sprintf("%s.%d", inputFileOld, i)); err != nil {
+panic(err)
+}
 i++
 }
 } else {
-os.Remove(inputFile)
+if err := os.Remove(inputFile); err != nil {
+panic(err)
+}
 if deniability {
-os.Remove(strings.TrimSuffix(inputFile, ".tmp"))
+if err := os.Remove(strings.TrimSuffix(inputFile, ".tmp")); err != nil {
+panic(err)
+}
 }
 }
 } else {
 for _, i := range onlyFiles {
-os.Remove(i)
+if err := os.Remove(i); err != nil {
+panic(err)
+}
 }
 for _, i := range onlyFolders {
-os.RemoveAll(i)
+if err := os.RemoveAll(i); err != nil {
+panic(err)
+}
 }
 }
 }
@@ -2266,7 +2571,9 @@ func work() {
 return
 }
-os.Remove(outputFile)
+if err := os.Remove(outputFile); err != nil {
+panic(err)
+}
 }
 // All done, reset the UI
@@ -2375,7 +2682,6 @@ func resetUI() {
 mainStatus = "Ready"
 mainStatusColor = WHITE
 popupStatus = ""
-usingTempZip = false
 requiredFreeSpace = 0
 progress = 0
@@ -2400,7 +2706,7 @@ func rsDecode(rs *infectious.FEC, data []byte) ([]byte, error) {
 }
 tmp := make([]infectious.Share, rs.Total())
-for i := 0; i < rs.Total(); i++ {
+for i := range rs.Total() {
 tmp[i].Number = i
 tmp[i].Data = append(tmp[i].Data, data[i])
 }
@@ -2447,7 +2753,7 @@ func genPassword() string {
 chars += "-=_+!@#$^&()?<>"
 }
 tmp := make([]byte, passgenLength)
-for i := 0; i < int(passgenLength); i++ {
+for i := range int(passgenLength) {
 j, _ := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
 tmp[i] = chars[j.Int64()]
 }
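
A side change visible in several hunks above (for i := 0; i < totalFiles; i++ becoming for i := range totalFiles, and similarly for chunks, rs.Total(), and passgenLength) relies on Go 1.22's ability to range over an integer. A small sketch of the two equivalent forms, independent of Picocrypt's code:

```go
package main

import "fmt"

func main() {
	totalFiles := 3

	// Pre-1.22 style counted loop, as in the old code.
	for i := 0; i < totalFiles; i++ {
		fmt.Println("old style:", i)
	}

	// Go 1.22+ integer range, as in the new code.
	for i := range totalFiles {
		fmt.Println("new style:", i)
	}
}
```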