Unverified Commit 172fb276 authored by Terry Ellison, committed by GitHub

Add compression to LFS images (#2448)

* Merge of LFS compress, optimize against current dev
* Fixes to LFS compress patch
parent 0e103a39
/*
* This implementation draws heavily on the work done by Paul Sokolovsky
* (https://github.com/pfalcon) and his uzlib library which in turn uses
* work done by Joergen Ibsen, Simon Tatham and others. All of this work
* is under an unrestricted right to use subject to copyright attribution.
* The two copyright wordings (variants A and B) follow.
*
* (c) statement A covers initTables, copy and literal
*
* The remainder of this code has been written by me, Terry Ellison 2018,
* under the standard NodeMCU MIT licence, but is available to the other
* contributors to this source under any permissive licence.
*
* My primary algorithmic reference is RFC 1951: "DEFLATE Compressed Data
* Format Specification version 1.3", dated May 1996.
*
* Also, because the code in this module is drawn from different sources,
* the differing coding practices can be confusing, so I have standardised
* the source by:
*
* - Adopting the 2 indent rule as in the rest of the firmware
*
* - I have replaced the various mix of char, unsigned char and uchar
* by the single uchar type; ditto for ushort and uint.
*
* - All internal (non-exported) functions and data are static
*
* - Only exported functions and data have the module prefix. All
* internal (static) variables and fields are lowerCamelCase.
*
***********************************************************************
* Copyright statement A for Zlib (RFC1950 / RFC1951) compression for PuTTY.
PuTTY is copyright 1997-2014 Simon Tatham.
Portions copyright Robert de Bath, Joris van Rantwijk, Delian
Delchev, Andreas Schultz, Jeroen Massar, Wez Furlong, Nicolas Barry,
Justin Bradford, Ben Harris, Malcolm Smith, Ahmad Khalifa, Markus
Kuhn, Colin Watson, and CORE SDI S.A.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
************************************************************************
Copyright statement B for genlz77 functions:
*
* genlz77 - Generic LZ77 compressor
*
* Copyright (c) 2014 by Paul Sokolovsky
*
* This software is provided 'as-is', without any express
* or implied warranty. In no event will the authors be
* held liable for any damages arising from the use of
* this software.
*
* Permission is granted to anyone to use this software
* for any purpose, including commercial applications,
* and to alter it and redistribute it freely, subject to
* the following restrictions:
*
* 1. The origin of this software must not be
* misrepresented; you must not claim that you
* wrote the original software. If you use this
* software in a product, an acknowledgment in
* the product documentation would be appreciated
* but is not required.
*
* 2. Altered source versions must be plainly marked
* as such, and must not be misrepresented as
* being the original software.
*
* 3. This notice may not be removed or altered from
* any source distribution.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include "uzlib.h"
jmp_buf unwindAddr;
/* Minimum and maximum length of matches to look for, inclusive */
#define MIN_MATCH 3
#define MAX_MATCH 258
/* Max offset of the match to look for, inclusive */
#define MAX_OFFSET 16384 // 32768 //
#define OFFSET16_MASK 0x7FFF
#define NULL_OFFSET 0xFFFF
#if MIN_MATCH < 3
#error "Encoding requires a minium match of 3 bytes"
#endif
#define SIZE(a) (sizeof(a)/sizeof(*a)) /* no of elements in array */
#ifdef __XTENSA__
#define RAM_COPY_BYTE_ARRAY(c,s,sl) uchar *c = alloca(sl); memcpy(c,s,(sl))
#else
#define RAM_COPY_BYTE_ARRAY(c,s,sl) uchar *c = s;
#endif
#define FREE(v) if (v) uz_free(v)
typedef uint8_t uchar;
typedef uint16_t ushort;
typedef uint32_t uint;
#ifdef DEBUG_COUNTS
#define DBG_PRINT(...) printf(__VA_ARGS__)
#define DBG_COUNT(n) (debugCounts[n]++)
#define DBG_ADD_COUNT(n,m) (debugCounts[n]+=m)
int debugCounts[20];
#else
#define DBG_PRINT(...)
#define DBG_COUNT(n)
#define DBG_ADD_COUNT(n,m)
#endif
int dbg_break(void) {return 1;}
typedef struct {
ushort code, extraBits, min, max;
} codeRecord;
struct dynTables {
ushort *hashChain;
ushort *hashTable;
ushort hashMask;
ushort hashSlots;
ushort hashBits;
ushort dictLen;
const uchar bitrevNibble[16];
const codeRecord lenCodes[285-257+1];
const codeRecord distCodes[29-0+1];
} *dynamicTables;
struct outputBuf {
uchar *buffer;
uint len, size;
uint inLen, inNdx;
uint bits, nBits;
uint compDisabled;
} *oBuf;
/*
* Set up the constant tables used to drive the compression
*
* Constants are stored in flash memory on the ESP8266 NodeMCU firmware
* builds, but only word-aligned data accesses are supported in hardware, so
* short and byte accesses are handled by a S/W exception handler and are
* SLOW. RAM is also at a premium, so these short routines are driven by
* byte vectors copied into RAM and then used to generate temporary RAM
* tables, which are the same as the above statically declared versions.
*
* This might seem a bit convoluted but this runs faster and takes up less
* memory than the original version. This code also works fine on the
* x86-64s so we just use one code variant.
*
* Note that fixed Huffman trees as defined in RFC 1951 Sec 3.2.5 are
* always used. Whilst dynamic trees can give better compression for
* larger blocks, this comes at a performance hit of having to compute
* these trees. Fixed trees give better compression performance on short
* blocks and significantly reduce compression times.
*
* The following defines are used to initialise these tables.
*/
#define lenCodes_GEN \
"\x03\x01\x01\x01\x01\x01\x01\x01\xff\x02\x02\x02\x02\xff\x04\x04\x04\x04" \
"\xff\x08\x08\x08\x08\xff\x10\x10\x10\x10\xff\x20\x20\x20\x1f\xff\x01\x00"
#define lenCodes_LEN 29
#define distCodes_GEN \
"\x01\x01\x01\x01\xff\x02\x02\xff\x04\x04\xff\x08\x08\xff\x10\x10\xff" \
"\x20\x20\xff\x40\x40\xff\x86\x86\xff\x87\x87\xff\x88\x88\xff" \
"\x89\x89\xff\x8a\x8a\xff\x8b\x8b\xff\x8c\x8c"
#define distCodes_LEN 30
#define BITREV16 "\x0\x8\x4\xc\x2\xa\x6\xe\x1\x9\x5\xd\x3\xb\x7\xf"
static void genCodeRecs (const codeRecord *rec, ushort len,
char *init, int initLen,
ushort start, ushort m0) {
DBG_COUNT(0);
int i, b=0, m=0, last=m0;
RAM_COPY_BYTE_ARRAY(c, (uchar *)init,initLen);
codeRecord *p = (codeRecord *) rec;
for (i = start; i < start+len; i++, c++) {
if (*c == 0xFF)
b++, c++;
m +=!(*c & 0x80) ? *c & 0x7F : 2 << *c;
*p++ = (codeRecord) {i, b, last + 1, (last = m)};
}
}
static void initTables (uint chainLen, uint hashSlots) {
DBG_COUNT(1);
uint dynamicSize = sizeof(struct dynTables) +
sizeof(struct outputBuf) +
chainLen * sizeof(ushort) +
hashSlots * sizeof(ushort);
/* Do a single malloc for the dynamic tables and assign addresses */
struct dynTables *dt = uz_malloc(dynamicSize);
if (!dt)
UZLIB_THROW(UZLIB_MEMORY_ERROR);
memset(dt, 0, dynamicSize);
dynamicTables = dt;
memcpy((uchar*)dt->bitrevNibble, BITREV16, 16);
oBuf = (struct outputBuf *)(dt+1);
dt->hashTable = (ushort *)(oBuf+1);
dt->hashChain = dt->hashTable + hashSlots;
dt->hashSlots = hashSlots;
dt->hashMask = hashSlots - 1;
/* As these are offset rather than pointer, 0 is a valid offset */
/* (unlike NULL), so 0xFFFF is used to denote an unset value */
memset(dt->hashTable, -1, sizeof(ushort)*hashSlots);
memset(dt->hashChain, -1, sizeof(ushort)*chainLen);
/* Generate the code records for the length and distance code tables */
genCodeRecs(dt->lenCodes, SIZE(dt->lenCodes),
lenCodes_GEN, sizeof(lenCodes_GEN),
257,2);
((codeRecord *)(dynamicTables->lenCodes+285-257))->extraBits=0; /* odd ball entry */
genCodeRecs(dt->distCodes, SIZE(dt->distCodes),
distCodes_GEN, sizeof(distCodes_GEN),
0,0);
}
/*
* Routines to output bit streams and byte streams to the output buffer
*/
void resizeBuffer(void) {
uchar *nb;
DBG_COUNT(2);
/* The output buffer is given an initial size estimate; if we are running */
/* out of space then extrapolate the size using the current compression ratio */
double newEstimate = (((double) oBuf->len)*oBuf->inLen) / oBuf->inNdx;
oBuf->size = 128 + (uint) newEstimate;
if (!(nb = realloc(oBuf->buffer, oBuf->size)))
UZLIB_THROW(UZLIB_MEMORY_ERROR);
oBuf->buffer = nb;
}
void outBits(ushort bits, int nBits) {
DBG_COUNT(3);
oBuf->bits |= bits << oBuf->nBits;
oBuf->nBits += nBits;
if (oBuf->len >= oBuf->size - sizeof(bits))
resizeBuffer();
while (oBuf->nBits >= 8) {
DBG_PRINT("%02x-", oBuf->bits & 0xFF);
oBuf->buffer[oBuf->len++] = oBuf->bits & 0xFF;
oBuf->bits >>= 8;
oBuf->nBits -= 8;
}
}
void outBitsRev(uchar bits, int nBits) {
DBG_COUNT(4);
/* Note that bit reversal only operates on an 8-bit bits field */
uchar bitsRev = (dynamicTables->bitrevNibble[bits & 0x0f]<<4) |
dynamicTables->bitrevNibble[bits>>4];
outBits(bitsRev, nBits);
}
void outBytes(void *bytes, int nBytes) {
DBG_COUNT(5);
int i;
if (oBuf->len >= oBuf->size - nBytes)
resizeBuffer();
/* Note that byte output dumps any bits data so the caller must */
/* flush this first, if necessary */
oBuf->nBits = oBuf->bits = 0;
for (i = 0; i < nBytes; i++) {
DBG_PRINT("%02x-", *((uchar*)bytes+i));
oBuf->buffer[oBuf->len++] = *((uchar*)bytes+i);
}
}
/*
* Output a literal byte as an 8 or 9 bit code
*/
void literal (uchar c) {
DBG_COUNT(6);
DBG_PRINT("sym: %02x %c\n", c, c);
if (oBuf->compDisabled) {
/* We're in an uncompressed block, so just output the byte. */
outBits(c, 8);
} else if (c <= 143) {
/* 0 through 143 are 8 bits long starting at 00110000. */
outBitsRev(0x30 + c, 8);
} else {
/* 144 through 255 are 9 bits long starting at 110010000. */
outBits(1, 1);
outBitsRev(0x90 - 144 + c, 8);
}
}
/*
* Output a dictionary (distance, length) pair as bitstream codes
*/
void copy (int distance, int len) {
DBG_COUNT(7);
const codeRecord *lenCodes = dynamicTables->lenCodes, *l;
const codeRecord *distCodes = dynamicTables->distCodes, *d;
int i, j, k;
assert(!oBuf->compDisabled);
while (len > 0) {
/*
* We can transmit matches of lengths 3 through 258
* inclusive. So if len exceeds 258, we must transmit in
* several steps, with 258 or less in each step.
*
* Specifically: if len >= 261, we can transmit 258 and be
* sure of having at least 3 left for the next step. And if
* len <= 258, we can just transmit len. But if len == 259
* or 260, we must transmit len-3.
*/
int thislen = (len > 260 ? 258 : len <= 258 ? len : len - 3);
len -= thislen;
/*
* Binary-search to find which length code we're
* transmitting.
*/
i = -1;
j = lenCodes_LEN;
while (1) {
assert(j - i >= 2);
k = (j + i) / 2;
if (thislen < lenCodes[k].min)
j = k;
else if (thislen > lenCodes[k].max)
i = k;
else {
l = &lenCodes[k];
break; /* found it! */
}
}
/*
* Transmit the length code. 256-279 are seven bits
* starting at 0000000; 280-287 are eight bits starting at
* 11000000.
*/
if (l->code <= 279) {
outBitsRev((l->code - 256) * 2, 7);
} else {
outBitsRev(0xc0 - 280 + l->code, 8);
}
/*
* Transmit the extra bits.
*/
if (l->extraBits)
outBits(thislen - l->min, l->extraBits);
/*
* Binary-search to find which distance code we're
* transmitting.
*/
i = -1;
j = distCodes_LEN;
while (1) {
assert(j - i >= 2);
k = (j + i) / 2;
if (distance < distCodes[k].min)
j = k;
else if (distance > distCodes[k].max)
i = k;
else {
d = &distCodes[k];
break; /* found it! */
}
}
/*
* Transmit the distance code. Five bits starting at 00000.
*/
outBitsRev(d->code * 8, 5);
/*
* Transmit the extra bits.
*/
if (d->extraBits)
outBits(distance - d->min, d->extraBits);
}
}
/*
* Block compression uses a hashTable to index into a set of search
* chainList, where each chain links together the triples of chars within
* the dictionary (the last MAX_OFFSET bytes of the input buffer) with
* the same hash index. So for compressing a file of 200Kb, say, with a
* 16K dictionary (the largest that we can inflate within the memory
* constraints of the ESP8266), the chainList is 16K slots long, and the
* hashTable is 4K slots long, so a typical chain will have 4 links.
*
* These two tables use 16-bit ushort offsets rather than pointers to
* save memory (essential on the ESP8266).
*
* As per RFC 1951 sec 4, we also implement a "lazy match" procedure
*/
void uzlibCompressBlock(const uchar *src, uint srcLen) {
int i, j, k, l;
uint hashMask = dynamicTables->hashMask;
ushort *hashChain = dynamicTables->hashChain;
ushort *hashTable = dynamicTables->hashTable;
uint hashShift = 24 - dynamicTables->hashBits;
uint lastOffset = 0, lastLen = 0;
oBuf->inLen = srcLen; /* used for output buffer resizing */
DBG_COUNT(9);
for (i = 0; i <= ((int)srcLen) - MIN_MATCH; i++) {
/*
* Calculate a hash on the next three chars using the liblzf hash
* function, then use this via the hashTable to index into the chain
* of triples within the dictionary window which have the same hash.
*
* Note that using 16-bit offsets requires a little manipulation to
* handle wrap-around and recover the correct offset, but all other
* working uses uint offsets simply because the compiler generates
* faster (and smaller in the case of the ESP8266) code.
*
* Also note that this code also works for any tail 2 literals; the
* hash will access beyond the array and will be incorrect, but
* these can't match and will flush the last cache.
*/
const uchar *this = src + i, *comp;
uint base = i & ~OFFSET16_MASK;
uint iOffset = i - base;
uint maxLen = srcLen - i;
uint matchLen = MIN_MATCH - 1;
uint matchOffset = 0;
uint v = (this[0] << 16) | (this[1] << 8) | this[2];
uint hash = ((v >> hashShift) - v) & hashMask;
uint nextOffset = hashTable[hash];
oBuf->inNdx = i; /* used for output buffer resizing */
DBG_COUNT(10);
if (maxLen>MAX_MATCH)
maxLen = MAX_MATCH;
hashTable[hash] = iOffset;
hashChain[iOffset & (MAX_OFFSET-1)] = nextOffset;
for (l = 0; nextOffset != NULL_OFFSET && l<60; l++) {
DBG_COUNT(11);
/* handle the case where base has bumped */
j = base + nextOffset - ((nextOffset < iOffset) ? 0 : (OFFSET16_MASK + 1));
if (i - j > MAX_OFFSET)
break;
for (k = 0, comp = src + j; k < maxLen && this[k] == comp[k]; k++) /* test k first to avoid reading past maxLen */
{}
DBG_ADD_COUNT(12, k);
if (k > matchLen) {
matchOffset = i - j;
matchLen = k;
}
nextOffset = hashChain[nextOffset & (MAX_OFFSET-1)];
}
if (lastOffset) {
if (matchOffset == 0 || lastLen >= matchLen ) {
/* ignore this match (or not) and process last */
DBG_COUNT(14);
copy(lastOffset, lastLen);
DBG_PRINT("dic: %6x %6x %6x\n", i-1, lastLen, lastOffset);
i += lastLen - 1 - 1;
lastOffset = lastLen = 0;
} else {
/* ignore last match and emit a symbol instead; cache this one */
DBG_COUNT(15);
literal(this[-1]);
lastOffset = matchOffset;
lastLen = matchLen;
}
} else { /* no last match */
if (matchOffset) {
DBG_COUNT(16);
/* cache this one */
lastOffset = matchOffset;
lastLen = matchLen;
} else {
DBG_COUNT(17);
/* emit a symbol; last already clear */
literal(this[0]);
}
}
}
if (lastOffset) { /* flush cached match if any */
copy(lastOffset, lastLen);
DBG_PRINT("dic: %6x %6x %6x\n", i, lastLen, lastOffset);
i += lastLen - 1;
}
while (i < srcLen)
literal(src[i++]); /* flush the last few bytes if needed */
}
/*
* This compress wrapper treats the input stream as a single block for
* compression using the default Static huffman block encoding
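*
* For illustration only (not part of this module), a typical call is:
*
*   uchar *img; uint imgLen;
*   int rc = uzlib_compress(&img, &imgLen, src, srcLen);
*
* where src/srcLen are the caller's input buffer; on UZLIB_OK the caller
* owns the returned img buffer and is responsible for freeing it.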
*/
int uzlib_compress (uchar **dest, uint *destLen, const uchar *src, uint srcLen) {
uint crc = ~uzlib_crc32(src, srcLen, ~0);
uint chainLen = srcLen < MAX_OFFSET ? srcLen : MAX_OFFSET;
uint hashSlots, i, j;
int status;
uint FLG_MTIME[] = {0x00088b1f, 0};
ushort XFL_OS = 0x0304;
/* The hash table has 4K slots for a 16K chain and scaling down */
/* accordingly, for an average chain length of 4 links or thereabouts */
for (i = 256, j = 8 - 2; i < chainLen; i <<= 1)
j++;
hashSlots = i >> 2;
if ((status = UZLIB_SETJMP(unwindAddr)) == 0) {
initTables(chainLen, hashSlots);
oBuf->size = srcLen/5; /* initial guess of a 5x compression ratio */
oBuf->buffer = uz_malloc(oBuf->size);
dynamicTables->hashSlots = hashSlots;
dynamicTables->hashBits = j;
if(!oBuf->buffer ) {
status = UZLIB_MEMORY_ERROR;
} else {
/* Output gzip and block headers */
outBytes(FLG_MTIME, sizeof(FLG_MTIME));
outBytes(&XFL_OS, sizeof(XFL_OS));
outBits(1, 1); /* Final block */
outBits(1, 2); /* Static huffman block */
uzlibCompressBlock(src, srcLen); /* Do the compress */
/* Output block finish */
outBits(0, 7); /* close block */
outBits(0, 7); /* Make sure all bits are flushed */
outBytes(&crc, sizeof(crc));
outBytes(&srcLen, sizeof(srcLen));
status = UZLIB_OK;
}
} else {
status = UZLIB_OK;
}
FREE(dynamicTables);
for (i=0; i<20;i++) DBG_PRINT("count %u = %u\n",i,debugCounts[i]);
if (status == UZLIB_OK) {
uchar *trimBuf = realloc(oBuf->buffer, oBuf->len);
*dest = trimBuf ? trimBuf : oBuf->buffer;
*destLen = oBuf->len;
} else {
*dest = NULL;
*destLen = 0;
FREE(oBuf->buffer);
}
return status;
}
/*
* tinfgzip.c - tiny gzip decompressor
* tinflate.c - tiny inflate
*
* The original source headers as below for licence compliance and in
* full acknowledgement of the originator contributions. Modified by
* Terry Ellison 2018 to provide lightweight stream inflate for NodeMCU
* Lua. Modifications are under the standard NodeMCU MIT licence.
*
* Copyright (c) 2003 by Joergen Ibsen / Jibz
* All Rights Reserved
* http://www.ibsensoftware.com/
*
* Copyright (c) 2014-2016 by Paul Sokolovsky
*
* This software is provided 'as-is', without any express
* or implied warranty. In no event will the authors be
* held liable for any damages arising from the use of
* this software.
*
* Permission is granted to anyone to use this software
* for any purpose, including commercial applications,
* and to alter it and redistribute it freely, subject to
* the following restrictions:
*
* 1. The origin of this software must not be
* misrepresented; you must not claim that you
* wrote the original software. If you use this
* software in a product, an acknowledgment in
* the product documentation would be appreciated
* but is not required.
*
* 2. Altered source versions must be plainly marked
* as such, and must not be misrepresented as
* being the original software.
*
* 3. This notice may not be removed or altered from
* any source distribution.
*/
#include <string.h>
#ifdef __XTENSA__
#include "c_stdio.h"
#else
#include <stdio.h>
#endif
#include "uzlib.h"
#ifdef DEBUG_COUNTS
#define DBG_PRINT(...) printf(__VA_ARGS__)
#define DBG_COUNT(n) (debugCounts[n]++)
#define DBG_ADD_COUNT(n,m) (debugCounts[n]+=m)
int debugCounts[20];
#else
#define NDEBUG
#define DBG_PRINT(...)
#define DBG_COUNT(n)
#define DBG_ADD_COUNT(n,m)
#endif
#define SIZE(arr) (sizeof(arr) / sizeof(*(arr)))
jmp_buf unwindAddr;
int dbg_break(void) {return 1;}
typedef uint8_t uchar;
typedef uint16_t ushort;
typedef uint32_t uint;
/* data structures */
typedef struct {
ushort table[16]; /* table of code length counts */
ushort trans[288]; /* code -> symbol translation table */
} UZLIB_TREE;
struct uzlib_data {
/*
* extra bits and base tables for length and distance codes
*/
uchar lengthBits[30];
ushort lengthBase[30];
uchar distBits[30];
ushort distBase[30];
/*
* special ordering of code length codes
*/
uchar clcidx[19];
/*
* dynamic length/symbol and distance trees
*/
UZLIB_TREE ltree;
UZLIB_TREE dtree;
/*
* methods encapsulate handling of the input and output streams
*/
uchar (*get_byte)(void);
void (*put_byte)(uchar b);
uchar (*recall_byte)(uint offset);
/*
* Other state values
*/
uint destSize;
uint tag;
uint bitcount;
uint lzOffs;
int bType;
int bFinal;
uint curLen;
uint checksum;
};
/*
* Note on changes to layout, naming, etc. This module combines extracts
* from 3 code files from two sources (Sokolovsky, Ibsen et al) with perhaps
* 30% from me, Terry Ellison. These sources had inconsistent layout and
* naming conventions, plus extra conditional handling of platforms that
* cannot support NodeMCU. (This code is intended to be compiled and executed
* on GCC POSIX and Xtensa newlib environments.) So I have (1) reformatted
* this file in line with NodeMCU rules; (2) demoted all private data and
* functions to static and removed the redundant name prefixes; (3) reordered
* functions into a more logical order; (4) added some ESP architecture
* optimisations, for example these IoT devices are very RAM limited, so
* statically allocating large RAM blocks is against programming guidelines.
*/
static void skip_bytes(UZLIB_DATA *d, int num) {
if (num) /* Skip a fixed number of bytes */
while (num--) (void) d->get_byte();
else /* Skip to next nullchar */
while (d->get_byte()) {}
}
static uint16_t get_uint16(UZLIB_DATA *d) {
uint16_t v = d->get_byte();
return v | (d->get_byte() << 8);
}
static uint get_le_uint32 (UZLIB_DATA *d) {
uint v = get_uint16(d);
return v | ((uint) get_uint16(d) << 16);
}
/* get one bit from source stream */
static int getbit (UZLIB_DATA *d) {
uint bit;
/* check if tag is empty */
if (!d->bitcount--) {
/* load next tag */
d->tag = d->get_byte();
d->bitcount = 7;
}
/* shift bit out of tag */
bit = d->tag & 0x01;
d->tag >>= 1;
return bit;
}
/* read a num bit value from a stream and add base */
static uint read_bits (UZLIB_DATA *d, int num, int base) {
/* This is an optimised version which doesn't call getbit num times */
if (!num)
return base;
uint i, n = (((uint)-1)<<num);
for (i = d->bitcount; i < num; i +=8)
d->tag |= ((uint)d->get_byte()) << i;
n = d->tag & ~n;
d->tag >>= num;
d->bitcount = i - num;
return base + n;
}
/* --------------------------------------------------- *
* -- uninitialized global data (static structures) -- *
* --------------------------------------------------- */
/*
* Constants are stored in flash memory on the ESP8266 NodeMCU firmware
* builds, but only word-aligned data accesses are supported in hardware, so
* short and byte accesses are handled by a S/W exception handler and
* are SLOW. RAM is also at a premium, especially for static initialised vars,
* so we malloc a single block on first call to hold all tables and call
* the dynamic generator to generate malloced RAM tables that have the
* same content as the above statically declared versions.
*
* This might seem a bit convoluted but this runs faster and takes up
* less memory than the static version on the ESP8266.
*/
#define CLCIDX_INIT \
"\x10\x11\x12\x00\x08\x07\x09\x06\x0a\x05\x0b\x04\x0c\x03\x0d\x02\x0e\x01\x0f"
/* ----------------------- *
* -- utility functions -- *
* ----------------------- */
/* build extra bits and base tables */
static void build_bits_base (uchar *bits, ushort *base,
int delta, int first) {
int i, sum;
/* build bits table */
for (i = 0; i < delta; ++i) bits[i] = 0;
for (i = 0; i < 30 - delta; ++i) bits[i + delta] = i / delta;
/* build base table */
for (sum = first, i = 0; i < 30; ++i) {
base[i] = sum;
sum += 1 << bits[i];
}
}
/* build the fixed huffman trees */
static void build_fixed_trees (UZLIB_TREE *lt, UZLIB_TREE *dt) {
int i;
/* build fixed length tree */
for (i = 0; i < 7; ++i) lt->table[i] = 0;
lt->table[7] = 24;
lt->table[8] = 152;
lt->table[9] = 112;
for (i = 0; i < 24; ++i) lt->trans[i] = 256 + i;
for (i = 0; i < 144; ++i) lt->trans[24 + i] = i;
for (i = 0; i < 8; ++i) lt->trans[24 + 144 + i] = 280 + i;
for (i = 0; i < 112; ++i) lt->trans[24 + 144 + 8 + i] = 144 + i;
/* build fixed distance tree */
for (i = 0; i < 5; ++i) dt->table[i] = 0;
dt->table[5] = 32;
for (i = 0; i < 32; ++i) dt->trans[i] = i;
}
/* given an array of code lengths, build a tree */
static void build_tree (UZLIB_TREE *t, const uchar *lengths, uint num) {
ushort offs[16];
uint i, sum;
/* clear code length count table */
for (i = 0; i < 16; ++i)
t->table[i] = 0;
/* scan symbol lengths, and sum code length counts */
for (i = 0; i < num; ++i)
t->table[lengths[i]]++;
t->table[0] = 0;
/* compute offset table for distribution sort */
for (sum = 0, i = 0; i < 16; ++i) {
offs[i] = sum;
sum += t->table[i];
}
/* create code->symbol translation table (symbols sorted by code) */
for (i = 0; i < num; ++i) {
if (lengths[i])
t->trans[offs[lengths[i]]++] = i;
}
}
/* ---------------------- *
* -- decode functions -- *
* ---------------------- */
/* given a data stream and a tree, decode a symbol */
static int decode_symbol (UZLIB_DATA *d, UZLIB_TREE *t) {
int sum = 0, cur = 0, len = 0;
/* get more bits while code value is above sum */
do {
cur = 2*cur + getbit(d);
if (++len == SIZE(t->table))
return UZLIB_DATA_ERROR;
sum += t->table[len];
cur -= t->table[len];
} while (cur >= 0);
sum += cur;
if (sum < 0 || sum >= SIZE(t->trans))
return UZLIB_DATA_ERROR;
return t->trans[sum];
}
/* given a data stream, decode dynamic trees from it */
static int decode_trees (UZLIB_DATA *d, UZLIB_TREE *lt, UZLIB_TREE *dt) {
uchar lengths[288+32];
uint hlit, hdist, hclen, hlimit;
uint i, num, length;
/* get 5 bits HLIT (257-286) */
hlit = read_bits(d, 5, 257);
/* get 5 bits HDIST (1-32) */
hdist = read_bits(d, 5, 1);
/* get 4 bits HCLEN (4-19) */
hclen = read_bits(d, 4, 4);
for (i = 0; i < 19; ++i) lengths[i] = 0;
/* read code lengths for code length alphabet */
for (i = 0; i < hclen; ++i) {
/* get 3 bits code length (0-7) */
uint clen = read_bits(d, 3, 0);
lengths[d->clcidx[i]] = clen;
}
/* build code length tree, temporarily use length tree */
build_tree(lt, lengths, 19);
/* decode code lengths for the dynamic trees */
hlimit = hlit + hdist;
for (num = 0; num < hlimit; ) {
int sym = decode_symbol(d, lt);
uchar fill_value = 0;
int lbits, lbase = 3;
/* error decoding */
if (sym < 0)
return sym;
switch (sym) {
case 16:
/* copy previous code length 3-6 times (read 2 bits) */
fill_value = lengths[num - 1];
lbits = 2;
break;
case 17:
/* repeat code length 0 for 3-10 times (read 3 bits) */
lbits = 3;
break;
case 18:
/* repeat code length 0 for 11-138 times (read 7 bits) */
lbits = 7;
lbase = 11;
break;
default:
/* values 0-15 represent the actual code lengths */
lengths[num++] = sym;
/* continue the for loop */
continue;
}
/* special code length 16-18 are handled here */
length = read_bits(d, lbits, lbase);
if (num + length > hlimit)
return UZLIB_DATA_ERROR;
for (; length; --length)
lengths[num++] = fill_value;
}
/* build dynamic trees */
build_tree(lt, lengths, hlit);
build_tree(dt, lengths + hlit, hdist);
return UZLIB_OK;
}
/* ----------------------------- *
* -- block inflate functions -- *
* ----------------------------- */
/* given a stream and two trees, inflate a block of data */
static int inflate_block_data (UZLIB_DATA *d, UZLIB_TREE *lt, UZLIB_TREE *dt) {
if (d->curLen == 0) {
int dist;
int sym = decode_symbol(d, lt);
/* literal byte */
if (sym < 256) {
DBG_PRINT("huff sym: %02x %c\n", sym, sym);
d->put_byte(sym);
return UZLIB_OK;
}
/* end of block */
if (sym == 256)
return UZLIB_DONE;
/* substring from sliding dictionary */
sym -= 257;
/* possibly get more bits from length code */
d->curLen = read_bits(d, d->lengthBits[sym], d->lengthBase[sym]);
dist = decode_symbol(d, dt);
/* possibly get more bits from distance code */
d->lzOffs = read_bits(d, d->distBits[dist], d->distBase[dist]);
DBG_PRINT("huff dict: -%u for %u\n", d->lzOffs, d->curLen);
}
/* copy next byte from dict substring */
uchar b = d->recall_byte(d->lzOffs);
DBG_PRINT("huff dict byte(%u): -%u - %02x %c\n\n",
d->curLen, d->lzOffs, b, b);
d->put_byte(b);
d->curLen--;
return UZLIB_OK;
}
/* inflate an uncompressed block of data */
static int inflate_uncompressed_block (UZLIB_DATA *d) {
if (d->curLen == 0) {
uint length = get_uint16(d);
uint invlength = get_uint16(d);
/* check length */
if (length != (~invlength & 0x0000ffff))
return UZLIB_DATA_ERROR;
/* increment length to properly return UZLIB_DONE below, without
producing data at the same time */
d->curLen = length + 1;
/* make sure we start next block on a byte boundary */
d->bitcount = 0;
}
if (--d->curLen == 0) {
return UZLIB_DONE;
}
d->put_byte(d->get_byte());
return UZLIB_OK;
}
/* -------------------------- *
* -- main parse functions -- *
* -------------------------- */
static int parse_gzip_header(UZLIB_DATA *d) {
/* check id bytes */
if (d->get_byte() != 0x1f || d->get_byte() != 0x8b)
return UZLIB_DATA_ERROR;
if (d->get_byte() != 8) /* check method is deflate */
return UZLIB_DATA_ERROR;
uchar flg = d->get_byte();/* get flag byte */
if (flg & 0xe0)/* check that reserved bits are zero */
return UZLIB_DATA_ERROR;
skip_bytes(d, 6); /* skip rest of base header of 10 bytes */
if (flg & UZLIB_FEXTRA) /* skip extra data if present */
skip_bytes(d, get_uint16(d));
if (flg & UZLIB_FNAME) /* skip file name if present */
skip_bytes(d,0);
if (flg & UZLIB_FCOMMENT) /* skip file comment if present */
skip_bytes(d,0);
if (flg & UZLIB_FHCRC) /* ignore header crc if present */
skip_bytes(d,2);
return UZLIB_OK;
}
/* inflate next byte of compressed stream */
static int uncompress_stream (UZLIB_DATA *d) {
do {
int res;
/* start a new block */
if (d->bType == -1) {
next_blk:
/* read final block flag */
d->bFinal = getbit(d);
/* read block type (2 bits) */
d->bType = read_bits(d, 2, 0);
DBG_PRINT("Started new block: type=%d final=%d\n", d->bType, d->bFinal);
if (d->bType == 1) {
/* build fixed huffman trees */
build_fixed_trees(&d->ltree, &d->dtree);
} else if (d->bType == 2) {
/* decode trees from stream */
res = decode_trees(d, &d->ltree, &d->dtree);
if (res != UZLIB_OK)
return res;
}
}
/* process current block */
switch (d->bType) {
case 0:
/* decompress uncompressed block */
res = inflate_uncompressed_block(d);
break;
case 1:
case 2:
/* decompress block with fixed or dynamic huffman trees. These */
/* trees were decoded previously, so it's the same routine for both */
res = inflate_block_data(d, &d->ltree, &d->dtree);
break;
default:
return UZLIB_DATA_ERROR;
}
if (res == UZLIB_DONE && !d->bFinal) {
/* the block has ended (without producing more data), but we
can't return without data, so start processing the next block */
goto next_blk;
}
if (res != UZLIB_OK)
return res;
} while (--d->destSize);
return UZLIB_OK;
}
/*
* This implementation has a different use case to Paul Sokolovsky's
* uzlib implementation, in that it is designed to target IoT devices
* such as the ESP8266. Here clarity and compact code size are an
* advantage, but the ESP8266 only has 40-45Kb free heap, and has to
* process files with an unpacked size of up to 256Kb, so a streaming
* implementation is essential.
*
* I have taken the architectural decision to hide the implementation
* details from the uncompress routines, and the caller must provide
* three support routines to handle the streaming:
*
*   uchar get_byte(void)
*   void put_byte(uchar b)
*   uchar recall_byte(uint offset)
*
* The last of these must be able to recall an output byte with an offset
* of up to the maximum dictionary size.
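*
* For illustration only (not part of this module), a caller using simple
* memory buffers might wire the callbacks up as:
*
*   static uchar get_byte(void)           { return inBuf[inNdx++]; }
*   static void  put_byte(uchar b)        { outBuf[outNdx++] = b; }
*   static uchar recall_byte(uint offset) { return outBuf[outNdx - offset]; }
*
*   uint crc; void *state;
*   int rc = uzlib_inflate(get_byte, put_byte, recall_byte,
*                          unpackedLen, &crc, &state);
*
* Here inBuf/outBuf/inNdx/outNdx/unpackedLen are the caller's own buffers
* and counters; a real NodeMCU caller streams the data rather than holding
* it all in RAM.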
*/
int uzlib_inflate (
uchar (*get_byte)(void),
void (*put_byte)(uchar v),
uchar (*recall_byte)(uint offset),
uint len, uint *crc, void **state) {
int res;
/* initialize decompression structure */
UZLIB_DATA *d = (UZLIB_DATA *) uz_malloc(sizeof(*d));
if (!d)
return UZLIB_MEMORY_ERROR;
*state = d;
d->bitcount = 0;
d->bFinal = 0;
d->bType = -1;
d->curLen = 0;
d->destSize = len;
d->get_byte = get_byte;
d->put_byte = put_byte;
d->recall_byte = recall_byte;
if ((res = UZLIB_SETJMP(unwindAddr)) != 0) {
if (crc)
*crc = d->checksum;
/* handle long jump */
if (d) {
uz_free(d);
*state = NULL;
}
return res;
}
/* create RAM copy of clcidx byte array */
memcpy(d->clcidx, CLCIDX_INIT, sizeof(d->clcidx));
/* build extra bits and base tables */
build_bits_base(d->lengthBits, d->lengthBase, 4, 3);
build_bits_base(d->distBits, d->distBase, 2, 1);
d->lengthBits[28] = 0; /* fix a special case */
d->lengthBase[28] = 258;
if ((res = parse_gzip_header(d))== UZLIB_OK)
while ((res = uncompress_stream(d)) == UZLIB_OK)
{}
if (res == UZLIB_DONE) {
d->checksum = get_le_uint32(d);
(void) get_le_uint32(d); /* already got length so ignore */
}
UZLIB_THROW(res);
}
Whilst the Lua Virtual Machine (LVM) can compile Lua source dynamically, and this can prove
very flexible during development, you will use less RAM if you precompile
your sources before execution.
## Compiling Lua directly on your ESP8266
- The standard [string.dump \(function)](https://www.lua.org/manual/5.1/manual.html#pdf-string.dump) returns a string containing the binary code for the specified function and you can write this to a SPIFFS file.
- [`node.compile()`](modules/node/#nodecompile) wraps this 'load and dump to file' operation into a single atomic library call.
The issue with both of these approaches is that compilation is RAM-intensive, so
you will need to break your application into many small, separately compilable
modules in order to avoid hitting RAM constraints. This can be mitigated
by doing all compiles immediately after a [`node.restart()`](modules/node/#noderestart).
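As a minimal sketch of the `node.compile()` approach above (the file name is illustrative and error handling is omitted), the on-module workflow looks like this:

```lua
-- compile the source to app.lc in SPIFFS, then remove the source to save space
node.compile("app.lua")
file.remove("app.lua")

-- later executions load the compiled file with no compilation RAM overhead
dofile("app.lc")
```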
## Compiling Lua on your PC for Uploading
If you install `lua` on your development PC or laptop then you can use the standard Lua
compiler to syntax-check any Lua source before downloading it to the ESP8266 module. However,
the NodeMCU compiler output uses different data types (e.g. it supports ROMtables), so the
compiled output from standard `luac` cannot run on the ESP8266.
Compiling source on one platform for use on another (e.g. Intel 64-bit Windows to ESP8266) is
known as _cross-compilation_, and the NodeMCU firmware build now automatically generates
a `luac.cross` image as standard in the firmware root directory; this can be used to
compile and to syntax-check Lua source on the development machine for execution under
NodeMCU Lua on the ESP8266.
`luac.cross` will translate Lua source files into binary files that can be later loaded
and executed by the LVM. Such binary files, which normally have the `.lc` (Lua code)
extension, are loaded directly by the LVM without the RAM overhead of compilation.
Each `luac.cross` execution produces a single output file containing the bytecodes
for all the source files given. By default this output file is named `luac.out`, but you
would normally change this with the `-o` option. If you wish you can mix Lua source files (and
even Lua binary files) on the command line. You can use '-' to indicate
standard input as a source file and '--' to signal the end of options (that is, all
remaining arguments will be treated as files even if they start with '-').
`luac.cross` supports the standard `luac` options `-l`, `-o`, `-p`, `-s` and `-v`,
as well as the `-h` option which produces the current help overview.
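For example (the file names here are illustrative):

```bash
# compile two source files into a single app.lc, stripping debug information
luac.cross -s -o app.lc module1.lua module2.lua

# syntax-check a source file without writing any output
luac.cross -p module1.lua
```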
NodeMCU also implements some major extensions to support the use of the
[Lua Flash Store (LFS)](lfs.md), in that it can produce an LFS image file which
is loaded as an overlay into the firmware in flash memory; the LVM can access and
execute this code directly from flash without needing to store the code in RAM. This
mode is enabled by specifying the `-f` option.
`luac.cross` supports two separate image formats:
- **Compact relocatable**. This is selected by the `-f` option. Here the compiler compresses the compiled binary so that the image is small for downloading over WiFi/WAN (e.g. a full 64Kb LFS image is compressed down to a 22Kb file). The LVM processes such an image in two passes: the integrity of the image is validated on the first, and the LFS itself is updated on the second. The LVM also checks that the image will fit in the allocated LFS region before loading, but you can also use the `-m` option to throw a compile error if the image is too large; for example `-m 0x10000` will raise an error if the image will not load into a 64Kb region.
- **Absolute**. This is selected by the `-a <baseAddr>` option. Here the compiler fixes all addresses relative to the base address specified. This allows an LFS absolute image to be loaded directly into the ESP flash using a tool such as `esptool.py`.
These two modes target two separate use cases: the compact relocatable format
facilitates simple OTA updates to an LFS-based Lua application; the absolute format
facilitates factory installation of LFS-based applications.
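The following invocations are illustrative only; the file names, source list and base address are placeholders, and `luac.cross -h` shows the exact options supported by your build:

```bash
# compact relocatable LFS image; -m 0x10000 raises an error if it exceeds 64Kb
luac.cross -f -m 0x10000 -o lfs.img src/*.lua

# absolute LFS image fixed at the stated flash base address
luac.cross -a 0x40210000 -o lfs.abs src/*.lua
```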
Also note that the Makefile in `app/lua/luac_cross` can be run on its own to build
just the `luac.cross` image. You must first ensure that the following options in
`app/include/user_config.h` are matched to your target configuration:
```c
//#define LUA_NUMBER_INTEGRAL // uncomment if you want an integer build
//#define LUA_FLASH_STORE 0x10000 // uncomment if you want LFS support
```
Developers have successfully built this on Linux (including Docker builds), macOS, Win10/WSL and WinX/Cygwin.
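A hedged sketch of building just the cross-compiler (run from the repository root; the exact invocation may vary between releases):

```bash
# build only the luac.cross host tool using its own Makefile
make -C app/lua/luac_cross
```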
@@ -199,11 +199,10 @@ Reload the [LFS (Lua Flash Store)](../lfs.md) with the flash image provided. Fla
`node.flashreload(imageName)`
#### Parameters
- `imageName` The of name of a image file in the filesystem to be loaded into the LFS.
+ `imageName` The name of an image file in the filesystem to be loaded into the LFS.
#### Returns
- If the LFS image has the incorrect signature or size, then `false` is returned.
- In the case of the `imagename` being a valid LFS image, this is then loaded into flash. The ESP is then immediately rebooted so control is not returned to the calling application.
+ `Error message` LFS images are now gzip compressed. In the case of the `imagename` being a valid LFS image, this is expanded and loaded into flash. The ESP is then immediately rebooted, _so control is not returned to the calling Lua application_ in the case of a successful reload. This reload process internally makes two passes through the LFS image file; on the first it validates the file and header formats and detects any errors. If any is detected, an error string is returned.
## node.flashsize()
@@ -78,3 +78,24 @@ end
G.module = nil -- disable Lua 5.0 style modules to save RAM
package.seeall = nil
--[[-------------------------------------------------------------------------------
These replace the builtin loadfile & dofile with versions which preferentially
load the corresponding module from LFS if present. Flipping the search order
is an exercise left to the reader.
---------------------------------------------------------------------------------]]
local lf, df = loadfile, dofile   -- keep references to the original builtins
G.loadfile = function(n)
local mod, ext = n:match("(.*)%.(l[uc]a?)");
local fn, ba = index(mod)
if ba or (ext ~= 'lc' and ext ~= 'lua') then return lf(n) else return fn end
end
G.dofile = function(n)
local mod, ext = n:match("(.*)%.(l[uc]a?)");
local fn, ba = index(mod)
if ba or (ext ~= 'lc' and ext ~= 'lua') then return df(n) else return fn() end
end
@@ -26,6 +26,7 @@ pages:
- Building the firmware: 'en/build.md'
- Flashing the firmware: 'en/flash.md'
- Uploading code: 'en/upload.md'
- Compiling code: 'en/compiling.md'
- Support: 'en/support.md'
- FAQs:
- Lua Developer FAQ: 'en/lua-developer-faq.md'