Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug            |   6
-rw-r--r--  lib/Kconfig.kasan            |   2
-rw-r--r--  lib/Makefile                 |   2
-rw-r--r--  lib/bootconfig.c             |   2
-rw-r--r--  lib/decompress_unxz.c        |  10
-rw-r--r--  lib/flex_proportions.c       |  28
-rw-r--r--  lib/iov_iter.c               |  36
-rw-r--r--  lib/kunit/executor_test.c    |   4
-rw-r--r--  lib/locking-selftest.c       |   2
-rw-r--r--  lib/packing.c                |   2
-rw-r--r--  lib/pci_iomap.c              |  43
-rw-r--r--  lib/random32.c               |   1
-rw-r--r--  lib/sbitmap.c                |  95
-rw-r--r--  lib/xz/Kconfig               |  13
-rw-r--r--  lib/xz/xz_dec_lzma2.c        | 182
-rw-r--r--  lib/xz/xz_dec_stream.c       |   6
-rw-r--r--  lib/xz/xz_dec_syms.c         |   9
-rw-r--r--  lib/xz/xz_private.h          |   3
-rw-r--r--  lib/zlib_inflate/inffast.c   |  13
19 files changed, 415 insertions, 44 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ed4a31e34098..40e4766bc541 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -295,7 +295,7 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
- depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
depends on !DEBUG_INFO_BTF
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
@@ -346,7 +346,7 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1536 if (!64BIT && PARISC)
+ default 1536 if (!64BIT && (PARISC || XTENSA))
default 1024 if (!64BIT && !PARISC)
default 2048 if 64BIT
help
@@ -458,7 +458,7 @@ config STACK_VALIDATION
config VMLINUX_VALIDATION
bool
- depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT
+ depends on STACK_VALIDATION && DEBUG_ENTRY
default y
config VMLINUX_MAP
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 1e2d10f86011..cdc842d090db 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -66,6 +66,7 @@ choice
config KASAN_GENERIC
bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
@@ -86,6 +87,7 @@ config KASAN_GENERIC
config KASAN_SW_TAGS
bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
diff --git a/lib/Makefile b/lib/Makefile
index 5efd1b435a37..a841be5244ac 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index f8419cff1147..5ae248b29373 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -792,7 +792,7 @@ void __init xbc_destroy_all(void)
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
- memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+ memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_nodes = NULL;
brace_index = 0;
}
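The bootconfig hunk above is purely a calling-convention switch: memblock_free_ptr() takes the virtual pointer directly, so the __pa() conversion disappears. A minimal sketch of the before/after pattern, using only names visible in the hunk:

    /* old: memblock_free() wants a physical address, hence __pa() */
    memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);

    /* new: memblock_free_ptr() takes the virtual pointer directly */
    memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);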
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index a2f38e23004a..9f4262ee33a5 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -20,8 +20,8 @@
*
* The worst case for in-place decompression is that the beginning of
* the file is compressed extremely well, and the rest of the file is
- * uncompressible. Thus, we must look for worst-case expansion when the
- * compressor is encoding uncompressible data.
+ * incompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding incompressible data.
*
* The structure of the .xz file in case of a compressed kernel is as follows.
* Sizes (as bytes) of the fields are in parenthesis.
@@ -58,7 +58,7 @@
* uncompressed size of the payload is in practice never less than the
* payload size itself. The LZMA2 format would allow uncompressed size
* to be less than the payload size, but no sane compressor creates such
- * files. LZMA2 supports storing uncompressible data in uncompressed form,
+ * files. LZMA2 supports storing incompressible data in uncompressed form,
* so there's never a need to create payloads whose uncompressed size is
* smaller than the compressed size.
*
@@ -167,8 +167,8 @@
* memeq and memzero are not used much and any remotely sane implementation
* is fast enough. memcpy/memmove speed matters in multi-call mode, but
* the kernel image is decompressed in single-call mode, in which only
- * memcpy speed can matter and only if there is a lot of uncompressible data
- * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
+ * memmove speed can matter and only if there is a lot of incompressible data
+ * (LZMA2 stores incompressible chunks in uncompressed form). Thus, the
* functions below should just be kept small; it's probably not worth
* optimizing for speed.
*/
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 451543937524..53e7eb1dd76c 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -217,11 +217,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
}
/* Event of type pl happened */
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr)
{
fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
+ percpu_counter_add(&p->events, nr);
}
void fprop_fraction_percpu(struct fprop_global *p,
@@ -253,20 +254,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
}
/*
- * Like __fprop_inc_percpu() except that event is counted only if the given
+ * Like __fprop_add_percpu() except that event is counted only if the given
* type has fraction smaller than @max_frac/FPROP_FRAC_BASE
*/
-void __fprop_inc_percpu_max(struct fprop_global *p,
- struct fprop_local_percpu *pl, int max_frac)
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr)
{
if (unlikely(max_frac < FPROP_FRAC_BASE)) {
unsigned long numerator, denominator;
+ s64 tmp;
fprop_fraction_percpu(p, pl, &numerator, &denominator);
- if (numerator >
- (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
+ /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
+ tmp = (u64)denominator * max_frac -
+ ((u64)numerator << FPROP_FRAC_SHIFT);
+ if (tmp < 0) {
+ /* Maximum fraction already exceeded? */
return;
+ } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
+ /* Add just enough for the fraction to saturate */
+ nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
+ FPROP_FRAC_BASE - max_frac);
+ }
}
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, nr);
}
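The clamp in __fprop_add_percpu_max() follows from requiring that the fraction not pass max_frac/FPROP_FRAC_BASE after the addition: since adding nr events raises both the numerator and the denominator by nr, FPROP_FRAC_BASE * (numerator + nr) <= max_frac * (denominator + nr) rearranges (with FPROP_FRAC_BASE == 1 << FPROP_FRAC_SHIFT) to nr * (FPROP_FRAC_BASE - max_frac) <= tmp, and the round-up division picks the smallest nr that just saturates the cap. A standalone userspace sketch of the same arithmetic with made-up numbers, purely illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define FRAC_SHIFT 10
    #define FRAC_BASE  (1LL << FRAC_SHIFT)

    int main(void)
    {
        int64_t num = 600, den = 1024, max_frac = 614;  /* cap at ~60% */
        int64_t nr = 100;                               /* requested events */
        int64_t tmp = den * max_frac - (num << FRAC_SHIFT);

        if (tmp < 0)
            nr = 0;  /* fraction already above the cap: count nothing */
        else if (tmp < nr * (FRAC_BASE - max_frac))
            /* round up: add just enough to saturate the cap */
            nr = (tmp + FRAC_BASE - max_frac - 1) / (FRAC_BASE - max_frac);

        printf("counting %lld of 100 requested events\n", (long long)nr);
        return 0;
    }

With these numbers tmp = 14336 and the clamp yields nr = 35.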
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f2d50d69a6c3..755c10c5138c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
return 0;
}
EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ * iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+ if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+ !iov_iter_is_kvec(i)))
+ return;
+ i->iov_offset = state->iov_offset;
+ i->count = state->count;
+ /*
+ * For the *vec iters, nr_segs + iov is constant - if we increment
+ * the vec, then we also decrement the nr_segs count. Hence we don't
+ * need to track both of these, just one is enough and we can derive
+ * the other from it. ITER_KVEC and ITER_IOVEC are the same struct
+ * size, so we can just increment the iov pointer as they are unioned.
+ * ITER_BVEC _may_ be the same size on some archs, but on others it is
+ * not. Be safe and handle it separately.
+ */
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ if (iov_iter_is_bvec(i))
+ i->bvec -= state->nr_segs - i->nr_segs;
+ else
+ i->iov -= state->nr_segs - i->nr_segs;
+ i->nr_segs = state->nr_segs;
+}
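A hedged sketch of the save/restore pairing this helper completes; iov_iter_save_state() comes from the same series, while do_io() and the retry condition are hypothetical:

    struct iov_iter_state state;
    ssize_t ret;

    iov_iter_save_state(iter, &state);    /* snapshot position + count */
    ret = do_io(iter);                    /* may partially advance iter */
    if (ret == -EAGAIN)
        iov_iter_restore(iter, &state);   /* rewind before retrying */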
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index cdbe54b16501..e14a18af573d 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
/* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
if (IS_ERR_OR_NULL(to_free))
return;
- kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
- (void *)to_free);
+ kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+ (void *)to_free);
}
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 161108e5d2fe..71652e1c397c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -258,7 +258,7 @@ static void init_shared_classes(void)
#define WWAF(x) ww_acquire_fini(x)
#define WWL(x, c) ww_mutex_lock(x, c)
-#define WWT(x) ww_mutex_trylock(x)
+#define WWT(x) ww_mutex_trylock(x, NULL)
#define WWL1(x) ww_mutex_lock(x, NULL)
#define WWU(x) ww_mutex_unlock(x)
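The WWT() update tracks ww_mutex_trylock() growing an acquire-context argument; the selftests pass NULL. A hedged caller sketch given an initialized struct ww_acquire_ctx ctx (the lock and work names are hypothetical):

    if (ww_mutex_trylock(&obj->lock, &ctx)) {
        /* acquired without blocking, still ordered within ctx */
        do_work(obj);
        ww_mutex_unlock(&obj->lock);
    }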
diff --git a/lib/packing.c b/lib/packing.c
index 6ed72dccfdb5..9a72f4bbf0e2 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include <linux/packing.h>
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 2d3eb1cb73b8..ce39ce9f3526 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
#endif /* CONFIG_PCI */
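For reference, the map/unmap pairing this default implementation serves, sketched from a typical driver probe path (BAR number, register offset, and error handling are hypothetical):

    void __iomem *regs = pci_iomap(pdev, 0, 0);   /* BAR 0, whole length */

    if (!regs)
        return -ENOMEM;
    writel(1, regs + 0x10);                       /* hypothetical register */
    pci_iounmap(pdev, regs);   /* skips the ioport range per the NOTE above */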
diff --git a/lib/random32.c b/lib/random32.c
index 4d0e05e471d7..a57a0e18819d 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/bitops.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
#include <trace/events/random.h>
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index b25db9be938a..2709ab825499 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -489,6 +489,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset)
+{
+ struct sbitmap *sb = &sbq->sb;
+ unsigned int hint, depth;
+ unsigned long index, nr;
+ int i;
+
+ if (unlikely(sb->round_robin))
+ return 0;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+
+ index = SB_NR_TO_INDEX(sb, hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ struct sbitmap_word *map = &sb->map[index];
+ unsigned long get_mask;
+
+ sbitmap_deferred_clear(map);
+ if (map->word == (1UL << (map->depth - 1)) - 1)
+ continue;
+
+ nr = find_first_zero_bit(&map->word, map->depth);
+ if (nr + nr_tags <= map->depth) {
+ atomic_long_t *ptr = (atomic_long_t *) &map->word;
+ int map_tags = min_t(int, nr_tags, map->depth);
+ unsigned long val, ret;
+
+ get_mask = ((1UL << map_tags) - 1) << nr;
+ do {
+ val = READ_ONCE(map->word);
+ ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
+ } while (ret != val);
+ get_mask = (get_mask & ~ret) >> nr;
+ if (get_mask) {
+ *offset = nr + (index << sb->shift);
+ update_alloc_hint_after_get(sb, depth, hint,
+ *offset + map_tags - 1);
+ return get_mask;
+ }
+ }
+ /* Jump to next index. */
+ if (++index >= sb->map_nr)
+ index = 0;
+ }
+
+ return 0;
+}
+
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth)
{
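A hedged sketch of how a caller consumes the (offset, mask) contract of __sbitmap_queue_get_batch() above; the tags[] bookkeeping is hypothetical:

    unsigned int offset, i = 0;
    unsigned long mask = __sbitmap_queue_get_batch(sbq, nr_tags, &offset);

    while (mask) {
        unsigned int bit = __ffs(mask);   /* lowest granted bit */

        tags[i++] = offset + bit;         /* absolute tag number */
        mask &= mask - 1;                 /* consume that bit */
    }
    /* i may come back short (or 0): fall back to single-tag gets */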
@@ -577,6 +628,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
+static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
+{
+ if (likely(!sb->round_robin && tag < sb->depth))
+ data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
+}
+
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags)
+{
+ struct sbitmap *sb = &sbq->sb;
+ unsigned long *addr = NULL;
+ unsigned long mask = 0;
+ int i;
+
+ smp_mb__before_atomic();
+ for (i = 0; i < nr_tags; i++) {
+ const int tag = tags[i] - offset;
+ unsigned long *this_addr;
+
+ /* since we're clearing a batch, skip the deferred map */
+ this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
+ if (!addr) {
+ addr = this_addr;
+ } else if (addr != this_addr) {
+ atomic_long_andnot(mask, (atomic_long_t *) addr);
+ mask = 0;
+ addr = this_addr;
+ }
+ mask |= (1UL << SB_NR_TO_BIT(sb, tag));
+ }
+
+ if (mask)
+ atomic_long_andnot(mask, (atomic_long_t *) addr);
+
+ smp_mb__after_atomic();
+ sbitmap_queue_wake_up(sbq);
+ sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
+ tags[nr_tags - 1] - offset);
+}
+
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
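And the matching release side: sbitmap_queue_clear_batch() issues one atomic andnot per word instead of one per tag. A hedged equivalence sketch, with offset and tags[] as in the get path:

    sbitmap_queue_clear_batch(sbq, offset, tags, nr_tags);

    /* roughly replaces the per-tag loop: */
    for (i = 0; i < nr_tags; i++)
        sbitmap_queue_clear(sbq, tags[i] - offset, cpu);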
@@ -601,9 +692,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
*/
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq);
-
- if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
- *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
+ sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 5cb50245a878..adce22ac18d6 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -39,6 +39,19 @@ config XZ_DEC_SPARC
default y
select XZ_DEC_BCJ
+config XZ_DEC_MICROLZMA
+ bool "MicroLZMA decoder"
+ default n
+ help
+ MicroLZMA is a header format variant where the first byte
+ of a raw LZMA stream (without the end of stream marker) has
+ been replaced with a bitwise-negation of the lc/lp/pb
+ properties byte. MicroLZMA was created to be used in EROFS
+ but it can be used elsewhere too when keeping the header
+ overhead minimal is important.
+
+ Unless you know that you need this, say N.
+
endif
config XZ_DEC_BCJ
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 7a6781e3f47b..27ce34520e78 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -248,6 +248,10 @@ struct lzma2_dec {
* before the first LZMA chunk.
*/
bool need_props;
+
+#ifdef XZ_DEC_MICROLZMA
+ bool pedantic_microlzma;
+#endif
};
struct xz_dec_lzma2 {
@@ -387,7 +391,14 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
*left -= copy_size;
- memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+ /*
+ * If doing in-place decompression in single-call mode and the
+ * uncompressed size of the file is larger than the caller
+ * thought (i.e. it is invalid input!), the buffers below may
+ * overlap and cause undefined behavior with memcpy().
+ * With valid inputs memcpy() would be fine here.
+ */
+ memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
dict->pos += copy_size;
if (dict->full < dict->pos)
@@ -397,7 +408,11 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
if (dict->pos == dict->end)
dict->pos = 0;
- memcpy(b->out + b->out_pos, b->in + b->in_pos,
+ /*
+ * Like above but for multi-call mode: use memmove()
+ * to avoid undefined behavior with invalid input.
+ */
+ memmove(b->out + b->out_pos, b->in + b->in_pos,
copy_size);
}
@@ -408,6 +423,12 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
}
}
+#ifdef XZ_DEC_MICROLZMA
+# define DICT_FLUSH_SUPPORTS_SKIPPING true
+#else
+# define DICT_FLUSH_SUPPORTS_SKIPPING false
+#endif
+
/*
* Flush pending data from dictionary to b->out. It is assumed that there is
* enough space in b->out. This is guaranteed because caller uses dict_limit()
@@ -421,8 +442,19 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
if (dict->pos == dict->end)
dict->pos = 0;
- memcpy(b->out + b->out_pos, dict->buf + dict->start,
- copy_size);
+ /*
+ * These buffers cannot overlap even if doing in-place
+ * decompression because in multi-call mode dict->buf
+ * has been allocated by us in this file; it's not
+ * provided by the caller like in single-call mode.
+ *
+ * With MicroLZMA, b->out can be NULL to skip bytes that
+ * the caller doesn't need. This cannot be done with XZ
+ * because it would break BCJ filters.
+ */
+ if (!DICT_FLUSH_SUPPORTS_SKIPPING || b->out != NULL)
+ memcpy(b->out + b->out_pos, dict->buf + dict->start,
+ copy_size);
}
dict->start = dict->pos;
@@ -488,7 +520,7 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
- * on x86). Using a non-splitted version results in nicer looking code too.
+ * on x86). Using a non-split version results in nicer looking code too.
*
* NOTE: This must return an int. Do not make it return a bool or the speed
* of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
@@ -774,6 +806,7 @@ static void lzma_reset(struct xz_dec_lzma2 *s)
s->lzma.rep1 = 0;
s->lzma.rep2 = 0;
s->lzma.rep3 = 0;
+ s->lzma.len = 0;
/*
* All probabilities are initialized to the same value. This hack
@@ -1157,8 +1190,6 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
}
}
- s->lzma.len = 0;
-
s->lzma2.sequence = SEQ_CONTROL;
s->lzma2.need_dict_reset = true;
@@ -1174,3 +1205,140 @@ XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
kfree(s);
}
+
+#ifdef XZ_DEC_MICROLZMA
+/* This is a wrapper struct to have a nice struct name in the public API. */
+struct xz_dec_microlzma {
+ struct xz_dec_lzma2 s;
+};
+
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s_ptr,
+ struct xz_buf *b)
+{
+ struct xz_dec_lzma2 *s = &s_ptr->s;
+
+ /*
+ * sequence is SEQ_PROPERTIES before the first input byte,
+ * SEQ_LZMA_PREPARE until a total of five bytes have been read,
+ * and SEQ_LZMA_RUN for the rest of the input stream.
+ */
+ if (s->lzma2.sequence != SEQ_LZMA_RUN) {
+ if (s->lzma2.sequence == SEQ_PROPERTIES) {
+ /* One byte is needed for the props. */
+ if (b->in_pos >= b->in_size)
+ return XZ_OK;
+
+ /*
+ * Don't increment b->in_pos here. The same byte is
+ * also passed to rc_read_init() which will ignore it.
+ */
+ if (!lzma_props(s, ~b->in[b->in_pos]))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.sequence = SEQ_LZMA_PREPARE;
+ }
+
+ /*
+ * xz_dec_microlzma_reset() doesn't validate the compressed
+ * size so we do it here. We have to limit the maximum size
+ * to avoid integer overflows in lzma2_lzma(). 3 GiB is a nice
+ * round number and much more than users of this code should
+ * ever need.
+ */
+ if (s->lzma2.compressed < RC_INIT_BYTES
+ || s->lzma2.compressed > (3U << 30))
+ return XZ_DATA_ERROR;
+
+ if (!rc_read_init(&s->rc, b))
+ return XZ_OK;
+
+ s->lzma2.compressed -= RC_INIT_BYTES;
+ s->lzma2.sequence = SEQ_LZMA_RUN;
+
+ dict_reset(&s->dict, b);
+ }
+
+ /* This is to allow increasing b->out_size between calls. */
+ if (DEC_IS_SINGLE(s->dict.mode))
+ s->dict.end = b->out_size - b->out_pos;
+
+ while (true) {
+ dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos,
+ s->lzma2.uncompressed));
+
+ if (!lzma2_lzma(s, b))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+ if (s->lzma2.uncompressed == 0) {
+ if (s->lzma2.pedantic_microlzma) {
+ if (s->lzma2.compressed > 0 || s->lzma.len > 0
+ || !rc_is_finished(&s->rc))
+ return XZ_DATA_ERROR;
+ }
+
+ return XZ_STREAM_END;
+ }
+
+ if (b->out_pos == b->out_size)
+ return XZ_OK;
+
+ if (b->in_pos == b->in_size
+ && s->temp.size < s->lzma2.compressed)
+ return XZ_OK;
+ }
+}
+
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size)
+{
+ struct xz_dec_microlzma *s;
+
+ /* Restrict dict_size to the same range as in the LZMA2 code. */
+ if (dict_size < 4096 || dict_size > (3U << 30))
+ return NULL;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return NULL;
+
+ s->s.dict.mode = mode;
+ s->s.dict.size = dict_size;
+
+ if (DEC_IS_MULTI(mode)) {
+ s->s.dict.end = dict_size;
+
+ s->s.dict.buf = vmalloc(dict_size);
+ if (s->s.dict.buf == NULL) {
+ kfree(s);
+ return NULL;
+ }
+ }
+
+ return s;
+}
+
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact)
+{
+ /*
+ * comp_size is validated in xz_dec_microlzma_run().
+ * uncomp_size can safely be anything.
+ */
+ s->s.lzma2.compressed = comp_size;
+ s->s.lzma2.uncompressed = uncomp_size;
+ s->s.lzma2.pedantic_microlzma = uncomp_size_is_exact;
+
+ s->s.lzma2.sequence = SEQ_PROPERTIES;
+ s->s.temp.size = 0;
+}
+
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s)
+{
+ if (DEC_IS_MULTI(s->s.dict.mode))
+ vfree(s->s.dict.buf);
+
+ kfree(s);
+}
+#endif
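Putting the new MicroLZMA entry points together, a hedged single-shot usage sketch; the buffers and sizes are hypothetical, and struct xz_buf is the usual in/out descriptor from xz.h:

    struct xz_buf b = {
        .in = comp_buf,  .in_pos = 0,  .in_size = comp_size,
        .out = out_buf,  .out_pos = 0, .out_size = uncomp_size,
    };
    struct xz_dec_microlzma *s;
    enum xz_ret ret;

    s = xz_dec_microlzma_alloc(XZ_PREALLOC, 1U << 20);  /* 1 MiB dict */
    if (s == NULL)
        return -ENOMEM;
    xz_dec_microlzma_reset(s, comp_size, uncomp_size, true);
    ret = xz_dec_microlzma_run(s, &b);    /* XZ_STREAM_END on success */
    xz_dec_microlzma_end(s);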
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index fea86deaaa01..683570b93a8c 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -402,12 +402,12 @@ static enum xz_ret dec_stream_header(struct xz_dec *s)
* we will accept other check types too, but then the check won't
* be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
*/
+ if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX)
+ return XZ_OPTIONS_ERROR;
+
s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
#ifdef XZ_DEC_ANY_CHECK
- if (s->check_type > XZ_CHECK_MAX)
- return XZ_OPTIONS_ERROR;
-
if (s->check_type > XZ_CHECK_CRC32)
return XZ_UNSUPPORTED_CHECK;
#else
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
index 32eb3c03aede..61098c67a413 100644
--- a/lib/xz/xz_dec_syms.c
+++ b/lib/xz/xz_dec_syms.c
@@ -15,8 +15,15 @@ EXPORT_SYMBOL(xz_dec_reset);
EXPORT_SYMBOL(xz_dec_run);
EXPORT_SYMBOL(xz_dec_end);
+#ifdef CONFIG_XZ_DEC_MICROLZMA
+EXPORT_SYMBOL(xz_dec_microlzma_alloc);
+EXPORT_SYMBOL(xz_dec_microlzma_reset);
+EXPORT_SYMBOL(xz_dec_microlzma_run);
+EXPORT_SYMBOL(xz_dec_microlzma_end);
+#endif
+
MODULE_DESCRIPTION("XZ decompressor");
-MODULE_VERSION("1.0");
+MODULE_VERSION("1.1");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
/*
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 09360ebb510e..bf1e94ec7873 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -37,6 +37,9 @@
# ifdef CONFIG_XZ_DEC_SPARC
# define XZ_DEC_SPARC
# endif
+# ifdef CONFIG_XZ_DEC_MICROLZMA
+# define XZ_DEC_MICROLZMA
+# endif
# define memeq(a, b, size) (memcmp(a, b, size) == 0)
# define memzero(buf, size) memset(buf, 0, size)
# endif
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index f19c4fbe1be7..2843f9bb42ac 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -253,13 +253,12 @@ void inflate_fast(z_streamp strm, unsigned start)
sfrom = (unsigned short *)(from);
loops = len >> 1;
- do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- *sout++ = *sfrom++;
-#else
- *sout++ = get_unaligned16(sfrom++);
-#endif
- while (--loops);
+ do {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ *sout++ = *sfrom++;
+ else
+ *sout++ = get_unaligned16(sfrom++);
+ } while (--loops);
out = (unsigned char *)sout;
from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
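The inffast rewrite trades the preprocessor conditional for IS_ENABLED(), which folds to a compile-time constant: both branches are always parsed and type-checked, and the dead one is eliminated. A minimal hedged illustration of the pattern (CONFIG_FOO and the helpers are hypothetical):

    if (IS_ENABLED(CONFIG_FOO))   /* constant 0 or 1 */
        fast_path();              /* compiled out when CONFIG_FOO=n */
    else
        portable_path();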