[PATCH] arm64: Fix "vtop" command to display swap information on Linux 6.10 and later
by Guanyou Chen
Hi Lianbo
Kernel commit 55564814a838 ("arm64/mm: Move PTE_PRESENT_INVALID
to overlay PTE_NG"), which is contained in Linux 6.10 and
later kernels, changed the format of swap entries on arm64.
Without the patch, the "vtop" command cannot display swap information.
Before the patch:
crash> vtop 2000000
VIRTUAL PHYSICAL
2000000 (not mapped)
PAGE DIRECTORY: ffffff8106356000
PGD: ffffff8106356000 => 800000186355003
PMD: ffffff8106355080 => 8000001476f5003
PTE: ffffff80c76f5000 => 101a62004
PTE OFFSET: 1055330
vtop: cannot determine swap location
With the patch:
crash> vtop 2000000
VIRTUAL PHYSICAL
2000000 (not mapped)
PAGE DIRECTORY: ffffff8106356000
PGD: ffffff8106356000 => 800000186355003
PMD: ffffff8106355080 => 8000001476f5003
PTE: ffffff80c76f5000 => 101a62004
PTE SWAP OFFSET
101a62004 /first_stage_ramdisk/dev/block/zram0 1055330
VMA START END FLAGS FILE
ffffff81a06e8b00 2000000 22000000 100073
SWAP: /first_stage_ramdisk/dev/block/zram0 OFFSET: 1055330
Link: https://lore.kernel.org/r/20240503144604.151095-4-ryan.roberts@arm.com
Signed-off-by: Guanyou.Chen <chenguanyou(a)xiaomi.com>
---
arm64.c | 11 ++++++++++-
memory.c | 5 ++++-
2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/arm64.c b/arm64.c
index 1723595..c125655 100644
--- a/arm64.c
+++ b/arm64.c
@@ -712,7 +712,16 @@ arm64_init(int when)
}
}
- if (THIS_KERNEL_VERSION >= LINUX(5,19,0)) {
+ if (THIS_KERNEL_VERSION >= LINUX(6,10,0)) {
+ ms->__SWP_TYPE_BITS = 5;
+ ms->__SWP_TYPE_SHIFT = 6;
+ ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1);
+ ms->__SWP_OFFSET_SHIFT = 12;
+ ms->__SWP_OFFSET_BITS = 50;
+ ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1);
+ ms->PTE_PROT_NONE = 0; /* unused */
+ ms->PTE_FILE = 0; /* unused */
+ } else if (THIS_KERNEL_VERSION >= LINUX(5,19,0)) {
ms->__SWP_TYPE_BITS = 5;
ms->__SWP_TYPE_SHIFT = 3;
ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1);
diff --git a/memory.c b/memory.c
index 400d31a..cbc8d2f 100644
--- a/memory.c
+++ b/memory.c
@@ -16415,6 +16415,8 @@ get_swapdev(ulong type, char *buf)
ulong vfsmnt;
char *devname;
char buf1[BUFSIZE];
+ int swap_file_is_file =
+ STREQ(MEMBER_TYPE_NAME("swap_info_struct", "swap_file"), "file");
swap_info_init();
@@ -16474,7 +16476,8 @@ get_swapdev(ulong type, char *buf)
vfsmnt = ULONG(vt->swap_info_struct +
OFFSET(swap_info_struct_swap_vfsmnt));
get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
- } else if (VALID_MEMBER (swap_info_struct_old_block_size)) {
+ } else if (VALID_MEMBER (swap_info_struct_old_block_size)
+ || swap_file_is_file) {
devname = vfsmount_devname(file_to_vfsmnt(swap_file),
buf1, BUFSIZE);
get_pathname(file_to_dentry(swap_file),
--
2.34.1
Thanks.
Guanyou
1 week, 2 days
[PATCH v3] ppc64: Fix bt printing error stack trace
by Tao Liu
An error stack trace from the bt command was observed:
crash> bt 1
PID: 1 TASK: c000000003714b80 CPU: 2 COMMAND: "systemd"
#0 [c0000000037735c0] _end at c0000000037154b0 (unreliable)
#1 [c000000003773770] __switch_to at c00000000001fa9c
#2 [c0000000037737d0] __schedule at c00000000112e4ec
#3 [c0000000037738b0] schedule at c00000000112ea80
...
The #0 stack trace is incorrect, the function address shouldn't exceed _end.
The reason is that kernel commit cd52414d5a6c ("powerpc/64: ELFv2 use
minimal stack frames in int and switch frame sizes") changed the offset of pt_regs
relative to sp from STACK_FRAME_OVERHEAD, i.e. 112, to STACK_SWITCH_FRAME_REGS.
For CONFIG_PPC64_ELF_ABI_V1, it's 112, for ABI_V2, it's 48. So the nip will
read a wrong value from stack when ABI_V2 enabled.
After the patch:
crash> bt 1
PID: 1 TASK: c000000003714b80 CPU: 2 COMMAND: "systemd"
#0 [c0000000037737d0] __schedule at c00000000112e4ec
#1 [c0000000037738b0] schedule at c00000000112ea80
...
Signed-off-by: Tao Liu <ltao(a)redhat.com>
Suggested-by: Aditya Gupta <adityag(a)linux.ibm.com>
---
v1 Discussion: https://www.mail-archive.com/devel@lists.crash-utility.osci.io/msg01181.html
v2 No discussion: https://www.mail-archive.com/devel@lists.crash-utility.osci.io/msg01170.html
v3 -> v2: Rebase to top-most of upstream patch
Regarding v1's discussion, we cannot run abiv1 program on abiv2
kernel, it's because abiv1 is big-endian and abiv2 is little-endian, and
abiv2, or ppc64le kernel doesn't support big-endian, or abiv1 program
cannot run upon it, see the following:
$ file blkid
blkid: ELF 64-bit MSB executable, 64-bit PowerPC or cisco 7500, Power ELF V1 ABI, version 1 (GNU/Linux), statically linked, for GNU/Linux 3.2.0, BuildID[sha1]=b36e8a2a5e4d27039591a35fca38fa48735f5540, stripped
$ ~/qemu-10.1.2/build/qemu-ppc64 ./blkid
/dev/mapper/root: UUID="..." TYPE="xfs"
/dev/sda3: UUID="..." TYPE="LVM2_member" PARTUUID="..."
/dev/sda2: UUID="..." TYPE="xfs" PARTUUID="..."
/dev/mapper/swap: UUID="..." TYPE="swap"
/dev/mapper/home: UUID="..." TYPE="xfs"
/dev/sda1: PARTUUID="..."
$ ./blkid
-bash: ./blkid: cannot execute binary file: Exec format error
$ uname -a
Linux 6.12.0-150.el10.ppc64le #1 SMP Fri Oct 31 06:58:14 EDT 2025 ppc64le GNU/Linux
$ file /bin/bash
/bin/bash: ELF 64-bit LSB pie executable, 64-bit PowerPC or cisco 7500, OpenPOWER ELF V2 ABI, version 1 (SYSV), dynamically linked, interpreter /lib64/ld64.so.2, BuildID[sha1]=9ab800028ced16c5974f5b19cb6ed754178802a8, for GNU/Linux 3.10.0, stripped
The abiv1 program blkid cannot be run on this machine, except with the
help of qemu. So from my view, we don't need to consider the case that abiv2
kernel might contain an abiv1 program or .ko.
Please feel free to correct me if I'm wrong. @Aditya Gupta
---
defs.h | 3 ++-
netdump.c | 14 ++++++++++----
ppc64.c | 34 +++++++++++++++++++++++++++++++---
symbols.c | 5 +++--
4 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/defs.h b/defs.h
index ab4aee8..19dff88 100644
--- a/defs.h
+++ b/defs.h
@@ -4699,6 +4699,7 @@ struct efi_memory_desc_t {
#define MSR_PR_LG 14 /* Problem State / Privilege Level */
/* Used to find the user or kernel-mode frame*/
+#define STACK_SWITCH_FRAME_REGS 48
#define STACK_FRAME_OVERHEAD 112
#define EXCP_FRAME_MARKER 0x7265677368657265
@@ -5820,7 +5821,7 @@ void dump_offset_table(char *, ulong);
int is_elf_file(char *);
int is_kernel(char *);
int is_shared_object(char *);
-int file_elf_version(char *);
+int file_elf_header(char *, char *);
int is_system_map(char *);
int is_compressed_kernel(char *, char **);
int select_namelist(char *);
diff --git a/netdump.c b/netdump.c
index 69100a9..9806ce9 100644
--- a/netdump.c
+++ b/netdump.c
@@ -665,11 +665,11 @@ resize_elf_header(int fd, char *file, char **eheader_ptr, char **sect0_ptr,
}
/*
- * Return the e_version number of an ELF file
+ * Return the e_version or e_flags number of an ELF file
* (or -1 if its not readable ELF file)
*/
int
-file_elf_version(char *file)
+file_elf_header(char *file, char *member)
{
int fd, size;
Elf32_Ehdr *elf32;
@@ -699,11 +699,17 @@ file_elf_version(char *file)
(elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
(elf32->e_ident[EI_DATA] == ELFDATA2LSB) &&
(elf32->e_ident[EI_VERSION] == EV_CURRENT)) {
- return (elf32->e_version);
+ if (STRNEQ(member, "e_version"))
+ return (elf32->e_version);
+ else if (STRNEQ(member, "e_flags"))
+ return (elf32->e_flags);
} else if (STRNEQ(elf64->e_ident, ELFMAG) &&
(elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
(elf64->e_ident[EI_VERSION] == EV_CURRENT)) {
- return (elf64->e_version);
+ if (STRNEQ(member, "e_version"))
+ return (elf64->e_version);
+ else if (STRNEQ(member, "e_flags"))
+ return (elf64->e_flags);
}
return -1;
diff --git a/ppc64.c b/ppc64.c
index d1a5067..213ce90 100644
--- a/ppc64.c
+++ b/ppc64.c
@@ -74,6 +74,7 @@ static ulong pud_page_vaddr_l4(ulong pud);
static ulong pmd_page_vaddr_l4(ulong pmd);
static int is_opal_context(ulong sp, ulong nip);
void opalmsg(void);
+static bool is_ppc64_elf_abi_v2(void);
struct user_regs_bitmap_struct {
struct ppc64_pt_regs ur;
@@ -3035,6 +3036,25 @@ ppc64_get_sp(ulong task)
return sp;
}
+static bool
+is_ppc64_elf_abi_v2(void)
+{
+ static bool ret = false;
+ static bool checked = false;
+
+ if (checked)
+ return ret;
+ switch (file_elf_header(pc->namelist, "e_flags")) {
+ case 2:
+ ret = true;
+ case 1:
+ break;
+ default:
+ error(WARNING, "Unknown e_flags for v1/v2 elf_abi detection.\n");
+ }
+ checked = true;
+ return ret;
+}
/*
* get the SP and PC values for idle tasks.
@@ -3056,9 +3076,17 @@ get_ppc64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
sp = ppc64_get_sp(task);
if (!INSTACK(sp, bt))
goto out;
- readmem(sp+STACK_FRAME_OVERHEAD, KVADDR, &regs,
- sizeof(struct ppc64_pt_regs),
- "PPC64 pt_regs", FAULT_ON_ERROR);
+
+ if (THIS_KERNEL_VERSION >= LINUX(6,2,0) && is_ppc64_elf_abi_v2()) {
+ readmem(sp+STACK_SWITCH_FRAME_REGS, KVADDR, &regs,
+ sizeof(struct ppc64_pt_regs),
+ "PPC64 pt_regs", FAULT_ON_ERROR);
+ } else {
+ readmem(sp+STACK_FRAME_OVERHEAD, KVADDR, &regs,
+ sizeof(struct ppc64_pt_regs),
+ "PPC64 pt_regs", FAULT_ON_ERROR);
+ }
+
ip = regs.nip;
closest = closest_symbol(ip);
if (STREQ(closest, ".__switch_to") || STREQ(closest, "__switch_to")) {
diff --git a/symbols.c b/symbols.c
index 480fdb6..0a11c2f 100644
--- a/symbols.c
+++ b/symbols.c
@@ -217,7 +217,7 @@ symtab_init(void)
* Check whether the namelist is a kerntypes file built by
* dwarfextract, which places a magic number in e_version.
*/
- if (file_elf_version(pc->namelist) == EV_DWARFEXTRACT)
+ if (file_elf_header(pc->namelist, "e_version") == EV_DWARFEXTRACT)
pc->flags |= KERNTYPES;
if (pc->flags & SYSMAP) {
@@ -13149,7 +13149,8 @@ load_module_symbols(char *modref, char *namelist, ulong base_addr)
error(FATAL, "cannot determine object file format: %s\n",
namelist);
- if (LKCD_KERNTYPES() && (file_elf_version(namelist) == EV_DWARFEXTRACT))
+ if (LKCD_KERNTYPES() &&
+ (file_elf_header(namelist, "e_version") == EV_DWARFEXTRACT))
goto add_symbols; /* no symbols, add the debuginfo */
if (!(bfd_get_file_flags(mbfd) & HAS_SYMS))
--
2.47.0
1 week, 2 days
Re: [PATCH] Resolve BLK_MQ_F_TAG_HCTX_SHARED at runtime
by Lianbo Jiang
Hi, Tao
Thank you for the update.
On 12/2/25 6:28 AM, devel-request(a)lists.crash-utility.osci.io wrote:
> Date: Mon, 1 Dec 2025 15:44:12 +1300
> From: Tao Liu<ltao(a)redhat.com>
> Subject: [Crash-utility] [PATCH] Resolve BLK_MQ_F_TAG_HCTX_SHARED at
> runtime
> To:devel@lists.crash-utility.osci.io
> Cc: Tao Liu<ltao(a)redhat.com>
> Message-ID:<20251201024411.14672-2-ltao(a)redhat.com>
> Content-Type: text/plain; charset="US-ASCII"; x-default=true
>
> Though upstream kernel have defined BLK_MQ_F_TAG_HCTX_SHARED
> as (1 << 3), the value might be set different, e.g. [1]. In
> this patch, we will use enumerator_value() to get its value
> at runtime, to make the code more adaptable.
>
> [1]:https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-8/-/b...
>
> Signed-off-by: Tao Liu<ltao(a)redhat.com>
> ---
>
> This patch is the follow-up of [2].
>
> [2]:https://www.mail-archive.com/devel@lists.crash-utility.osci.io/msg0184...
>
> ---
> dev.c | 5 +++--
> 1 file changed, 3 insertions(+), 2 deletions(-)
>
> diff --git a/dev.c b/dev.c
> index 27318e8..127cf5a 100644
> --- a/dev.c
> +++ b/dev.c
> @@ -4326,11 +4326,11 @@ struct bt_iter_data {
> #define MQ_RQ_IN_FLIGHT 1
> #define REQ_OP_BITS 8
> #define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
> -#define BLK_MQ_F_TAG_HCTX_SHARED (1 << 3)
> +static int blk_hctx_shared = 0;
>
> static bool blk_mq_is_shared_tags(unsigned int flags)
> {
> - return flags & BLK_MQ_F_TAG_HCTX_SHARED;
> + return flags & blk_hctx_shared;
> }
>
> static uint op_is_write(uint op)
> @@ -4952,6 +4952,7 @@ void diskio_init(void)
> MEMBER_OFFSET_INIT(request_queue_tag_set, "request_queue", "tag_set");
> MEMBER_OFFSET_INIT(blk_mq_tag_set_flags, "blk_mq_tag_set", "flags");
> MEMBER_OFFSET_INIT(blk_mq_tag_set_shared_tags, "blk_mq_tag_set", "shared_tags");
> + enumerator_value("BLK_MQ_F_TAG_HCTX_SHARED", &blk_hctx_shared);
I got an error as below:
gcc -c -g -DX86_64 -DLZO -DVALGRIND -DGDB_16_2 dev.c -Wall -O2
-Wstrict-prototypes -Wmissing-prototypes -fstack-protector
-Wformat-security
dev.c: In function ‘diskio_init’:
dev.c:4955:54: error: passing argument 2 of ‘enumerator_value’ from
incompatible pointer type [-Wincompatible-pointer-types]
4955 | enumerator_value("BLK_MQ_F_TAG_HCTX_SHARED",
&blk_hctx_shared);
| ^~~~~~~~~~~~~~~~
| |
| int *
In file included from dev.c:18:
defs.h:5856:30: note: expected ‘long int *’ but argument is of type ‘int *’
5856 | int enumerator_value(char *, long *);
| ^~~~~~
make[4]: *** [Makefile:457: dev.o] Error 1
make[3]: *** [Makefile:2237: gdb] Error 2
make[2]: *** [Makefile:327: rebuild] Error 2
make[1]: *** [Makefile:315: gdb_merge] Error 2
make: *** [Makefile:307: all] Error 2
BTW: I used the gcc-15.2.1.
gcc version 15.2.1 20251022 (Red Hat 15.2.1-3) (GCC)
Lianbo
>
> dt->flags |= DISKIO_INIT;
> }
> -- 2.47.0
1 week, 2 days
[PATCH v2] Resolve BLK_MQ_F_TAG_HCTX_SHARED at runtime
by Tao Liu
Though the upstream kernel has defined BLK_MQ_F_TAG_HCTX_SHARED
as (1 << 3), the value might be set differently, e.g. [1]. In
this patch, we will use enumerator_value() to get its value
at runtime, to make the code more adaptable.
[1]: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-8/-/blob...
Signed-off-by: Tao Liu <ltao(a)redhat.com>
---
dev.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/dev.c b/dev.c
index 27318e8..1332b0e 100644
--- a/dev.c
+++ b/dev.c
@@ -4326,11 +4326,11 @@ struct bt_iter_data {
#define MQ_RQ_IN_FLIGHT 1
#define REQ_OP_BITS 8
#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define BLK_MQ_F_TAG_HCTX_SHARED (1 << 3)
+static long blk_hctx_shared = 0;
static bool blk_mq_is_shared_tags(unsigned int flags)
{
- return flags & BLK_MQ_F_TAG_HCTX_SHARED;
+ return flags & blk_hctx_shared;
}
static uint op_is_write(uint op)
@@ -4952,6 +4952,7 @@ void diskio_init(void)
MEMBER_OFFSET_INIT(request_queue_tag_set, "request_queue", "tag_set");
MEMBER_OFFSET_INIT(blk_mq_tag_set_flags, "blk_mq_tag_set", "flags");
MEMBER_OFFSET_INIT(blk_mq_tag_set_shared_tags, "blk_mq_tag_set", "shared_tags");
+ enumerator_value("BLK_MQ_F_TAG_HCTX_SHARED", &blk_hctx_shared);
dt->flags |= DISKIO_INIT;
}
--
2.47.0
1 week, 2 days
What are fact and dimension tables?
by gsingh@sevenmentor.com
Fact table and dimension table are two main components – the broad classification of how an entire data warehouse or data analysis system is organized and used to process large quantities of different types of data. To put it in simple words, Fact table will store the measurable or quantitative information of any data and Dimension tables are used to give flexible descriptive attributes. They form a structure called a star schema or snowflake schema, which allows for rapid analysis along various dimensions. The difference between these two kind of tables is at the foundation of understanding analytics/big data but know wonder that as anywhere there are opinions and marketing (…I've read), personally this is one of the fundamental concepts which are taught, in each decent enough "data" engineer course. As organizations are increasingly growing their digital operations, the capability to design effective fact and dimension tables can highly impact performance improvement in reporting and business decision-making. https://www.sevenmentor.com/data-engineering-course
Fact table is the core table in a star and snowflake schema. "Numerical data capable of being used for calculations and expressing a business event or transaction". Such values generally can be added or semi-added, thereby making calculations easy for analysts. Sales amount, dollar revenue, units sold, clicks, impressions and monetary transactions – these are some of the measures that you will find in a Fact table. Because these numbers continually increase as business interactions take place, fact tables can become very large. They’re designed for rapid queries, especially when companies are running reports that summarize values across time, geography or product categories. In every data engineering class, students learn that fact tables are structured to point to dimension tables and establish the relationships that make the data meaningful. Without dimension tables, fact tables are little more than disembodied columns of numbers.
Fact tables are large and store records, while dimension tables are small and provide description of those keys in the fact table. These are explanations of what and how business does. For instance the dimension table can hold attributes of a customer such as name, age, location, and demographic category. Another dimension table may represent products, such as with product name, category, size and brand. Time dimensions are very common as well, allowing analysts to study the performance on a daily basis or from month to month to year. The power in the fact table is that dimension tables turn raw data into something meaningful. These aggregations allow people to filter and group data so that they can slice up the data in a way that answers various business level questions. Practitioners who take a Data Engineering Course learn siegent know how to design dimension tables that are clean, consistent and normalized so as to produce high quality analytics output. https://www.sevenmentor.com/data-engineering-course-in-pune.php
The structure between fact and dimension tables constitutes for a stable habitat in which advanced reporting and business intelligence can flourish. For instance, a retail store might have a fact table of daily sales. This table doesn’t, by itself, inform the business which products are the best-performers in different regions, or which customer groups are its biggest buyers. But as your sales fact table is joined to product, customer, store and time dimension tables; the company gets a holistic view of their performance. This fact/dimension combination enables organisations to ask powerful questions like monthly sales by region, the categories the total quantity sold across all products and new compared to returning customers based on their revenue. This design is unbelievably efficient for analytical queries, which is why almost every company having star schema reports have them built on top of a denormalized version of their highly normalized RDBMS.
Performance tuning of fact and dimension tables is also a very crucial factor. Fact tables are combatting heavy in size and they use indexing, partitioning and compression techniques to boost queries. The dimension tables are usually optimally designed in that data is not duplicated and stores information as efficiently as possible to limit time expense with joins. Therefore, a good grasp of these optimization techniques is essential while preparing for a Jobs openings in data engineering. How to design efficient fact and dimension tables is a one of the common question recruiters ask, because if not designed properly your entire.… environment will be lethargic. Real-world systems process terabytes or even petabytes of data, so small architectural choices can make a world of difference in performance.
Fact tables can also be classified by the type of activity that they record. Most often it's a transactional fact table where each event is captured. There are also snapshot fact tables, to which records access the status of a process in certain periods (for example daily inventory). Accumulating snapshots Fact Tables that track the complete lifecycle of a process with well defined start and end points like an order fulfillment. Dimension tables on the other hand, may manage slow changing dimensions i.e. changes over time in the descriptive information. A client may can change their address, an item may be re-launched. Analysis is affected by the way in which you track changes, so slowly changing dimensions (SCDs) are an important concept in data modeling. These themes are extensively taught in a data engineering course to enable learners to work with complex, growing datasets for industry.
The momentum continues as more and more enterprises run on cloud data warehouses like Snowflake, BigQuery, Redshift or Azure Synapse. Fact and dimension tables are also utilized in these systems for its underlying architecture. If someone’s a batch pipeline person, a stream-processing person, or repo for real-time dashboards, fact and dimension table design still applies. In the age of “big data”, modern analytics has moved towards the distributed compute model, thus designing tables efficiently becomes even more important. Bad schemas can cost you too much to store, slow down your queries or create inefficient pipelines. For this reason, companies generally seek people who not just know theory but can also deploy it in cloud native environments. Job Data engineer Job openings for data engineers often mention a requirement of data modeling skills.
Understanding fact and dimension tables is not a good exercise technically but also helps develop strategic thinking. When professionals understand how to properly model data, they can provide better guidance for organizations. Marketing teams, for instance, have fact and dimension models to measure customer activity and campaign effectiveness. These frameworks are used by finance departments to track revenue trends, budgeting quality and forecasting accuracy. Fact-based traffic of logistics, supply chain, inventory management is observed by operations teams. Good modeling means that all the players are seeing “the same movie”. This is precisely why companies place a high value on data engineers trained in an organized data engineering course: it ensures they are trained hands-on creating and managing scalable schemas.
Finally, a fact table and dimension tables are core elements of data warehousing and business intelligence. The fact tables contain numerical figures that will be acting as the quantitative representations of the business events, while in a dimension table you have descriptive information that gives context to the values. They together lay the foundation for star and snow flake schemas, allowing organizations to analyze across various aspects of their business. Whether you're just starting out or ready to get your first JOB Opening on data engineering, being competent in fact and dimension tables is a must-have. With a rising interest in data-driven decision-making, you can sign up for a data engineering course to help students specialize in data modeling and get ready for the best available career prospects in the analytics and big data space.
FAQ
1. Are there internships for students on SevenMentor?
Internship help is available for eligible students from SevenMentor. SevenMentor assists the learners to get enough experience that of really doing.
2. Are cloud ETL tools covered by SevenMentor?
Yep, SevenMentor has Glue,Datalfow,Azure Data Factory etc... SevenMentor has an emphasis on practical sessions.
3. What is the placement record? SevenMentor will have to assist Support for Data Engineering?
SevenMentor the is job of India. Most of the SevenMentor trainees are working with the MNCs.
4. Do they provide certification exam on SevenMentor?
Truth It is that Examinations are Conducted by SevenMentor in-house. SevenMentor also ensure that the students are industry ready which is required for the field.
5. Is SevenMentor provide corporate training for Data Engineering?
The answer is yes SevenMentor offers corporate Data Engineering programs. SevenMentor educates companies about recent data technologies.
6. What is the role of a Data Engineer as per SevenMentor?
Data engineers visualize and develop data pipelines, according to SevenMentor. SevenMentor Educates Students who can serve in the above role.
7. Does SevenMentor provide data engineering in Linux?
Yes, SevenMentor has Linux commands which are necessary for any data related work. SevenMentor saw to it that the concept is crystal clear.
8. What are the fundamentals for schema creation in SevenMentor?
Normalization in database, Denormalization and Schema creation with SevenMentor. sevenmentor is dedicated to the efficacy.
9. Does SevenMentor provide doubt-clearing classes?
Yess,SevenMentor offers daily doubt clearing sessions. SevenMentor helps in order that students in both stay safe.
10. Will SevenMentor offer me trial classes?
Yes, SevenMentor gives demo classes for free of cost prior to registering. It provides course flow to the students and explain.
11. What are the companies hiring SevenMentor Data Engineering Training?
SevenMentor Students are placed in banking, finance, IT and retail ,as well as analytics. SevenMentor has a big network in the industry.
12. Does SevenMentor provide real-time monitoring tools training?
Its a fact SEVENMENTOR has supporting tools to montior and analyze the flow of data. SevenMentor teaches how to work with dashboards and alerts.
13. What is the purpose of Tuning performance in SevenMentor's training?
Optimization classes for SQL, Spark, ETL is delivered as part of SevenMentor. Pipelines are guaranteed to be efficient at SevenMentor.
14. Does SevenMentor teach how to integrate API?
It really is genuine that SevenMentor supports database consumption of information by way of APIs. SevenMentor offers JSON,XML and REST.
15. Can a beginner begin with Data Engineering course in SevenMentor! Chat to us today.
Yes, the aspirant to learn programming can join SevenMentor. SevenMentor starts with the basic.
Why Choose US?
SevenMentor Data Engineering Course in Pune Our course will helps the candidate to go hands on with practical as well as theoretical approach. What they have that other courses don’t:
Real-World Projects
It doesn’t come down to just learning the concepts, it comes down to practicing and implementing the concepts. Every one, starting from Python scripting to Spark Data Pipelines to Spark data analysis - it has exercises that may help ensure you are in a position to have the needed experience.
Flexible Learning Modes
You can learn in a real class or on the internet. SevenMentor Pune is well equipped, and online students receive the same education as campus students, including failing.
Career-Focused Training
This entire program is not based upon the basic. The course will prepare you to get a job, including suitable interview and resume writing techniques to assist you throughout the job search.
Comprehensive Course Range
SevenMentor offers a number of courses that integrate machine learning and data analytics. They also offer cloud computing courses to support cyber security as well as full-stack security and development.
Expert Trainers
Their trainers has over 10 years of working experience in the academia and industry. You can easily learn practical, real-world applications from their to-the-point instructor.
Placement Support
SevenMentor is well known for its 100% placement assistance. Students are backed start to finish after the course, beginning with resumes to mock interviewing and job-related advice. The job search support received from SevenMentor is widely appreciated by different reviewers.
Placement Services are comprised of:
Preparing for an interview and tips to help you prepare for an interview.
Leverage your LinkedIn and resume
Internship and job opportunities
His vision is for Alumni to have opportunities to network with each other, and provocatively interrogate fuzzy framed problems.
Evaluation and Recognition
Reviews
SevenMentor is available on several name under many platforms.
Google My Business: Over 3300 students have left us more than 5,000 5 Star Reviews most of which are highlighted in blue as Verified.
Trustindex is validated and rated by over 299 customers - 4.9 reviews.
Justdial also has about 4900 reviews, some of which are positive ones talking about education quality and customer service.
Organized Professional Training Value Focused Practical Copyright Score: 4.0_DISABLE for value, focused on practical..
Social Presence
SevenMentor is available on Social Media Platforms.
Facebook The institute makes use of Facebook for announcements of courses students’ testimonials, course announcements, along with live online webinars. E.g., a FB post : “Learn Python, SQL, Power BI, Tableau” &namely provided as Data Engineering/analytics & others
Instagram The platform posts reels that read “New Weekend Batch Alert”, “training with real-world labs and expert-led sessions”, “placement assistance” etc.
LinkedIn The corporate page provides details about the institute, its services it offers, and the hiring partners.
Youtube within the “Stay connected” list.
Visit or contact us
SevenMentor Training Institute
Address- 1St floor, Shreenath Plaza, Dnyaneshwar Paduka Chowk, Office No.21 and 25, A Wing, Fergusson College Rd, Shivajinagar, Pune, Maharashtra 411005
Phone: 02071177008
1 week, 5 days
[PATCH] Loongarch: update the NR_CPUS to 2048
by Chenghao Duan
Since the kernel commit 9559d5806319 ("LoongArch: Increase max
supported CPUs up to 2048"), the NR_CPUS on the Linux kernel ranges
from 2-2048. So let's match NR_CPUS with the max NR_CPUS count
on the Linux kernel.
Signed-off-by: Chenghao Duan <duanchenghao(a)kylinos.cn>
---
defs.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/defs.h b/defs.h
index 156ac02..c407ac4 100644
--- a/defs.h
+++ b/defs.h
@@ -169,7 +169,7 @@
#define NR_CPUS (256)
#endif
#ifdef LOONGARCH64
-#define NR_CPUS (256)
+#define NR_CPUS (2048)
#endif
#define NR_DEVICE_DUMPS (64)
--
2.25.1
1 week, 6 days
A Tapestry of Modern Mediterranean Experiences
by sadieepoke@gmail.com
The Mediterranean region continues to inspire curiosity through its blend of culture, technology, and shifting societal habits. Travelers, researchers, and creatives often highlight how local traditions coexist with rapid innovation, forming a landscape where history and modernity sit comfortably side by side. This balance is especially visible in countries like Greece and Cyprus, where daily life reflects both resilience and reinvention. Discussions about tourism, digital transformation, and regional cooperation frequently emphasize how these nations manage to stay rooted in heritage while moving steadily toward contemporary expectations.
Conversations about broader European dynamics https://cyprusonlinecasinos.net/froutakia/fruit-shop often touch on entertainment habits, economic collaborations, and cultural exchanges. While casinos in Europe, Greece and Cyprus form only a small piece of this mosaic, they appear occasionally in studies mapping social behavior, regional development, and travel motivations. Their mention typically arises in relation to tourism cycles or cross-border leisure patterns, rather than serving as any main focus. What truly stands out in the Mediterranean context is how lifestyle preferences continue to adapt as local populations blend traditional customs with new forms of interaction, recreation, and communication.
At the center of these evolving preferences is a growing emphasis on Greek digital leisure. The term encompasses a wide variety of online cultural activities, from digital museums and virtual performances to interactive learning tools and collaborative creative platforms. Its rise reflects not only technological progress but also a strengthened desire for flexible forms of connection and entertainment. As Greece invests in digital literacy and remote-access initiatives, residents and visitors alike explore richer, more customized experiences. This shift has encouraged new businesses, creative ventures, and community projects designed to bridge physical spaces with virtual ones. Cyprus, with its own expanding tech landscape, follows a similar trajectory, integrating digital opportunities into education, entrepreneurship, and tourism-based industries.
The blending of online and offline habits has shaped how individuals across the region plan their free time, communicate, and engage with cultural events. For some, Mediterranean leisure still revolves around open-air cafés, artistic festivals, and weekend markets. For others, digital alternatives provide opportunities to meet new people, expand skills, or unwind after busy schedules. Neither approach replaces the other; instead, they coexist in a hybrid rhythm unique to modern Southern Europe.
This fusion of spaces—traditional squares, seaside promenades, online forums, creative apps—illustrates how Mediterranean societies reinterpret familiar routines. Occasional references to tourism hubs, including destinations where casinos in Europe, Greece and Cyprus operate, appear mostly when analyzing travel trends, but they remain only minor notes in a much richer and more diverse cultural score. The broader narrative continues to highlight the human motivation to connect, create, and experience the region through multiple dimensions, both physical and digital.
2 weeks, 1 day
Decoding Consumer Minds: Fresh Ideas for Marketing Research Papers
by kefag22407@haotuwu.com
Marketing is no longer just about selling — it’s about understanding why people buy, share, and stay loyal. Research papers in marketing can dive into fascinating areas like consumer psychology, digital engagement, and emotional branding. Exploring topics such as the impact of social media influencers on brand perception, consumer trust in AI-driven recommendations, or sustainability as a driver of purchase intent can help uncover what really shapes modern consumer decisions.
Another promising angle is studying data and personalization in marketing. With companies relying heavily on analytics, there’s a growing need to explore how data-driven strategies influence consumer experience and brand loyalty. Researching ethical data usage, AI in customer segmentation, or neuromarketing trends can lead to insights that bridge creativity with strategy — essential for any marketer looking to make an impact in today’s competitive landscape. Have more info at: https://thedissertationhelp.co.uk/marketing-research-paper-topics/
2 weeks, 1 day
[PATCH RESEND] Add a command line option to retrieve build-id
by Munehisa Kamata
Resending because my previous post is held for presumably being sent
without a list subscription.
Since Linux kernel commit 0935288c6e00 ("kdump: append kernel build-id
string to VMCOREINFO") merged in v5.9, VMCOREINFO data contains a kernel
build-id. Add a simple --build-id command line option that retrieves the
build-id from a kernel dump file, which works just like the existing
--osrelease option (and the implementation mimics it).
Example:
# crash --build-id /var/crash/127.0.0.1-2025-11-28-00\:33\:07/vmcore
03cc3b4eb67df4e66a6a794a39521bafabef0886
While we may also want to implement the strict build-id based
verification between namelist and dump file, this would be still handy
for some scripting or automation tasks without namelist.
Signed-off-by: Munehisa Kamata <kamatam(a)amazon.com>
---
crash.8 | 7 +++++++
defs.h | 1 +
diskdump.c | 17 +++++++++++++++++
help.c | 6 ++++++
main.c | 30 ++++++++++++++++++++++++++++++
makedumpfile.c | 40 ++++++++++++++++++++++++++++++++++------
netdump.c | 17 +++++++++++++++++
7 files changed, 112 insertions(+), 6 deletions(-)
diff --git a/crash.8 b/crash.8
index c7dc27d..4c06cb7 100644
--- a/crash.8
+++ b/crash.8
@@ -398,6 +398,13 @@ Display the OSRELEASE vmcoreinfo string from a kdump
.I dumpfile
header.
.TP
+.BI --build-id \ dumpfile
+Display the BUILD-ID vmcoreinfo string from a kdump
+.I dumpfile
+header.
+Note: this option only works for kernel (>=v5.9); otherwise it
+prints "unknown" and exits with non-zero status.
+.TP
.BI --hyper
Force the session to be that of a Xen hypervisor.
.TP
diff --git a/defs.h b/defs.h
index 24dad93..ff8041c 100644
--- a/defs.h
+++ b/defs.h
@@ -570,6 +570,7 @@ struct program_context {
#define MEMSRC_LOCAL (0x80000ULL)
#define REDZONE (0x100000ULL)
#define VMWARE_VMSS_GUESTDUMP (0x200000ULL)
+#define GET_BUILD_ID (0x400000ULL)
char *cleanup;
char *namelist_orig;
char *namelist_debug_orig;
diff --git a/diskdump.c b/diskdump.c
index b1ca0a7..0ff8782 100644
--- a/diskdump.c
+++ b/diskdump.c
@@ -91,6 +91,7 @@ static void dump_vmcoreinfo(FILE *);
static void dump_note_offsets(FILE *);
static char *vmcoreinfo_read_string(const char *);
static void diskdump_get_osrelease(void);
+static void diskdump_get_build_id(void);
static int valid_note_address(unsigned char *);
/* For split dumpfile */
@@ -1074,6 +1075,9 @@ is_diskdump(char *file)
if (pc->flags2 & GET_OSRELEASE)
diskdump_get_osrelease();
+ if (pc->flags2 & GET_BUILD_ID)
+ diskdump_get_build_id();
+
#ifdef LZO
if (lzo_init() == LZO_E_OK)
dd->flags |= LZO_SUPPORTED;
@@ -2446,6 +2450,19 @@ diskdump_get_osrelease(void)
pc->flags2 &= ~GET_OSRELEASE;
}
+static void
+diskdump_get_build_id(void)
+{
+ char *string;
+
+ if ((string = vmcoreinfo_read_string("BUILD-ID"))) {
+ fprintf(fp, "%s\n", string);
+ free(string);
+ }
+ else
+ pc->flags2 &= ~GET_BUILD_ID;
+}
+
static int
valid_note_address(unsigned char *offset)
{
diff --git a/help.c b/help.c
index 78d7a5c..1a21062 100644
--- a/help.c
+++ b/help.c
@@ -266,6 +266,12 @@ char *program_usage_info[] = {
" Display the OSRELEASE vmcoreinfo string from a kdump dumpfile",
" header.",
"",
+ " --build-id dumpfile",
+ " Display the BUILD-ID vmcoreinfo string from a kdump dumpfile",
+ " header.",
+ " Note: this option only works for kernel(>=v5.9); otherwise it",
+ " prints \"unknown\" and exits with non-zero status",
+ "",
" --hyper",
" Force the session to be that of a Xen hypervisor.",
"",
diff --git a/main.c b/main.c
index 71bcc15..d4c335b 100644
--- a/main.c
+++ b/main.c
@@ -29,6 +29,7 @@ static void check_xen_hyper(void);
static void show_untrusted_files(void);
static void get_osrelease(char *);
static void get_log(char *);
+static void get_build_id(char *);
static struct option long_options[] = {
{"memory_module", required_argument, 0, 0},
@@ -66,6 +67,7 @@ static struct option long_options[] = {
{"no_elf_notes", 0, 0, 0},
{"osrelease", required_argument, 0, 0},
{"log", required_argument, 0, 0},
+ {"build-id", required_argument, 0, 0},
{"hex", 0, 0, 0},
{"dec", 0, 0, 0},
{"no_strip", 0, 0, 0},
@@ -276,6 +278,11 @@ main(int argc, char **argv)
get_log(optarg);
}
+ else if (STREQ(long_options[option_index].name, "build-id")) {
+ pc->flags2 |= GET_BUILD_ID;
+ get_build_id(optarg);
+ }
+
else if (STREQ(long_options[option_index].name, "hex")) {
pc->flags2 |= RADIX_OVERRIDE;
pc->output_radix = 16;
@@ -1502,6 +1509,8 @@ dump_program_context(void)
fprintf(fp, "%sREDZONE", others++ ? "|" : "");
if (pc->flags2 & VMWARE_VMSS_GUESTDUMP)
fprintf(fp, "%sVMWARE_VMSS_GUESTDUMP", others++ ? "|" : "");
+ if (pc->flags2 & GET_BUILD_ID)
+ fprintf(fp, "%sGET_BUILD_ID", others++ ? "|" : "");
fprintf(fp, ")\n");
fprintf(fp, " namelist: %s\n", pc->namelist);
@@ -1972,6 +1981,27 @@ get_log(char *dumpfile)
clean_exit(retval);
}
+static void
+get_build_id(char *dumpfile)
+{
+ int retval = 1;
+
+ if (is_flattened_format(dumpfile)) {
+ if (pc->flags2 & GET_BUILD_ID)
+ retval = 0;
+ } else if (is_diskdump(dumpfile)) {
+ if (pc->flags2 & GET_BUILD_ID)
+ retval = 0;
+ } else if (is_kdump(dumpfile, KDUMP_LOCAL)) {
+ if (pc->flags2 & GET_BUILD_ID)
+ retval = 0;
+ }
+
+ if (retval)
+ fprintf(fp, "unknown\n");
+
+ clean_exit(retval);
+}
char *
no_vmcoreinfo(const char *unused)
diff --git a/makedumpfile.c b/makedumpfile.c
index 26d12b6..ee03199 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -24,6 +24,7 @@
#include <byteswap.h>
static void flattened_format_get_osrelease(char *);
+static void flattened_format_get_build_id(char *);
int flattened_format = 0;
@@ -196,7 +197,7 @@ read_all_makedumpfile_data_header(char *file)
void
check_flattened_format(char *file)
{
- int fd, get_osrelease;
+ int fd, get_osrelease, get_build_id;
struct stat stat;
struct makedumpfile_header fh;
@@ -206,6 +207,12 @@ check_flattened_format(char *file)
} else
get_osrelease = FALSE;
+ if (pc->flags2 & GET_BUILD_ID) {
+ get_build_id = TRUE;
+ pc->flags2 &= ~GET_BUILD_ID;
+ } else
+ get_build_id = FALSE;
+
if (flattened_format)
goto out;
@@ -237,6 +244,11 @@ check_flattened_format(char *file)
return;
}
+ if (get_build_id) {
+ flattened_format_get_build_id(file);
+ return;
+ }
+
if (!read_all_makedumpfile_data_header(file))
return;
@@ -251,6 +263,9 @@ check_flattened_format(char *file)
out:
if (get_osrelease)
pc->flags2 |= GET_OSRELEASE;
+
+ if (get_build_id)
+ pc->flags2 |= GET_BUILD_ID;
}
static int
@@ -368,26 +383,39 @@ dump_flat_header(FILE *ofp)
}
static void
-flattened_format_get_osrelease(char *file)
+flattened_format_get_common(char *file, char *key, ulonglong flag)
{
int c;
FILE *pipe;
- char buf[BUFSIZE], *p1, *p2;
+ char keybuf[BUFSIZE], buf[BUFSIZE], *p1, *p2;
- c = strlen("OSRELEASE=");
+ sprintf(keybuf, "%s=", key);
+ c = strlen(keybuf);
sprintf(buf, "/usr/bin/strings -n %d %s", c, file);
if ((pipe = popen(buf, "r")) == NULL)
return;
for (c = 0; (c < 100) && fgets(buf, BUFSIZE-1, pipe); c++) {
- if ((p1 = strstr(buf, "OSRELEASE="))) {
+ if ((p1 = strstr(buf, keybuf))) {
p2 = strstr(p1, "=");
fprintf(fp, "%s", p2+1);
flattened_format = TRUE;
- pc->flags2 |= GET_OSRELEASE;
+ pc->flags2 |= flag;
}
}
pclose(pipe);
}
+
+static void
+flattened_format_get_osrelease(char *file)
+{
+ flattened_format_get_common(file, "OSRELEASE", GET_OSRELEASE);
+}
+
+static void
+flattened_format_get_build_id(char *file)
+{
+ flattened_format_get_common(file, "BUILD-ID", GET_BUILD_ID);
+}
diff --git a/netdump.c b/netdump.c
index c7ff009..ba1c6c4 100644
--- a/netdump.c
+++ b/netdump.c
@@ -50,6 +50,7 @@ static int proc_kcore_init_64(FILE *, int);
static char *get_regs_from_note(char *, ulong *, ulong *);
static void kdump_get_osrelease(void);
static char *vmcoreinfo_read_string(const char *);
+static void kdump_get_build_id(void);
#define ELFSTORE 1
@@ -477,6 +478,10 @@ is_netdump(char *file, ulong source_query)
get_log_from_vmcoreinfo(file);
}
+ if ((source_query == KDUMP_LOCAL) &&
+ (pc->flags2 & GET_BUILD_ID))
+ kdump_get_build_id();
+
return nd->header_size;
bailout:
@@ -4996,6 +5001,18 @@ kdump_get_osrelease(void)
pc->flags2 &= ~GET_OSRELEASE;
}
+static void
+kdump_get_build_id(void)
+{
+ char *string;
+
+ if ((string = vmcoreinfo_read_string("BUILD-ID"))) {
+ fprintf(fp, "%s\n", string);
+ free(string);
+ } else
+ pc->flags2 &= ~GET_BUILD_ID;
+}
+
void
dump_registers_for_qemu_mem_dump(void)
{
--
2.47.3
2 weeks, 1 day
[PATCH v4] make the MAX_MALLOC_BUFS customizable
by Shivang Upadhyay
the default (and minimum) value of MAX_MALLOC_BUFS is 3072, but can be
changed from the command line with the flag `--max-malloc-bufs`.
Signed-off-by: Shivang Upadhyay <shivangu(a)linux.ibm.com>
---
defs.h | 8 ++++++++
main.c | 4 ++++
tools.c | 9 ++++++---
3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/defs.h b/defs.h
index ab4aee8..f3de0e5 100644
--- a/defs.h
+++ b/defs.h
@@ -188,6 +188,13 @@
#define HIST_BLKSIZE (4096)
static inline int string_exists(char *s) { return (s ? TRUE : FALSE); }
+
+static inline int max(int a, int b) {
+ if (a > b)
+ return a;
+ return b;
+}
+
#define STREQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \
(strcmp((char *)(A), (char *)(B)) == 0))
#define STRNEQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \
@@ -5608,6 +5615,7 @@ void exec_args_input_file(struct command_table_entry *, struct args_input_file *
/*
* tools.c
*/
+extern int MAX_MALLOC_BUFS;
FILE *set_error(char *);
int __error(int, char *, ...);
#define error __error /* avoid conflict with gdb error() */
diff --git a/main.c b/main.c
index 71bcc15..247779c 100644
--- a/main.c
+++ b/main.c
@@ -46,6 +46,7 @@ static struct option long_options[] = {
{"version", 0, 0, 0},
{"buildinfo", 0, 0, 0},
{"cpus", required_argument, 0, 0},
+ {"max-malloc-bufs", required_argument, 0, 0},
{"no_ikconfig", 0, 0, 0},
{"hyper", 0, 0, 0},
{"p2m_mfn", required_argument, 0, 0},
@@ -163,6 +164,9 @@ main(int argc, char **argv)
else if (STREQ(long_options[option_index].name, "cpus"))
kt->cpus_override = optarg;
+ else if (STREQ(long_options[option_index].name, "max-malloc-bufs"))
+ MAX_MALLOC_BUFS = max(MAX_MALLOC_BUFS, atoi(optarg));
+
else if (STREQ(long_options[option_index].name, "hyper"))
pc->flags |= XEN_HYPER;
diff --git a/tools.c b/tools.c
index a9ad18d..69250c4 100644
--- a/tools.c
+++ b/tools.c
@@ -5698,7 +5698,7 @@ ll_power(long long base, long long exp)
#define B32K (4)
#define SHARED_BUF_SIZES (B32K+1)
-#define MAX_MALLOC_BUFS (2000)
+int MAX_MALLOC_BUFS = 3072; /* can be changed from command line args */
#define MAX_CACHE_SIZE (KILOBYTES(32))
struct shared_bufs {
@@ -5723,7 +5723,7 @@ struct shared_bufs {
long buf_8K_ovf;
long buf_32K_ovf;
int buf_inuse[SHARED_BUF_SIZES];
- char *malloc_bp[MAX_MALLOC_BUFS];
+ char **malloc_bp;
long smallest;
long largest;
long embedded;
@@ -5744,6 +5744,7 @@ buf_init(void)
bp->smallest = 0x7fffffff;
bp->total = 0.0;
+ bp->malloc_bp = (char**) calloc(MAX_MALLOC_BUFS * sizeof(char*), 1);
#ifdef VALGRIND
VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_1K, sizeof(bp->buf_1K));
@@ -6130,7 +6131,9 @@ getbuf(long reqsize)
dump_shared_bufs();
return ((char *)(long)
- error(FATAL, "cannot allocate any more memory!\n"));
+ error(FATAL, "cannot allocate any more memory!\n"
+ "try increasing --max-malloc-bufs (current value : %d)\n",
+ MAX_MALLOC_BUFS));
}
/*
--
2.52.0
2 weeks, 2 days