forked from piyl8cs5f/Iot_Cs_best
You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
10169 lines
304 KiB
10169 lines
304 KiB
diff -Nupr old/fs/jffs2/acl.c new/fs/jffs2/acl.c
|
|
--- old/fs/jffs2/acl.c 2022-05-09 17:15:24.350000000 +0800
|
|
+++ new/fs/jffs2/acl.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,307 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2006 NEC Corporation
|
|
- *
|
|
- * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/sched.h>
|
|
-#include <linux/time.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/xattr.h>
|
|
-#include <linux/posix_acl_xattr.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include "nodelist.h"
|
|
-
|
|
-static size_t jffs2_acl_size(int count)
|
|
-{
|
|
- if (count <= 4) {
|
|
- return sizeof(struct jffs2_acl_header)
|
|
- + count * sizeof(struct jffs2_acl_entry_short);
|
|
- } else {
|
|
- return sizeof(struct jffs2_acl_header)
|
|
- + 4 * sizeof(struct jffs2_acl_entry_short)
|
|
- + (count - 4) * sizeof(struct jffs2_acl_entry);
|
|
- }
|
|
-}
|
|
-
|
|
-static int jffs2_acl_count(size_t size)
|
|
-{
|
|
- size_t s;
|
|
-
|
|
- size -= sizeof(struct jffs2_acl_header);
|
|
- if (size < 4 * sizeof(struct jffs2_acl_entry_short)) {
|
|
- if (size % sizeof(struct jffs2_acl_entry_short))
|
|
- return -1;
|
|
- return size / sizeof(struct jffs2_acl_entry_short);
|
|
- } else {
|
|
- s = size - 4 * sizeof(struct jffs2_acl_entry_short);
|
|
- if (s % sizeof(struct jffs2_acl_entry))
|
|
- return -1;
|
|
- return s / sizeof(struct jffs2_acl_entry) + 4;
|
|
- }
|
|
-}
|
|
-
|
|
-static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size)
|
|
-{
|
|
- void *end = value + size;
|
|
- struct jffs2_acl_header *header = value;
|
|
- struct jffs2_acl_entry *entry;
|
|
- struct posix_acl *acl;
|
|
- uint32_t ver;
|
|
- int i, count;
|
|
-
|
|
- if (!value)
|
|
- return NULL;
|
|
- if (size < sizeof(struct jffs2_acl_header))
|
|
- return ERR_PTR(-EINVAL);
|
|
- ver = je32_to_cpu(header->a_version);
|
|
- if (ver != JFFS2_ACL_VERSION) {
|
|
- JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver);
|
|
- return ERR_PTR(-EINVAL);
|
|
- }
|
|
-
|
|
- value += sizeof(struct jffs2_acl_header);
|
|
- count = jffs2_acl_count(size);
|
|
- if (count < 0)
|
|
- return ERR_PTR(-EINVAL);
|
|
- if (count == 0)
|
|
- return NULL;
|
|
-
|
|
- acl = posix_acl_alloc(count, GFP_KERNEL);
|
|
- if (!acl)
|
|
- return ERR_PTR(-ENOMEM);
|
|
-
|
|
- for (i=0; i < count; i++) {
|
|
- entry = value;
|
|
- if (value + sizeof(struct jffs2_acl_entry_short) > end)
|
|
- goto fail;
|
|
- acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag);
|
|
- acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm);
|
|
- switch (acl->a_entries[i].e_tag) {
|
|
- case ACL_USER_OBJ:
|
|
- case ACL_GROUP_OBJ:
|
|
- case ACL_MASK:
|
|
- case ACL_OTHER:
|
|
- value += sizeof(struct jffs2_acl_entry_short);
|
|
- break;
|
|
-
|
|
- case ACL_USER:
|
|
- value += sizeof(struct jffs2_acl_entry);
|
|
- if (value > end)
|
|
- goto fail;
|
|
- acl->a_entries[i].e_uid =
|
|
- make_kuid(&init_user_ns,
|
|
- je32_to_cpu(entry->e_id));
|
|
- break;
|
|
- case ACL_GROUP:
|
|
- value += sizeof(struct jffs2_acl_entry);
|
|
- if (value > end)
|
|
- goto fail;
|
|
- acl->a_entries[i].e_gid =
|
|
- make_kgid(&init_user_ns,
|
|
- je32_to_cpu(entry->e_id));
|
|
- break;
|
|
-
|
|
- default:
|
|
- goto fail;
|
|
- }
|
|
- }
|
|
- if (value != end)
|
|
- goto fail;
|
|
- return acl;
|
|
- fail:
|
|
- posix_acl_release(acl);
|
|
- return ERR_PTR(-EINVAL);
|
|
-}
|
|
-
|
|
-static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size)
|
|
-{
|
|
- struct jffs2_acl_header *header;
|
|
- struct jffs2_acl_entry *entry;
|
|
- void *e;
|
|
- size_t i;
|
|
-
|
|
- *size = jffs2_acl_size(acl->a_count);
|
|
- header = kmalloc(struct_size(header, a_entries, acl->a_count),
|
|
- GFP_KERNEL);
|
|
- if (!header)
|
|
- return ERR_PTR(-ENOMEM);
|
|
- header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
|
|
- e = header + 1;
|
|
- for (i=0; i < acl->a_count; i++) {
|
|
- const struct posix_acl_entry *acl_e = &acl->a_entries[i];
|
|
- entry = e;
|
|
- entry->e_tag = cpu_to_je16(acl_e->e_tag);
|
|
- entry->e_perm = cpu_to_je16(acl_e->e_perm);
|
|
- switch(acl_e->e_tag) {
|
|
- case ACL_USER:
|
|
- entry->e_id = cpu_to_je32(
|
|
- from_kuid(&init_user_ns, acl_e->e_uid));
|
|
- e += sizeof(struct jffs2_acl_entry);
|
|
- break;
|
|
- case ACL_GROUP:
|
|
- entry->e_id = cpu_to_je32(
|
|
- from_kgid(&init_user_ns, acl_e->e_gid));
|
|
- e += sizeof(struct jffs2_acl_entry);
|
|
- break;
|
|
-
|
|
- case ACL_USER_OBJ:
|
|
- case ACL_GROUP_OBJ:
|
|
- case ACL_MASK:
|
|
- case ACL_OTHER:
|
|
- e += sizeof(struct jffs2_acl_entry_short);
|
|
- break;
|
|
-
|
|
- default:
|
|
- goto fail;
|
|
- }
|
|
- }
|
|
- return header;
|
|
- fail:
|
|
- kfree(header);
|
|
- return ERR_PTR(-EINVAL);
|
|
-}
|
|
-
|
|
-struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
|
|
-{
|
|
- struct posix_acl *acl;
|
|
- char *value = NULL;
|
|
- int rc, xprefix;
|
|
-
|
|
- switch (type) {
|
|
- case ACL_TYPE_ACCESS:
|
|
- xprefix = JFFS2_XPREFIX_ACL_ACCESS;
|
|
- break;
|
|
- case ACL_TYPE_DEFAULT:
|
|
- xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
|
|
- break;
|
|
- default:
|
|
- BUG();
|
|
- }
|
|
- rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0);
|
|
- if (rc > 0) {
|
|
- value = kmalloc(rc, GFP_KERNEL);
|
|
- if (!value)
|
|
- return ERR_PTR(-ENOMEM);
|
|
- rc = do_jffs2_getxattr(inode, xprefix, "", value, rc);
|
|
- }
|
|
- if (rc > 0) {
|
|
- acl = jffs2_acl_from_medium(value, rc);
|
|
- } else if (rc == -ENODATA || rc == -ENOSYS) {
|
|
- acl = NULL;
|
|
- } else {
|
|
- acl = ERR_PTR(rc);
|
|
- }
|
|
- kfree(value);
|
|
- return acl;
|
|
-}
|
|
-
|
|
-static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *acl)
|
|
-{
|
|
- char *value = NULL;
|
|
- size_t size = 0;
|
|
- int rc;
|
|
-
|
|
- if (acl) {
|
|
- value = jffs2_acl_to_medium(acl, &size);
|
|
- if (IS_ERR(value))
|
|
- return PTR_ERR(value);
|
|
- }
|
|
- rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
|
|
- if (!value && rc == -ENODATA)
|
|
- rc = 0;
|
|
- kfree(value);
|
|
-
|
|
- return rc;
|
|
-}
|
|
-
|
|
-int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
|
-{
|
|
- int rc, xprefix;
|
|
-
|
|
- switch (type) {
|
|
- case ACL_TYPE_ACCESS:
|
|
- xprefix = JFFS2_XPREFIX_ACL_ACCESS;
|
|
- if (acl) {
|
|
- umode_t mode;
|
|
-
|
|
- rc = posix_acl_update_mode(inode, &mode, &acl);
|
|
- if (rc)
|
|
- return rc;
|
|
- if (inode->i_mode != mode) {
|
|
- struct iattr attr;
|
|
-
|
|
- attr.ia_valid = ATTR_MODE | ATTR_CTIME;
|
|
- attr.ia_mode = mode;
|
|
- attr.ia_ctime = current_time(inode);
|
|
- rc = jffs2_do_setattr(inode, &attr);
|
|
- if (rc < 0)
|
|
- return rc;
|
|
- }
|
|
- }
|
|
- break;
|
|
- case ACL_TYPE_DEFAULT:
|
|
- xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
|
|
- if (!S_ISDIR(inode->i_mode))
|
|
- return acl ? -EACCES : 0;
|
|
- break;
|
|
- default:
|
|
- return -EINVAL;
|
|
- }
|
|
- rc = __jffs2_set_acl(inode, xprefix, acl);
|
|
- if (!rc)
|
|
- set_cached_acl(inode, type, acl);
|
|
- return rc;
|
|
-}
|
|
-
|
|
-int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, umode_t *i_mode)
|
|
-{
|
|
- struct posix_acl *default_acl, *acl;
|
|
- int rc;
|
|
-
|
|
- cache_no_acl(inode);
|
|
-
|
|
- rc = posix_acl_create(dir_i, i_mode, &default_acl, &acl);
|
|
- if (rc)
|
|
- return rc;
|
|
-
|
|
- if (default_acl) {
|
|
- set_cached_acl(inode, ACL_TYPE_DEFAULT, default_acl);
|
|
- posix_acl_release(default_acl);
|
|
- }
|
|
- if (acl) {
|
|
- set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
|
|
- posix_acl_release(acl);
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
-int jffs2_init_acl_post(struct inode *inode)
|
|
-{
|
|
- int rc;
|
|
-
|
|
- if (inode->i_default_acl) {
|
|
- rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, inode->i_default_acl);
|
|
- if (rc)
|
|
- return rc;
|
|
- }
|
|
-
|
|
- if (inode->i_acl) {
|
|
- rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, inode->i_acl);
|
|
- if (rc)
|
|
- return rc;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
diff -Nupr old/fs/jffs2/acl.h new/fs/jffs2/acl.h
|
|
--- old/fs/jffs2/acl.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/acl.h 2022-05-10 14:52:22.930000000 +0800
|
|
@@ -8,6 +8,8 @@
|
|
* For licensing information, see the file 'LICENCE' in this directory.
|
|
*
|
|
*/
|
|
+#ifndef _JFFS2_ACL_H_
|
|
+#define _JFFS2_ACL_H_
|
|
|
|
struct jffs2_acl_entry {
|
|
jint16_t e_tag;
|
|
@@ -27,11 +29,6 @@ struct jffs2_acl_header {
|
|
|
|
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
|
|
|
|
-struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
|
|
-int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
|
|
-extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *);
|
|
-extern int jffs2_init_acl_post(struct inode *);
|
|
-
|
|
#else
|
|
|
|
#define jffs2_get_acl (NULL)
|
|
@@ -40,3 +37,4 @@ extern int jffs2_init_acl_post(struct in
|
|
#define jffs2_init_acl_post(inode) (0)
|
|
|
|
#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
|
|
+#endif /* _JFFS2_ACL_H_ */
|
|
diff -Nupr old/fs/jffs2/background.c new/fs/jffs2/background.c
|
|
--- old/fs/jffs2/background.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/background.c 2022-05-10 14:53:26.200000000 +0800
|
|
@@ -10,156 +10,113 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include <linux/completion.h>
|
|
-#include <linux/sched/signal.h>
|
|
-#include <linux/freezer.h>
|
|
-#include <linux/kthread.h>
|
|
+#include <stdio.h>
|
|
#include "nodelist.h"
|
|
+#include "vfs_jffs2.h"
|
|
+#include "mtd_partition.h"
|
|
|
|
+#define GC_THREAD_FLAG_TRIG 1
|
|
+#define GC_THREAD_FLAG_STOP 2
|
|
+#define GC_THREAD_FLAG_HAS_EXIT 4
|
|
|
|
-static int jffs2_garbage_collect_thread(void *);
|
|
+extern struct MtdNorDev jffs2_dev_list[CONFIG_MTD_PATTITION_NUM];
|
|
+static void jffs2_garbage_collect_thread(unsigned long data);
|
|
|
|
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
|
|
{
|
|
- assert_spin_locked(&c->erase_completion_lock);
|
|
- if (c->gc_task && jffs2_thread_should_wake(c))
|
|
- send_sig(SIGHUP, c->gc_task, 1);
|
|
+ struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
+ /* Wake up the thread */
|
|
+ jffs2_dbg(1, "jffs2_garbage_collect_trigger\n");
|
|
+ LOS_EventWrite(&sb->s_gc_thread_flags, GC_THREAD_FLAG_TRIG);
|
|
}
|
|
|
|
/* This must only ever be called when no GC thread is currently running */
|
|
-int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
|
|
+void jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
|
|
{
|
|
- struct task_struct *tsk;
|
|
- int ret = 0;
|
|
+ struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
+ TSK_INIT_PARAM_S stGcTask;
|
|
|
|
- BUG_ON(c->gc_task);
|
|
+ if (c == NULL)
|
|
+ return;
|
|
|
|
- init_completion(&c->gc_thread_start);
|
|
- init_completion(&c->gc_thread_exit);
|
|
+ if (sb->s_root == NULL)
|
|
+ return;
|
|
|
|
- tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
|
|
- if (IS_ERR(tsk)) {
|
|
- pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
|
|
- -PTR_ERR(tsk));
|
|
- complete(&c->gc_thread_exit);
|
|
- ret = PTR_ERR(tsk);
|
|
- } else {
|
|
- /* Wait for it... */
|
|
- jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
|
|
- wait_for_completion(&c->gc_thread_start);
|
|
- ret = tsk->pid;
|
|
+ LOS_EventInit(&sb->s_gc_thread_flags);
|
|
+
|
|
+ /* Start the thread. Doesn't matter if it fails -- it's only an
|
|
+ * optimisation anyway */
|
|
+ (void)memset_s(&stGcTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
|
|
+
|
|
+ stGcTask.pfnTaskEntry = (TSK_ENTRY_FUNC)jffs2_garbage_collect_thread;
|
|
+ stGcTask.auwArgs[0] = (UINTPTR)c;
|
|
+ stGcTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
|
|
+ stGcTask.pcName = "jffs2_gc_thread";
|
|
+#ifdef LOSCFG_KERNEL_SMP
|
|
+ unsigned int i;
|
|
+ for (i = 0; i < CONFIG_MTD_PATTITION_NUM; i++) {
|
|
+ if (sb->s_dev == &jffs2_dev_list[i])
|
|
+ break;
|
|
}
|
|
+ stGcTask.usCpuAffiMask = CPUID_TO_AFFI_MASK(i % LOSCFG_KERNEL_CORE_NUM);
|
|
+#endif
|
|
+ stGcTask.usTaskPrio = JFFS2_GC_THREAD_PRIORITY;
|
|
|
|
- return ret;
|
|
+ if (LOS_TaskCreate(&sb->s_gc_thread, &stGcTask))
|
|
+ JFFS2_ERROR("Create gc task failed!!!\n");
|
|
}
|
|
|
|
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
|
|
{
|
|
- int wait = 0;
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- if (c->gc_task) {
|
|
- jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid);
|
|
- send_sig(SIGKILL, c->gc_task, 1);
|
|
- wait = 1;
|
|
- }
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- if (wait)
|
|
- wait_for_completion(&c->gc_thread_exit);
|
|
+ struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
+
|
|
+ JFFS2_DEBUG("jffs2_stop_garbage_collect_thread\n");
|
|
+ /* Stop the thread and wait for it if necessary */
|
|
+
|
|
+ LOS_EventWrite(&sb->s_gc_thread_flags, GC_THREAD_FLAG_STOP);
|
|
+
|
|
+ JFFS2_DEBUG("jffs2_stop_garbage_collect_thread wait\n");
|
|
+
|
|
+ (void)LOS_EventRead(&sb->s_gc_thread_flags,
|
|
+ GC_THREAD_FLAG_HAS_EXIT,
|
|
+ LOS_WAITMODE_OR | LOS_WAITMODE_CLR,
|
|
+ LOS_WAIT_FOREVER);
|
|
+
|
|
+ // Kill and free the resources ... this is safe due to the flag
|
|
+ // from the thread.
|
|
+ (void)LOS_TaskDelete(sb->s_gc_thread);
|
|
+ (void)LOS_EventWrite(&sb->s_gc_thread_flags, 0xFFFFFFFF);
|
|
}
|
|
|
|
-static int jffs2_garbage_collect_thread(void *_c)
|
|
+static void jffs2_garbage_collect_thread(unsigned long data)
|
|
{
|
|
- struct jffs2_sb_info *c = _c;
|
|
- sigset_t hupmask;
|
|
+ struct jffs2_sb_info *c = (struct jffs2_sb_info *)data;
|
|
+ struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
+ unsigned int flag = 0;
|
|
+
|
|
+ jffs2_dbg(1, "jffs2_garbage_collect_thread START\n");
|
|
+ while(1) {
|
|
+ flag = LOS_EventRead(&sb->s_gc_thread_flags,
|
|
+ GC_THREAD_FLAG_TRIG | GC_THREAD_FLAG_STOP,
|
|
+ LOS_WAITMODE_OR | LOS_WAITMODE_CLR,
|
|
+ LOS_WAIT_FOREVER
|
|
+ );
|
|
+ if (flag & GC_THREAD_FLAG_STOP)
|
|
+ break;
|
|
|
|
- siginitset(&hupmask, sigmask(SIGHUP));
|
|
- allow_signal(SIGKILL);
|
|
- allow_signal(SIGSTOP);
|
|
- allow_signal(SIGHUP);
|
|
-
|
|
- c->gc_task = current;
|
|
- complete(&c->gc_thread_start);
|
|
-
|
|
- set_user_nice(current, 10);
|
|
-
|
|
- set_freezable();
|
|
- for (;;) {
|
|
- sigprocmask(SIG_UNBLOCK, &hupmask, NULL);
|
|
- again:
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- if (!jffs2_thread_should_wake(c)) {
|
|
- set_current_state (TASK_INTERRUPTIBLE);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- jffs2_dbg(1, "%s(): sleeping...\n", __func__);
|
|
- schedule();
|
|
- } else {
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- }
|
|
- /* Problem - immediately after bootup, the GCD spends a lot
|
|
- * of time in places like jffs2_kill_fragtree(); so much so
|
|
- * that userspace processes (like gdm and X) are starved
|
|
- * despite plenty of cond_resched()s and renicing. Yield()
|
|
- * doesn't help, either (presumably because userspace and GCD
|
|
- * are generally competing for a higher latency resource -
|
|
- * disk).
|
|
- * This forces the GCD to slow the hell down. Pulling an
|
|
- * inode in with read_inode() is much preferable to having
|
|
- * the GC thread get there first. */
|
|
- schedule_timeout_interruptible(msecs_to_jiffies(50));
|
|
-
|
|
- if (kthread_should_stop()) {
|
|
- jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
|
|
- goto die;
|
|
- }
|
|
+ jffs2_dbg(1, "jffs2: GC THREAD GC BEGIN\n");
|
|
|
|
- /* Put_super will send a SIGKILL and then wait on the sem.
|
|
- */
|
|
- while (signal_pending(current) || freezing(current)) {
|
|
- unsigned long signr;
|
|
-
|
|
- if (try_to_freeze())
|
|
- goto again;
|
|
-
|
|
- signr = kernel_dequeue_signal();
|
|
-
|
|
- switch(signr) {
|
|
- case SIGSTOP:
|
|
- jffs2_dbg(1, "%s(): SIGSTOP received\n",
|
|
- __func__);
|
|
- kernel_signal_stop();
|
|
- break;
|
|
-
|
|
- case SIGKILL:
|
|
- jffs2_dbg(1, "%s(): SIGKILL received\n",
|
|
- __func__);
|
|
- goto die;
|
|
-
|
|
- case SIGHUP:
|
|
- jffs2_dbg(1, "%s(): SIGHUP received\n",
|
|
- __func__);
|
|
- break;
|
|
- default:
|
|
- jffs2_dbg(1, "%s(): signal %ld received\n",
|
|
- __func__, signr);
|
|
- }
|
|
- }
|
|
- /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
|
|
- sigprocmask(SIG_BLOCK, &hupmask, NULL);
|
|
+ if (sb->s_root == NULL)
|
|
+ return;
|
|
|
|
- jffs2_dbg(1, "%s(): pass\n", __func__);
|
|
if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
|
|
- pr_notice("No space for garbage collection. Aborting GC thread\n");
|
|
- goto die;
|
|
+ PRINTK("No space for garbage collection. "
|
|
+ "Aborting JFFS2 GC thread\n");
|
|
+ break;
|
|
}
|
|
+ jffs2_dbg(1, "jffs2: GC THREAD GC END\n");
|
|
}
|
|
- die:
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- c->gc_task = NULL;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- complete_and_exit(&c->gc_thread_exit, 0);
|
|
+ JFFS2_DEBUG("jffs2_garbage_collect_thread EXIT\n");
|
|
+ LOS_EventWrite(&sb->s_gc_thread_flags, GC_THREAD_FLAG_HAS_EXIT);
|
|
}
|
|
diff -Nupr old/fs/jffs2/build.c new/fs/jffs2/build.c
|
|
--- old/fs/jffs2/build.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/build.c 2022-05-10 15:01:38.800000000 +0800
|
|
@@ -10,15 +10,13 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
+#include <dirent.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/vmalloc.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include <linux/mm.h> /* kvfree() */
|
|
+#include <mtd_dev.h>
|
|
#include "nodelist.h"
|
|
+#include "los_exc.h"
|
|
|
|
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
|
|
struct jffs2_inode_cache *, struct jffs2_full_dirent **);
|
|
@@ -50,8 +48,7 @@ next_inode(int *i, struct jffs2_inode_ca
|
|
|
|
|
|
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
|
|
- struct jffs2_inode_cache *ic,
|
|
- int *dir_hardlinks)
|
|
+ struct jffs2_inode_cache *ic)
|
|
{
|
|
struct jffs2_full_dirent *fd;
|
|
|
|
@@ -372,20 +369,24 @@ int jffs2_do_mount_fs(struct jffs2_sb_in
|
|
int ret;
|
|
int i;
|
|
int size;
|
|
+ struct super_block *sb;
|
|
+ struct MtdNorDev *device;
|
|
|
|
c->free_size = c->flash_size;
|
|
c->nr_blocks = c->flash_size / c->sector_size;
|
|
- size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
|
|
+ sb = OFNI_BS_2SFFJ(c);
|
|
+ device = (struct MtdNorDev *)(sb->s_dev);
|
|
+ size = sizeof(struct jffs2_eraseblock) *(c->nr_blocks + device->blockStart);
|
|
#ifndef __ECOS
|
|
if (jffs2_blocks_use_vmalloc(c))
|
|
- c->blocks = vzalloc(size);
|
|
+ c->blocks = malloc(size);
|
|
else
|
|
#endif
|
|
c->blocks = kzalloc(size, GFP_KERNEL);
|
|
if (!c->blocks)
|
|
return -ENOMEM;
|
|
|
|
- for (i=0; i<c->nr_blocks; i++) {
|
|
+ for (i = device->blockStart; i < c->nr_blocks + device->blockStart; i++) {
|
|
INIT_LIST_HEAD(&c->blocks[i].list);
|
|
c->blocks[i].offset = i * c->sector_size;
|
|
c->blocks[i].free_size = c->sector_size;
|
|
diff -Nupr old/fs/jffs2/compr.c new/fs/jffs2/compr.c
|
|
--- old/fs/jffs2/compr.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/compr.c 2022-05-10 15:02:17.440000000 +0800
|
|
@@ -12,14 +12,13 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include "compr.h"
|
|
+#include "jffs2.h"
|
|
+#include "user_copy.h"
|
|
|
|
-static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
|
|
-
|
|
+static spinlock_t jffs2_compressor_list_lock;
|
|
/* Available compressors are on this list */
|
|
-static LIST_HEAD(jffs2_compressor_list);
|
|
+static LINUX_LIST_HEAD(jffs2_compressor_list);
|
|
|
|
/* Actual compression mode */
|
|
static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
|
|
@@ -71,15 +70,15 @@ static int jffs2_is_best_compression(str
|
|
* could not be compressed; probably because we couldn't find the requested
|
|
* compression mode.
|
|
*/
|
|
-static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
|
|
- unsigned char **cpage_out, u32 *datalen, u32 *cdatalen)
|
|
+static int jffs2_selected_compress(uint8_t compr, unsigned char *data_in,
|
|
+ unsigned char **cpage_out, uint32_t *datalen, uint32_t *cdatalen)
|
|
{
|
|
struct jffs2_compressor *this;
|
|
int err, ret = JFFS2_COMPR_NONE;
|
|
uint32_t orig_slen, orig_dlen;
|
|
- char *output_buf;
|
|
+ unsigned char *output_buf;
|
|
|
|
- output_buf = kmalloc(*cdatalen, GFP_KERNEL);
|
|
+ output_buf = kmalloc(*cdatalen,GFP_KERNEL);
|
|
if (!output_buf) {
|
|
pr_warn("No memory for compressor allocation. Compression failed.\n");
|
|
return ret;
|
|
@@ -265,11 +264,16 @@ int jffs2_decompress(struct jffs2_sb_inf
|
|
switch (comprtype & 0xff) {
|
|
case JFFS2_COMPR_NONE:
|
|
/* This should be special-cased elsewhere, but we might as well deal with it */
|
|
- memcpy(data_out, cdata_in, datalen);
|
|
+ if (LOS_CopyFromKernel(data_out, datalen, cdata_in, datalen) != 0) {
|
|
+ return -EFAULT;
|
|
+ }
|
|
none_stat_decompr_blocks++;
|
|
break;
|
|
case JFFS2_COMPR_ZERO:
|
|
- memset(data_out, 0, datalen);
|
|
+ ret = LOS_UserMemClear(data_out, datalen);
|
|
+ if (ret != 0) {
|
|
+ return ret;
|
|
+ }
|
|
break;
|
|
default:
|
|
spin_lock(&jffs2_compressor_list_lock);
|
|
diff -Nupr old/fs/jffs2/compr.h new/fs/jffs2/compr.h
|
|
--- old/fs/jffs2/compr.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/compr.h 2022-05-10 15:02:50.040000000 +0800
|
|
@@ -13,18 +13,20 @@
|
|
#define __JFFS2_COMPR_H__
|
|
|
|
#include <linux/kernel.h>
|
|
-#include <linux/vmalloc.h>
|
|
#include <linux/list.h>
|
|
#include <linux/types.h>
|
|
#include <linux/string.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/errno.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include "jffs2_fs_i.h"
|
|
-#include "jffs2_fs_sb.h"
|
|
+#include <linux/stat.h>
|
|
#include "nodelist.h"
|
|
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+extern "C" {
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
#define JFFS2_RUBINMIPS_PRIORITY 10
|
|
#define JFFS2_DYNRUBIN_PRIORITY 20
|
|
#define JFFS2_LZARI_PRIORITY 30
|
|
@@ -102,4 +104,10 @@ int jffs2_lzo_init(void);
|
|
void jffs2_lzo_exit(void);
|
|
#endif
|
|
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+}
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
#endif /* __JFFS2_COMPR_H__ */
|
|
diff -Nupr old/fs/jffs2/compr_lzo.c new/fs/jffs2/compr_lzo.c
|
|
--- old/fs/jffs2/compr_lzo.c 2022-05-09 17:15:24.350000000 +0800
|
|
+++ new/fs/jffs2/compr_lzo.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,110 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2007 Nokia Corporation. All rights reserved.
|
|
- * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
|
|
- *
|
|
- * Created by Richard Purdie <rpurdie@openedhand.com>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/sched.h>
|
|
-#include <linux/vmalloc.h>
|
|
-#include <linux/init.h>
|
|
-#include <linux/lzo.h>
|
|
-#include "compr.h"
|
|
-
|
|
-static void *lzo_mem;
|
|
-static void *lzo_compress_buf;
|
|
-static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
|
|
-
|
|
-static void free_workspace(void)
|
|
-{
|
|
- vfree(lzo_mem);
|
|
- vfree(lzo_compress_buf);
|
|
-}
|
|
-
|
|
-static int __init alloc_workspace(void)
|
|
-{
|
|
- lzo_mem = vmalloc(LZO1X_MEM_COMPRESS);
|
|
- lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
|
|
-
|
|
- if (!lzo_mem || !lzo_compress_buf) {
|
|
- free_workspace();
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out,
|
|
- uint32_t *sourcelen, uint32_t *dstlen)
|
|
-{
|
|
- size_t compress_size;
|
|
- int ret;
|
|
-
|
|
- mutex_lock(&deflate_mutex);
|
|
- ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
|
|
- if (ret != LZO_E_OK)
|
|
- goto fail;
|
|
-
|
|
- if (compress_size > *dstlen)
|
|
- goto fail;
|
|
-
|
|
- memcpy(cpage_out, lzo_compress_buf, compress_size);
|
|
- mutex_unlock(&deflate_mutex);
|
|
-
|
|
- *dstlen = compress_size;
|
|
- return 0;
|
|
-
|
|
- fail:
|
|
- mutex_unlock(&deflate_mutex);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
|
|
- uint32_t srclen, uint32_t destlen)
|
|
-{
|
|
- size_t dl = destlen;
|
|
- int ret;
|
|
-
|
|
- ret = lzo1x_decompress_safe(data_in, srclen, cpage_out, &dl);
|
|
-
|
|
- if (ret != LZO_E_OK || dl != destlen)
|
|
- return -1;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static struct jffs2_compressor jffs2_lzo_comp = {
|
|
- .priority = JFFS2_LZO_PRIORITY,
|
|
- .name = "lzo",
|
|
- .compr = JFFS2_COMPR_LZO,
|
|
- .compress = &jffs2_lzo_compress,
|
|
- .decompress = &jffs2_lzo_decompress,
|
|
- .disabled = 0,
|
|
-};
|
|
-
|
|
-int __init jffs2_lzo_init(void)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- ret = alloc_workspace();
|
|
- if (ret < 0)
|
|
- return ret;
|
|
-
|
|
- ret = jffs2_register_compressor(&jffs2_lzo_comp);
|
|
- if (ret)
|
|
- free_workspace();
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-void jffs2_lzo_exit(void)
|
|
-{
|
|
- jffs2_unregister_compressor(&jffs2_lzo_comp);
|
|
- free_workspace();
|
|
-}
|
|
diff -Nupr old/fs/jffs2/compr_rtime.c new/fs/jffs2/compr_rtime.c
|
|
--- old/fs/jffs2/compr_rtime.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/compr_rtime.c 2022-05-10 15:05:05.970000000 +0800
|
|
@@ -25,7 +25,7 @@
|
|
#include <linux/types.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/string.h>
|
|
-#include <linux/jffs2.h>
|
|
+#include "jffs2.h"
|
|
#include "compr.h"
|
|
|
|
/* _compress returns the compressed size, -1 if bigger */
|
|
diff -Nupr old/fs/jffs2/compr_rubin.c new/fs/jffs2/compr_rubin.c
|
|
--- old/fs/jffs2/compr_rubin.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/compr_rubin.c 2022-05-10 15:05:51.830000000 +0800
|
|
@@ -10,15 +10,12 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/string.h>
|
|
#include <linux/types.h>
|
|
-#include <linux/jffs2.h>
|
|
#include <linux/errno.h>
|
|
+#include "jffs2.h"
|
|
#include "compr.h"
|
|
|
|
-
|
|
#define RUBIN_REG_SIZE 16
|
|
#define UPPER_BIT_RUBIN (((long) 1)<<(RUBIN_REG_SIZE-1))
|
|
#define LOWER_BITS_RUBIN ((((long) 1)<<(RUBIN_REG_SIZE-1))-1)
|
|
@@ -48,7 +45,7 @@ static inline void init_pushpull(struct
|
|
unsigned buflen, unsigned ofs,
|
|
unsigned reserve)
|
|
{
|
|
- pp->buf = buf;
|
|
+ pp->buf = (unsigned char *)buf;
|
|
pp->buflen = buflen;
|
|
pp->ofs = ofs;
|
|
pp->reserve = reserve;
|
|
@@ -267,7 +264,7 @@ static int rubin_do_compress(int bit_div
|
|
int pos=0;
|
|
struct rubin_state rs;
|
|
|
|
- init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32);
|
|
+ init_pushpull(&rs.pp, (char *)cpage_out, *dstlen * 8, 0, 32);
|
|
|
|
init_rubin(&rs, bit_divider, bits);
|
|
|
|
@@ -366,14 +363,14 @@ static int jffs2_dynrubin_compress(unsig
|
|
}
|
|
|
|
static void rubin_do_decompress(int bit_divider, int *bits,
|
|
- unsigned char *cdata_in,
|
|
+ unsigned char *cdata_in,
|
|
unsigned char *page_out, uint32_t srclen,
|
|
uint32_t destlen)
|
|
{
|
|
int outpos = 0;
|
|
struct rubin_state rs;
|
|
|
|
- init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
|
|
+ init_pushpull(&rs.pp, (char *)cdata_in, srclen, 0, 0);
|
|
init_decode(&rs, bit_divider, bits);
|
|
|
|
while (outpos < destlen)
|
|
diff -Nupr old/fs/jffs2/compr_zlib.c new/fs/jffs2/compr_zlib.c
|
|
--- old/fs/jffs2/compr_zlib.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/compr_zlib.c 2022-05-10 15:06:46.640000000 +0800
|
|
@@ -10,15 +10,10 @@
|
|
*
|
|
*/
|
|
|
|
-#if !defined(__KERNEL__) && !defined(__ECOS)
|
|
-#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
|
|
-#endif
|
|
-
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
-#include <linux/zlib.h>
|
|
+#include <zlib.h>
|
|
#include <linux/zutil.h>
|
|
+#include <linux/semaphore.h>
|
|
#include "nodelist.h"
|
|
#include "compr.h"
|
|
|
|
@@ -35,39 +30,8 @@ static DEFINE_MUTEX(deflate_mutex);
|
|
static DEFINE_MUTEX(inflate_mutex);
|
|
static z_stream inf_strm, def_strm;
|
|
|
|
-#ifdef __KERNEL__ /* Linux-only */
|
|
-#include <linux/vmalloc.h>
|
|
-#include <linux/init.h>
|
|
-#include <linux/mutex.h>
|
|
-
|
|
-static int __init alloc_workspaces(void)
|
|
-{
|
|
- def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
|
|
- MAX_MEM_LEVEL));
|
|
- if (!def_strm.workspace)
|
|
- return -ENOMEM;
|
|
-
|
|
- jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
|
|
- zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
|
|
- inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
|
|
- if (!inf_strm.workspace) {
|
|
- vfree(def_strm.workspace);
|
|
- return -ENOMEM;
|
|
- }
|
|
- jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
|
|
- zlib_inflate_workspacesize());
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void free_workspaces(void)
|
|
-{
|
|
- vfree(def_strm.workspace);
|
|
- vfree(inf_strm.workspace);
|
|
-}
|
|
-#else
|
|
#define alloc_workspaces() (0)
|
|
#define free_workspaces() do { } while(0)
|
|
-#endif /* __KERNEL__ */
|
|
|
|
static int jffs2_zlib_compress(unsigned char *data_in,
|
|
unsigned char *cpage_out,
|
|
@@ -80,7 +44,7 @@ static int jffs2_zlib_compress(unsigned
|
|
|
|
mutex_lock(&deflate_mutex);
|
|
|
|
- if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
|
|
+ if (Z_OK != deflateInit(&def_strm, 3)) {
|
|
pr_warn("deflateInit failed\n");
|
|
mutex_unlock(&deflate_mutex);
|
|
return -1;
|
|
@@ -98,21 +62,21 @@ static int jffs2_zlib_compress(unsigned
|
|
(*sourcelen-def_strm.total_in), def_strm.avail_out);
|
|
jffs2_dbg(1, "calling deflate with avail_in %ld, avail_out %ld\n",
|
|
def_strm.avail_in, def_strm.avail_out);
|
|
- ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
|
|
+ ret = deflate(&def_strm, Z_PARTIAL_FLUSH);
|
|
jffs2_dbg(1, "deflate returned with avail_in %ld, avail_out %ld, total_in %ld, total_out %ld\n",
|
|
def_strm.avail_in, def_strm.avail_out,
|
|
def_strm.total_in, def_strm.total_out);
|
|
if (ret != Z_OK) {
|
|
jffs2_dbg(1, "deflate in loop returned %d\n", ret);
|
|
- zlib_deflateEnd(&def_strm);
|
|
+ deflateEnd(&def_strm);
|
|
mutex_unlock(&deflate_mutex);
|
|
return -1;
|
|
}
|
|
}
|
|
def_strm.avail_out += STREAM_END_SPACE;
|
|
def_strm.avail_in = 0;
|
|
- ret = zlib_deflate(&def_strm, Z_FINISH);
|
|
- zlib_deflateEnd(&def_strm);
|
|
+ ret = deflate(&def_strm, Z_FINISH);
|
|
+ deflateEnd(&def_strm);
|
|
|
|
if (ret != Z_STREAM_END) {
|
|
jffs2_dbg(1, "final deflate returned %d\n", ret);
|
|
@@ -171,18 +135,18 @@ static int jffs2_zlib_decompress(unsigne
|
|
}
|
|
|
|
|
|
- if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
|
|
+ if (Z_OK != inflateInit2(&inf_strm, wbits)) {
|
|
pr_warn("inflateInit failed\n");
|
|
mutex_unlock(&inflate_mutex);
|
|
return 1;
|
|
}
|
|
|
|
- while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
|
|
+ while((ret = inflate(&inf_strm, Z_FINISH)) == Z_OK)
|
|
;
|
|
if (ret != Z_STREAM_END) {
|
|
pr_notice("inflate returned %d\n", ret);
|
|
}
|
|
- zlib_inflateEnd(&inf_strm);
|
|
+ inflateEnd(&inf_strm);
|
|
mutex_unlock(&inflate_mutex);
|
|
return 0;
|
|
}
|
|
@@ -204,13 +168,30 @@ int __init jffs2_zlib_init(void)
|
|
{
|
|
int ret;
|
|
|
|
+ ret = pthread_mutex_init(&inflate_mutex, NULL);
|
|
+ if (ret) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = pthread_mutex_init(&deflate_mutex, NULL);
|
|
+ if (ret) {
|
|
+ pthread_mutex_destroy(&inflate_mutex);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
ret = alloc_workspaces();
|
|
- if (ret)
|
|
- return ret;
|
|
+ if (ret) {
|
|
+ pthread_mutex_destroy(&inflate_mutex);
|
|
+ pthread_mutex_destroy(&deflate_mutex);
|
|
+ return ret;
|
|
+ }
|
|
|
|
ret = jffs2_register_compressor(&jffs2_zlib_comp);
|
|
- if (ret)
|
|
- free_workspaces();
|
|
+ if (ret) {
|
|
+ pthread_mutex_destroy(&inflate_mutex);
|
|
+ pthread_mutex_destroy(&deflate_mutex);
|
|
+ free_workspaces();
|
|
+ }
|
|
|
|
return ret;
|
|
}
|
|
@@ -219,4 +200,6 @@ void jffs2_zlib_exit(void)
|
|
{
|
|
jffs2_unregister_compressor(&jffs2_zlib_comp);
|
|
free_workspaces();
|
|
+ pthread_mutex_destroy(&inflate_mutex);
|
|
+ pthread_mutex_destroy(&deflate_mutex);
|
|
}
|
|
diff -Nupr old/fs/jffs2/debug.c new/fs/jffs2/debug.c
|
|
--- old/fs/jffs2/debug.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/debug.c 2022-05-10 15:11:46.200000000 +0800
|
|
@@ -10,15 +10,12 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/types.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/slab.h>
|
|
+#include <mtd_dev.h>
|
|
+#include "los_crc32.h"
|
|
#include "nodelist.h"
|
|
#include "debug.h"
|
|
|
|
@@ -133,7 +130,7 @@ __jffs2_dbg_prewrite_paranoia_check(stru
|
|
if (!buf)
|
|
return;
|
|
|
|
- ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
|
|
+ ret = jffs2_flash_read(c, ofs, len, &retlen, (char *)buf);
|
|
if (ret || (retlen != len)) {
|
|
JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n",
|
|
len, ret, retlen);
|
|
diff -Nupr old/fs/jffs2/debug.h new/fs/jffs2/debug.h
|
|
--- old/fs/jffs2/debug.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/debug.h 2022-05-10 15:12:30.850000000 +0800
|
|
@@ -14,7 +14,12 @@
|
|
#define _JFFS2_DEBUG_H_
|
|
|
|
#include <linux/sched.h>
|
|
-
|
|
+#include "los_process.h"
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+extern "C" {
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
#ifndef CONFIG_JFFS2_FS_DEBUG
|
|
#define CONFIG_JFFS2_FS_DEBUG 0
|
|
#endif
|
|
@@ -71,25 +76,26 @@ do { \
|
|
|
|
/* The prefixes of JFFS2 messages */
|
|
#define JFFS2_DBG KERN_DEBUG
|
|
+#define JFFS2_DBG_LVL KERN_DEBUG
|
|
#define JFFS2_DBG_PREFIX "[JFFS2 DBG]"
|
|
#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX
|
|
|
|
/* JFFS2 message macros */
|
|
#define JFFS2_ERROR(fmt, ...) \
|
|
- pr_err("error: (%d) %s: " fmt, \
|
|
- task_pid_nr(current), __func__, ##__VA_ARGS__)
|
|
+ pr_err("error: (%u) %s: " fmt, \
|
|
+ LOS_GetCurrProcessID, __func__, ##__VA_ARGS__)
|
|
|
|
#define JFFS2_WARNING(fmt, ...) \
|
|
- pr_warn("warning: (%d) %s: " fmt, \
|
|
- task_pid_nr(current), __func__, ##__VA_ARGS__)
|
|
+ pr_warn("warning: (%u) %s: " fmt, \
|
|
+ LOS_GetCurrProcessID, __func__, ##__VA_ARGS__)
|
|
|
|
#define JFFS2_NOTICE(fmt, ...) \
|
|
- pr_notice("notice: (%d) %s: " fmt, \
|
|
- task_pid_nr(current), __func__, ##__VA_ARGS__)
|
|
+ pr_notice("notice: (%u) %s: " fmt, \
|
|
+ LOS_GetCurrProcessID, __func__, ##__VA_ARGS__)
|
|
|
|
#define JFFS2_DEBUG(fmt, ...) \
|
|
- printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt, \
|
|
- task_pid_nr(current), __func__, ##__VA_ARGS__)
|
|
+ printk(KERN_DEBUG "[JFFS2 DBG] (%u) %s: " fmt, \
|
|
+ LOS_GetCurrProcessID, __func__, ##__VA_ARGS__)
|
|
|
|
/*
|
|
* We split our debugging messages on several parts, depending on the JFFS2
|
|
@@ -272,4 +278,10 @@ __jffs2_dbg_dump_node(struct jffs2_sb_in
|
|
#define jffs2_dbg_acct_sanity_check_nolock(c, jeb)
|
|
#endif /* !JFFS2_DBG_SANITY_CHECKS */
|
|
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+}
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
#endif /* _JFFS2_DEBUG_H_ */
|
|
diff -Nupr old/fs/jffs2/dir.c new/fs/jffs2/dir.c
|
|
--- old/fs/jffs2/dir.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/dir.c 2022-05-10 16:08:26.380000000 +0800
|
|
@@ -10,95 +10,42 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
+#include <dirent.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/fs.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include "jffs2_fs_i.h"
|
|
-#include "jffs2_fs_sb.h"
|
|
-#include <linux/time.h>
|
|
+#include "los_crc32.h"
|
|
#include "nodelist.h"
|
|
-
|
|
-static int jffs2_readdir (struct file *, struct dir_context *);
|
|
-
|
|
-static int jffs2_create (struct inode *,struct dentry *,umode_t,
|
|
- bool);
|
|
-static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
|
|
- unsigned int);
|
|
-static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
|
|
-static int jffs2_unlink (struct inode *,struct dentry *);
|
|
-static int jffs2_symlink (struct inode *,struct dentry *,const char *);
|
|
-static int jffs2_mkdir (struct inode *,struct dentry *,umode_t);
|
|
-static int jffs2_rmdir (struct inode *,struct dentry *);
|
|
-static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t);
|
|
-static int jffs2_rename (struct inode *, struct dentry *,
|
|
- struct inode *, struct dentry *,
|
|
- unsigned int);
|
|
-
|
|
-const struct file_operations jffs2_dir_operations =
|
|
-{
|
|
- .read = generic_read_dir,
|
|
- .iterate_shared=jffs2_readdir,
|
|
- .unlocked_ioctl=jffs2_ioctl,
|
|
- .fsync = jffs2_fsync,
|
|
- .llseek = generic_file_llseek,
|
|
-};
|
|
-
|
|
-
|
|
-const struct inode_operations jffs2_dir_inode_operations =
|
|
-{
|
|
- .create = jffs2_create,
|
|
- .lookup = jffs2_lookup,
|
|
- .link = jffs2_link,
|
|
- .unlink = jffs2_unlink,
|
|
- .symlink = jffs2_symlink,
|
|
- .mkdir = jffs2_mkdir,
|
|
- .rmdir = jffs2_rmdir,
|
|
- .mknod = jffs2_mknod,
|
|
- .rename = jffs2_rename,
|
|
- .get_acl = jffs2_get_acl,
|
|
- .set_acl = jffs2_set_acl,
|
|
- .setattr = jffs2_setattr,
|
|
- .listxattr = jffs2_listxattr,
|
|
-};
|
|
-
|
|
-/***********************************************************************/
|
|
-
|
|
+#include "vfs_jffs2.h"
|
|
+#include "jffs2_hash.h"
|
|
|
|
/* We keep the dirent list sorted in increasing order of name hash,
|
|
and we use the same hash function as the dentries. Makes this
|
|
nice and simple
|
|
*/
|
|
-static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
|
|
- unsigned int flags)
|
|
+struct jffs2_inode *jffs2_lookup(struct jffs2_inode *dir_i, const unsigned char *d_name, int namelen)
|
|
{
|
|
struct jffs2_inode_info *dir_f;
|
|
struct jffs2_full_dirent *fd = NULL, *fd_list;
|
|
uint32_t ino = 0;
|
|
- struct inode *inode = NULL;
|
|
- unsigned int nhash;
|
|
+ uint32_t hash = full_name_hash(d_name, namelen);
|
|
+ struct jffs2_inode *inode = NULL;
|
|
|
|
jffs2_dbg(1, "jffs2_lookup()\n");
|
|
|
|
- if (target->d_name.len > JFFS2_MAX_NAME_LEN)
|
|
+ if (namelen > JFFS2_MAX_NAME_LEN)
|
|
return ERR_PTR(-ENAMETOOLONG);
|
|
|
|
dir_f = JFFS2_INODE_INFO(dir_i);
|
|
|
|
- /* The 'nhash' on the fd_list is not the same as the dentry hash */
|
|
- nhash = full_name_hash(NULL, target->d_name.name, target->d_name.len);
|
|
-
|
|
mutex_lock(&dir_f->sem);
|
|
|
|
/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
|
|
- for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= nhash; fd_list = fd_list->next) {
|
|
- if (fd_list->nhash == nhash &&
|
|
- (!fd || fd_list->version > fd->version) &&
|
|
- strlen(fd_list->name) == target->d_name.len &&
|
|
- !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
|
|
+ for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= hash; fd_list = fd_list->next) {
|
|
+ if (fd_list->nhash == hash &&
|
|
+ (!fd || fd_list->version > fd->version) &&
|
|
+ strlen((char *)fd_list->name) == namelen &&
|
|
+ !strncmp((char *)fd_list->name, (char *)d_name, namelen)) {
|
|
fd = fd_list;
|
|
}
|
|
}
|
|
@@ -111,176 +58,57 @@ static struct dentry *jffs2_lookup(struc
|
|
pr_warn("iget() failed for ino #%u\n", ino);
|
|
}
|
|
|
|
- return d_splice_alias(inode, target);
|
|
-}
|
|
-
|
|
-/***********************************************************************/
|
|
-
|
|
-
|
|
-static int jffs2_readdir(struct file *file, struct dir_context *ctx)
|
|
-{
|
|
- struct inode *inode = file_inode(file);
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_full_dirent *fd;
|
|
- unsigned long curofs = 1;
|
|
-
|
|
- jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino);
|
|
-
|
|
- if (!dir_emit_dots(file, ctx))
|
|
- return 0;
|
|
-
|
|
- mutex_lock(&f->sem);
|
|
- for (fd = f->dents; fd; fd = fd->next) {
|
|
- curofs++;
|
|
- /* First loop: curofs = 2; pos = 2 */
|
|
- if (curofs < ctx->pos) {
|
|
- jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
|
|
- fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos);
|
|
- continue;
|
|
- }
|
|
- if (!fd->ino) {
|
|
- jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n",
|
|
- fd->name);
|
|
- ctx->pos++;
|
|
- continue;
|
|
- }
|
|
- jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n",
|
|
- (unsigned long)ctx->pos, fd->name, fd->ino, fd->type);
|
|
- if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type))
|
|
- break;
|
|
- ctx->pos++;
|
|
- }
|
|
- mutex_unlock(&f->sem);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/***********************************************************************/
|
|
-
|
|
-
|
|
-static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
|
|
- umode_t mode, bool excl)
|
|
-{
|
|
- struct jffs2_raw_inode *ri;
|
|
- struct jffs2_inode_info *f, *dir_f;
|
|
- struct jffs2_sb_info *c;
|
|
- struct inode *inode;
|
|
- int ret;
|
|
-
|
|
- ri = jffs2_alloc_raw_inode();
|
|
- if (!ri)
|
|
- return -ENOMEM;
|
|
-
|
|
- c = JFFS2_SB_INFO(dir_i->i_sb);
|
|
-
|
|
- jffs2_dbg(1, "%s()\n", __func__);
|
|
-
|
|
- inode = jffs2_new_inode(dir_i, mode, ri);
|
|
-
|
|
- if (IS_ERR(inode)) {
|
|
- jffs2_dbg(1, "jffs2_new_inode() failed\n");
|
|
- jffs2_free_raw_inode(ri);
|
|
- return PTR_ERR(inode);
|
|
- }
|
|
-
|
|
- inode->i_op = &jffs2_file_inode_operations;
|
|
- inode->i_fop = &jffs2_file_operations;
|
|
- inode->i_mapping->a_ops = &jffs2_file_address_operations;
|
|
- inode->i_mapping->nrpages = 0;
|
|
-
|
|
- f = JFFS2_INODE_INFO(inode);
|
|
- dir_f = JFFS2_INODE_INFO(dir_i);
|
|
-
|
|
- /* jffs2_do_create() will want to lock it, _after_ reserving
|
|
- space and taking c-alloc_sem. If we keep it locked here,
|
|
- lockdep gets unhappy (although it's a false positive;
|
|
- nothing else will be looking at this inode yet so there's
|
|
- no chance of AB-BA deadlock involving its f->sem). */
|
|
- mutex_unlock(&f->sem);
|
|
-
|
|
- ret = jffs2_do_create(c, dir_f, f, ri, &dentry->d_name);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
|
|
-
|
|
- jffs2_free_raw_inode(ri);
|
|
-
|
|
- jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
|
|
- __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
|
|
- f->inocache->pino_nlink, inode->i_mapping->nrpages);
|
|
-
|
|
- d_instantiate_new(dentry, inode);
|
|
- return 0;
|
|
-
|
|
- fail:
|
|
- iget_failed(inode);
|
|
- jffs2_free_raw_inode(ri);
|
|
- return ret;
|
|
+ return inode;
|
|
}
|
|
|
|
-/***********************************************************************/
|
|
-
|
|
-
|
|
-static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
|
|
+int jffs2_unlink(struct jffs2_inode *dir_i, struct jffs2_inode *d_inode, const unsigned char *d_name)
|
|
{
|
|
struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
|
|
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
|
|
- struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode(dentry));
|
|
+ struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode);
|
|
int ret;
|
|
- uint32_t now = JFFS2_NOW();
|
|
+ uint32_t now = Jffs2CurSec();
|
|
|
|
- ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
|
|
- dentry->d_name.len, dead_f, now);
|
|
+ ret = jffs2_do_unlink(c, dir_f, (const char *)d_name,
|
|
+ strlen((char *)d_name), dead_f, now);
|
|
if (dead_f->inocache)
|
|
- set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
|
|
+ d_inode->i_nlink = dead_f->inocache->pino_nlink;
|
|
if (!ret)
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
|
|
+ dir_i->i_mtime = dir_i->i_ctime = now;
|
|
return ret;
|
|
}
|
|
-/***********************************************************************/
|
|
|
|
-
|
|
-static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
|
|
+int jffs2_link(struct jffs2_inode *old_d_inode, struct jffs2_inode *dir_i, const unsigned char *d_name)
|
|
{
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_sb);
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry));
|
|
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_d_inode->i_sb);
|
|
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_d_inode);
|
|
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
|
|
int ret;
|
|
uint8_t type;
|
|
uint32_t now;
|
|
|
|
- /* Don't let people make hard links to bad inodes. */
|
|
- if (!f->inocache)
|
|
- return -EIO;
|
|
-
|
|
- if (d_is_dir(old_dentry))
|
|
- return -EPERM;
|
|
-
|
|
/* XXX: This is ugly */
|
|
- type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
|
|
+ type = (old_d_inode->i_mode & S_IFMT) >> 12;
|
|
if (!type) type = DT_REG;
|
|
|
|
- now = JFFS2_NOW();
|
|
- ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
|
|
+ now = Jffs2CurSec();
|
|
+ ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, (const char *)d_name,
|
|
+ strlen((char *)d_name), now);
|
|
|
|
if (!ret) {
|
|
mutex_lock(&f->sem);
|
|
- set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
|
|
+ old_d_inode->i_nlink = ++f->inocache->pino_nlink;
|
|
mutex_unlock(&f->sem);
|
|
- d_instantiate(dentry, d_inode(old_dentry));
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
|
|
- ihold(d_inode(old_dentry));
|
|
+ dir_i->i_mtime = dir_i->i_ctime = now;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
-/***********************************************************************/
|
|
-
|
|
-static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
|
|
+int jffs2_symlink(struct jffs2_inode *dir_i, struct jffs2_inode **d_inode, const unsigned char *d_name, const char *target)
|
|
{
|
|
struct jffs2_inode_info *f, *dir_f;
|
|
struct jffs2_sb_info *c;
|
|
- struct inode *inode;
|
|
+ struct jffs2_inode *inode;
|
|
struct jffs2_raw_inode *ri;
|
|
struct jffs2_raw_dirent *rd;
|
|
struct jffs2_full_dnode *fn;
|
|
@@ -304,7 +132,7 @@ static int jffs2_symlink (struct inode *
|
|
/* Try to reserve enough space for both node and dirent.
|
|
* Just the node will do for now, though
|
|
*/
|
|
- namelen = dentry->d_name.len;
|
|
+ namelen = strlen((char *)d_name);
|
|
ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen,
|
|
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
|
|
|
|
@@ -321,8 +149,6 @@ static int jffs2_symlink (struct inode *
|
|
return PTR_ERR(inode);
|
|
}
|
|
|
|
- inode->i_op = &jffs2_symlink_inode_operations;
|
|
-
|
|
f = JFFS2_INODE_INFO(inode);
|
|
|
|
inode->i_size = targetlen;
|
|
@@ -334,7 +160,7 @@ static int jffs2_symlink (struct inode *
|
|
ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
|
|
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
|
|
|
|
- fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL);
|
|
+ fn = jffs2_write_dnode(c, f, ri, (const unsigned char *)target, targetlen, ALLOC_NORMAL);
|
|
|
|
jffs2_free_raw_inode(ri);
|
|
|
|
@@ -347,7 +173,8 @@ static int jffs2_symlink (struct inode *
|
|
}
|
|
|
|
/* We use f->target field to store the target path. */
|
|
- f->target = kmemdup(target, targetlen + 1, GFP_KERNEL);
|
|
+
|
|
+ f->target = (unsigned char *)malloc(targetlen + 1);
|
|
if (!f->target) {
|
|
pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1);
|
|
mutex_unlock(&f->sem);
|
|
@@ -355,7 +182,15 @@ static int jffs2_symlink (struct inode *
|
|
ret = -ENOMEM;
|
|
goto fail;
|
|
}
|
|
- inode->i_link = f->target;
|
|
+
|
|
+ ret = LOS_CopyToKernel((char *)f->target, targetlen + 1, target, targetlen + 1);
|
|
+ if (ret != EOK) {
|
|
+ (void)free(f->target);
|
|
+ f->target = NULL;
|
|
+ mutex_unlock(&f->sem);
|
|
+ jffs2_complete_reservation(c);
|
|
+ goto fail;
|
|
+ }
|
|
|
|
jffs2_dbg(1, "%s(): symlink's target '%s' cached\n",
|
|
__func__, (char *)f->target);
|
|
@@ -368,14 +203,6 @@ static int jffs2_symlink (struct inode *
|
|
|
|
jffs2_complete_reservation(c);
|
|
|
|
- ret = jffs2_init_security(inode, dir_i, &dentry->d_name);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
- ret = jffs2_init_acl_post(inode);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
|
|
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
|
|
if (ret)
|
|
@@ -400,13 +227,13 @@ static int jffs2_symlink (struct inode *
|
|
rd->pino = cpu_to_je32(dir_i->i_ino);
|
|
rd->version = cpu_to_je32(++dir_f->highest_version);
|
|
rd->ino = cpu_to_je32(inode->i_ino);
|
|
- rd->mctime = cpu_to_je32(JFFS2_NOW());
|
|
+ rd->mctime = cpu_to_je32(Jffs2CurSec());
|
|
rd->nsize = namelen;
|
|
rd->type = DT_LNK;
|
|
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
|
|
- rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
|
|
+ rd->name_crc = cpu_to_je32(crc32(0, (const char *)d_name, namelen));
|
|
|
|
- fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
|
|
+ fd = jffs2_write_dirent(c, dir_f, rd, (const unsigned char *)d_name, namelen, ALLOC_NORMAL);
|
|
|
|
if (IS_ERR(fd)) {
|
|
/* dirent failed to write. Delete the inode normally
|
|
@@ -418,7 +245,7 @@ static int jffs2_symlink (struct inode *
|
|
goto fail;
|
|
}
|
|
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
|
|
+ dir_i->i_mtime = dir_i->i_ctime = je32_to_cpu(rd->mctime);
|
|
|
|
jffs2_free_raw_dirent(rd);
|
|
|
|
@@ -429,20 +256,20 @@ static int jffs2_symlink (struct inode *
|
|
mutex_unlock(&dir_f->sem);
|
|
jffs2_complete_reservation(c);
|
|
|
|
- d_instantiate_new(dentry, inode);
|
|
+ *d_inode = inode;
|
|
return 0;
|
|
|
|
fail:
|
|
- iget_failed(inode);
|
|
+ inode->i_nlink = 0;
|
|
+ jffs2_iput(inode);
|
|
return ret;
|
|
}
|
|
|
|
-
|
|
-static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode)
|
|
+int jffs2_mkdir(struct jffs2_inode *dir_i, const unsigned char *d_name, int mode, struct jffs2_inode **new_i)
|
|
{
|
|
struct jffs2_inode_info *f, *dir_f;
|
|
struct jffs2_sb_info *c;
|
|
- struct inode *inode;
|
|
+ struct jffs2_inode *inode;
|
|
struct jffs2_raw_inode *ri;
|
|
struct jffs2_raw_dirent *rd;
|
|
struct jffs2_full_dnode *fn;
|
|
@@ -450,7 +277,7 @@ static int jffs2_mkdir (struct inode *di
|
|
int namelen;
|
|
uint32_t alloclen;
|
|
int ret;
|
|
-
|
|
+ mode &= ~S_IFMT;
|
|
mode |= S_IFDIR;
|
|
|
|
ri = jffs2_alloc_raw_inode();
|
|
@@ -462,9 +289,8 @@ static int jffs2_mkdir (struct inode *di
|
|
/* Try to reserve enough space for both node and dirent.
|
|
* Just the node will do for now, though
|
|
*/
|
|
- namelen = dentry->d_name.len;
|
|
- ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
|
|
- JFFS2_SUMMARY_INODE_SIZE);
|
|
+ namelen = strlen((char *)d_name);
|
|
+ ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
|
|
|
|
if (ret) {
|
|
jffs2_free_raw_inode(ri);
|
|
@@ -478,14 +304,8 @@ static int jffs2_mkdir (struct inode *di
|
|
jffs2_complete_reservation(c);
|
|
return PTR_ERR(inode);
|
|
}
|
|
-
|
|
- inode->i_op = &jffs2_dir_inode_operations;
|
|
- inode->i_fop = &jffs2_dir_operations;
|
|
-
|
|
f = JFFS2_INODE_INFO(inode);
|
|
|
|
- /* Directories get nlink 2 at start */
|
|
- set_nlink(inode, 2);
|
|
/* but ic->pino_nlink is the parent ino# */
|
|
f->inocache->pino_nlink = dir_i->i_ino;
|
|
|
|
@@ -500,6 +320,7 @@ static int jffs2_mkdir (struct inode *di
|
|
/* Eeek. Wave bye bye */
|
|
mutex_unlock(&f->sem);
|
|
jffs2_complete_reservation(c);
|
|
+
|
|
ret = PTR_ERR(fn);
|
|
goto fail;
|
|
}
|
|
@@ -511,14 +332,6 @@ static int jffs2_mkdir (struct inode *di
|
|
|
|
jffs2_complete_reservation(c);
|
|
|
|
- ret = jffs2_init_security(inode, dir_i, &dentry->d_name);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
- ret = jffs2_init_acl_post(inode);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
|
|
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
|
|
if (ret)
|
|
@@ -543,13 +356,13 @@ static int jffs2_mkdir (struct inode *di
|
|
rd->pino = cpu_to_je32(dir_i->i_ino);
|
|
rd->version = cpu_to_je32(++dir_f->highest_version);
|
|
rd->ino = cpu_to_je32(inode->i_ino);
|
|
- rd->mctime = cpu_to_je32(JFFS2_NOW());
|
|
+ rd->mctime = cpu_to_je32(Jffs2CurSec());
|
|
rd->nsize = namelen;
|
|
rd->type = DT_DIR;
|
|
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
|
|
- rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
|
|
+ rd->name_crc = cpu_to_je32(crc32(0, d_name, namelen));
|
|
|
|
- fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
|
|
+ fd = jffs2_write_dirent(c, dir_f, rd, d_name, namelen, ALLOC_NORMAL);
|
|
|
|
if (IS_ERR(fd)) {
|
|
/* dirent failed to write. Delete the inode normally
|
|
@@ -557,12 +370,12 @@ static int jffs2_mkdir (struct inode *di
|
|
jffs2_complete_reservation(c);
|
|
jffs2_free_raw_dirent(rd);
|
|
mutex_unlock(&dir_f->sem);
|
|
+ inode->i_nlink = 0;
|
|
ret = PTR_ERR(fd);
|
|
goto fail;
|
|
}
|
|
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
|
|
- inc_nlink(dir_i);
|
|
+ dir_i->i_mtime = dir_i->i_ctime = je32_to_cpu(rd->mctime);
|
|
|
|
jffs2_free_raw_dirent(rd);
|
|
|
|
@@ -572,300 +385,198 @@ static int jffs2_mkdir (struct inode *di
|
|
|
|
mutex_unlock(&dir_f->sem);
|
|
jffs2_complete_reservation(c);
|
|
+ *new_i = inode;
|
|
|
|
- d_instantiate_new(dentry, inode);
|
|
return 0;
|
|
|
|
fail:
|
|
- iget_failed(inode);
|
|
+ inode->i_nlink = 0;
|
|
+ jffs2_iput(inode);
|
|
return ret;
|
|
}
|
|
|
|
-static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
|
|
+int jffs2_rmdir (struct jffs2_inode *dir_i, struct jffs2_inode *d_inode, const unsigned char *d_name)
|
|
{
|
|
struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
|
|
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry));
|
|
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode);
|
|
struct jffs2_full_dirent *fd;
|
|
int ret;
|
|
- uint32_t now = JFFS2_NOW();
|
|
+ uint32_t now = Jffs2CurSec();
|
|
|
|
- mutex_lock(&f->sem);
|
|
for (fd = f->dents ; fd; fd = fd->next) {
|
|
if (fd->ino) {
|
|
- mutex_unlock(&f->sem);
|
|
+ PRINT_ERR("%s-%d: ret=%d\n", __FUNCTION__, __LINE__, ENOTEMPTY);
|
|
return -ENOTEMPTY;
|
|
}
|
|
}
|
|
- mutex_unlock(&f->sem);
|
|
|
|
- ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
|
|
- dentry->d_name.len, f, now);
|
|
- if (!ret) {
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
|
|
- clear_nlink(d_inode(dentry));
|
|
- drop_nlink(dir_i);
|
|
- }
|
|
+ ret = jffs2_do_unlink(c, dir_f, (const char *)d_name,
|
|
+ strlen((char *)d_name), f, now);
|
|
+ if (f->inocache)
|
|
+ d_inode->i_nlink = f->inocache->pino_nlink;
|
|
+ if (!ret)
|
|
+ dir_i->i_mtime = dir_i->i_ctime = now;
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev)
|
|
+int jffs2_rename (struct jffs2_inode *old_dir_i, struct jffs2_inode *d_inode, const unsigned char *old_d_name,
|
|
+ struct jffs2_inode *new_dir_i, const unsigned char *new_d_name)
|
|
{
|
|
- struct jffs2_inode_info *f, *dir_f;
|
|
- struct jffs2_sb_info *c;
|
|
- struct inode *inode;
|
|
- struct jffs2_raw_inode *ri;
|
|
- struct jffs2_raw_dirent *rd;
|
|
- struct jffs2_full_dnode *fn;
|
|
- struct jffs2_full_dirent *fd;
|
|
- int namelen;
|
|
- union jffs2_device_node dev;
|
|
- int devlen = 0;
|
|
- uint32_t alloclen;
|
|
int ret;
|
|
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
|
|
+ uint8_t type;
|
|
+ uint32_t now;
|
|
|
|
- ri = jffs2_alloc_raw_inode();
|
|
- if (!ri)
|
|
- return -ENOMEM;
|
|
-
|
|
- c = JFFS2_SB_INFO(dir_i->i_sb);
|
|
-
|
|
- if (S_ISBLK(mode) || S_ISCHR(mode))
|
|
- devlen = jffs2_encode_dev(&dev, rdev);
|
|
+ /* XXX: This is ugly */
|
|
+ type = (d_inode->i_mode & S_IFMT) >> 12;
|
|
+ if (!type) type = DT_REG;
|
|
|
|
- /* Try to reserve enough space for both node and dirent.
|
|
- * Just the node will do for now, though
|
|
- */
|
|
- namelen = dentry->d_name.len;
|
|
- ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
|
|
+ now = Jffs2CurSec();
|
|
+ ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
|
|
+ d_inode->i_ino, type,
|
|
+ (const char *)new_d_name, strlen((char *)new_d_name), now);
|
|
|
|
- if (ret) {
|
|
- jffs2_free_raw_inode(ri);
|
|
+ if (ret)
|
|
return ret;
|
|
- }
|
|
|
|
- inode = jffs2_new_inode(dir_i, mode, ri);
|
|
|
|
- if (IS_ERR(inode)) {
|
|
- jffs2_free_raw_inode(ri);
|
|
- jffs2_complete_reservation(c);
|
|
- return PTR_ERR(inode);
|
|
+ /* If it was a directory we moved, and there was no victim,
|
|
+ increase i_nlink on its new parent */
|
|
+ if ((d_inode->i_mode & S_IFMT) == S_IFDIR) {
|
|
+ new_dir_i->i_nlink++;
|
|
}
|
|
- inode->i_op = &jffs2_file_inode_operations;
|
|
- init_special_inode(inode, inode->i_mode, rdev);
|
|
|
|
- f = JFFS2_INODE_INFO(inode);
|
|
-
|
|
- ri->dsize = ri->csize = cpu_to_je32(devlen);
|
|
- ri->totlen = cpu_to_je32(sizeof(*ri) + devlen);
|
|
- ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
|
|
-
|
|
- ri->compr = JFFS2_COMPR_NONE;
|
|
- ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
|
|
- ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
|
|
-
|
|
- fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL);
|
|
+ /* Unlink the original */
|
|
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
|
|
+ (const char *)old_d_name, strlen((char *)old_d_name), NULL, now);
|
|
|
|
- jffs2_free_raw_inode(ri);
|
|
+ /* We don't touch inode->i_nlink */
|
|
|
|
- if (IS_ERR(fn)) {
|
|
- /* Eeek. Wave bye bye */
|
|
+ if (ret) {
|
|
+ /* Oh shit. We really ought to make a single node which can do both atomically */
|
|
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode);
|
|
+ mutex_lock(&f->sem);
|
|
+ if (f->inocache)
|
|
+ d_inode->i_nlink = f->inocache->pino_nlink++;
|
|
mutex_unlock(&f->sem);
|
|
- jffs2_complete_reservation(c);
|
|
- ret = PTR_ERR(fn);
|
|
- goto fail;
|
|
- }
|
|
- /* No data here. Only a metadata node, which will be
|
|
- obsoleted by the first data write
|
|
- */
|
|
- f->metadata = fn;
|
|
- mutex_unlock(&f->sem);
|
|
-
|
|
- jffs2_complete_reservation(c);
|
|
-
|
|
- ret = jffs2_init_security(inode, dir_i, &dentry->d_name);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
- ret = jffs2_init_acl_post(inode);
|
|
- if (ret)
|
|
- goto fail;
|
|
-
|
|
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
|
|
- if (ret)
|
|
- goto fail;
|
|
|
|
- rd = jffs2_alloc_raw_dirent();
|
|
- if (!rd) {
|
|
- /* Argh. Now we treat it like a normal delete */
|
|
- jffs2_complete_reservation(c);
|
|
- ret = -ENOMEM;
|
|
- goto fail;
|
|
+ pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
|
|
+ __func__, ret);
|
|
+ /* Might as well let the VFS know */
|
|
+ new_dir_i->i_mtime = new_dir_i->i_ctime = now;
|
|
+ return ret;
|
|
}
|
|
|
|
- dir_f = JFFS2_INODE_INFO(dir_i);
|
|
- mutex_lock(&dir_f->sem);
|
|
|
|
- rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
- rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
|
|
- rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
|
|
- rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
|
|
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = now;
|
|
|
|
- rd->pino = cpu_to_je32(dir_i->i_ino);
|
|
- rd->version = cpu_to_je32(++dir_f->highest_version);
|
|
- rd->ino = cpu_to_je32(inode->i_ino);
|
|
- rd->mctime = cpu_to_je32(JFFS2_NOW());
|
|
- rd->nsize = namelen;
|
|
+ return 0;
|
|
+}
|
|
|
|
- /* XXX: This is ugly. */
|
|
- rd->type = (mode & S_IFMT) >> 12;
|
|
+int jffs2_create(struct jffs2_inode *dir_i, const unsigned char *d_name, int mode,
|
|
+ struct jffs2_inode **new_i)
|
|
+{
|
|
+ struct jffs2_raw_inode *ri;
|
|
+ struct jffs2_inode_info *f, *dir_f;
|
|
+ struct jffs2_sb_info *c;
|
|
+ struct jffs2_inode *inode;
|
|
+ int ret;
|
|
+ mode &= ~S_IFMT;
|
|
+ mode |= S_IFREG;
|
|
+ ri = jffs2_alloc_raw_inode();
|
|
+ if (!ri)
|
|
+ return -ENOMEM;
|
|
|
|
- rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
|
|
- rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
|
|
+ c = JFFS2_SB_INFO(dir_i->i_sb);
|
|
|
|
- fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
|
|
+ D1(printk(KERN_DEBUG "jffs2_create()\n"));
|
|
+ inode = jffs2_new_inode(dir_i, mode, ri);
|
|
|
|
- if (IS_ERR(fd)) {
|
|
- /* dirent failed to write. Delete the inode normally
|
|
- as if it were the final unlink() */
|
|
- jffs2_complete_reservation(c);
|
|
- jffs2_free_raw_dirent(rd);
|
|
- mutex_unlock(&dir_f->sem);
|
|
- ret = PTR_ERR(fd);
|
|
- goto fail;
|
|
+ if (IS_ERR(inode)) {
|
|
+ D1(printk(KERN_DEBUG "jffs2_new_inode() failed, error:%ld\n", PTR_ERR(inode)));
|
|
+ jffs2_free_raw_inode(ri);
|
|
+ return PTR_ERR(inode);
|
|
}
|
|
|
|
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
|
|
+ f = JFFS2_INODE_INFO(inode);
|
|
+ dir_f = JFFS2_INODE_INFO(dir_i);
|
|
|
|
- jffs2_free_raw_dirent(rd);
|
|
+ /* jffs2_do_create() will want to lock it, _after_ reserving
|
|
+ space and taking c->alloc_sem. If we keep it locked here,
|
|
+ lockdep gets unhappy (although it's a false positive;
|
|
+ nothing else will be looking at this inode yet so there's
|
|
+ no chance of AB-BA deadlock involving its f->sem). */
|
|
+ mutex_unlock(&f->sem);
|
|
+ ret = jffs2_do_create(c, dir_f, f, ri,
|
|
+ (const char *)d_name,
|
|
+ strlen((char *)d_name));
|
|
|
|
- /* Link the fd into the inode's list, obsoleting an old
|
|
- one if necessary. */
|
|
- jffs2_add_fd_to_list(c, fd, &dir_f->dents);
|
|
+ if (ret) {
|
|
+ inode->i_nlink = 0;
|
|
+ jffs2_iput(inode);
|
|
+ jffs2_free_raw_inode(ri);
|
|
+ return ret;
|
|
+ }
|
|
|
|
- mutex_unlock(&dir_f->sem);
|
|
- jffs2_complete_reservation(c);
|
|
+ jffs2_free_raw_inode(ri);
|
|
|
|
- d_instantiate_new(dentry, inode);
|
|
+ D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d)\n",
|
|
+ inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->pino_nlink));
|
|
+ *new_i = inode;
|
|
return 0;
|
|
+}
|
|
|
|
- fail:
|
|
- iget_failed(inode);
|
|
- return ret;
|
|
+static __inline void fill_name(char *dst_name, int nlen, const unsigned char *name, int namlen)
|
|
+{
|
|
+ int len = nlen < namlen ? nlen : namlen;
|
|
+ (void)memcpy_s(dst_name, nlen, name, len);
|
|
+ dst_name[len] = '\0';
|
|
}
|
|
|
|
-static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
|
|
- struct inode *new_dir_i, struct dentry *new_dentry,
|
|
- unsigned int flags)
|
|
+int jffs2_readdir(struct jffs2_inode *inode, off_t *offset, off_t *int_off, struct dirent *ent)
|
|
{
|
|
- int ret;
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
|
|
- struct jffs2_inode_info *victim_f = NULL;
|
|
- uint8_t type;
|
|
- uint32_t now;
|
|
+ struct jffs2_inode_info *f;
|
|
+ struct jffs2_full_dirent *fd;
|
|
+ off_t curofs = 0;
|
|
|
|
- if (flags & ~RENAME_NOREPLACE)
|
|
- return -EINVAL;
|
|
+ f = JFFS2_INODE_INFO(inode);
|
|
|
|
- /* The VFS will check for us and prevent trying to rename a
|
|
- * file over a directory and vice versa, but if it's a directory,
|
|
- * the VFS can't check whether the victim is empty. The filesystem
|
|
- * needs to do that for itself.
|
|
- */
|
|
- if (d_really_is_positive(new_dentry)) {
|
|
- victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
|
|
- if (d_is_dir(new_dentry)) {
|
|
- struct jffs2_full_dirent *fd;
|
|
-
|
|
- mutex_lock(&victim_f->sem);
|
|
- for (fd = victim_f->dents; fd; fd = fd->next) {
|
|
- if (fd->ino) {
|
|
- mutex_unlock(&victim_f->sem);
|
|
- return -ENOTEMPTY;
|
|
- }
|
|
- }
|
|
- mutex_unlock(&victim_f->sem);
|
|
+ mutex_lock(&f->sem);
|
|
+ for (fd = f->dents; fd; fd = fd->next) {
|
|
+ if (curofs++ < *int_off) {
|
|
+ D2(printk
|
|
+ (KERN_DEBUG
|
|
+ "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
|
|
+ fd->name, fd->ino, fd->type, curofs, *int_off));
|
|
+ continue;
|
|
}
|
|
- }
|
|
-
|
|
- /* XXX: We probably ought to alloc enough space for
|
|
- both nodes at the same time. Writing the new link,
|
|
- then getting -ENOSPC, is quite bad :)
|
|
- */
|
|
-
|
|
- /* Make a hard link */
|
|
-
|
|
- /* XXX: This is ugly */
|
|
- type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
|
|
- if (!type) type = DT_REG;
|
|
-
|
|
- now = JFFS2_NOW();
|
|
- ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
|
|
- d_inode(old_dentry)->i_ino, type,
|
|
- new_dentry->d_name.name, new_dentry->d_name.len, now);
|
|
-
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (victim_f) {
|
|
- /* There was a victim. Kill it off nicely */
|
|
- if (d_is_dir(new_dentry))
|
|
- clear_nlink(d_inode(new_dentry));
|
|
- else
|
|
- drop_nlink(d_inode(new_dentry));
|
|
- /* Don't oops if the victim was a dirent pointing to an
|
|
- inode which didn't exist. */
|
|
- if (victim_f->inocache) {
|
|
- mutex_lock(&victim_f->sem);
|
|
- if (d_is_dir(new_dentry))
|
|
- victim_f->inocache->pino_nlink = 0;
|
|
- else
|
|
- victim_f->inocache->pino_nlink--;
|
|
- mutex_unlock(&victim_f->sem);
|
|
+ if (!fd->ino) {
|
|
+ D2(printk (KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name));
|
|
+ (*int_off)++;
|
|
+ continue;
|
|
}
|
|
- }
|
|
|
|
- /* If it was a directory we moved, and there was no victim,
|
|
- increase i_nlink on its new parent */
|
|
- if (d_is_dir(old_dentry) && !victim_f)
|
|
- inc_nlink(new_dir_i);
|
|
-
|
|
- /* Unlink the original */
|
|
- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
|
|
- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
|
|
-
|
|
- /* We don't touch inode->i_nlink */
|
|
-
|
|
- if (ret) {
|
|
- /* Oh shit. We really ought to make a single node which can do both atomically */
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry));
|
|
- mutex_lock(&f->sem);
|
|
- inc_nlink(d_inode(old_dentry));
|
|
- if (f->inocache && !d_is_dir(old_dentry))
|
|
- f->inocache->pino_nlink++;
|
|
- mutex_unlock(&f->sem);
|
|
+ D2(printk
|
|
+ (KERN_DEBUG "%s-%d: Dirent %ld: \"%s\", ino #%u, type %d\n", __FUNCTION__, __LINE__, offset,
|
|
+ fd->name, fd->ino, fd->type));
|
|
+ fill_name(ent->d_name, sizeof(ent->d_name) - 1, fd->name, strlen((char *)fd->name));
|
|
+ ent->d_type = fd->type;
|
|
+ ent->d_off = ++(*offset);
|
|
+ ent->d_reclen = (uint16_t)sizeof(struct dirent);
|
|
|
|
- pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
|
|
- __func__, ret);
|
|
- /*
|
|
- * We can't keep the target in dcache after that.
|
|
- * For one thing, we can't afford dentry aliases for directories.
|
|
- * For another, if there was a victim, we _can't_ set new inode
|
|
- * for that sucker and we have to trigger mount eviction - the
|
|
- * caller won't do it on its own since we are returning an error.
|
|
- */
|
|
- d_invalidate(new_dentry);
|
|
- new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
|
|
- return ret;
|
|
+ (*int_off)++;
|
|
+ break;
|
|
}
|
|
|
|
- if (d_is_dir(old_dentry))
|
|
- drop_nlink(old_dir_i);
|
|
+ mutex_unlock(&f->sem);
|
|
|
|
- new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
|
|
+ if (fd == NULL) {
|
|
+ D2(printk(KERN_DEBUG "reached the end of the directory\n"));
|
|
+ return ENOENT;
|
|
+ }
|
|
|
|
- return 0;
|
|
+ return ENOERR;
|
|
}
|
|
|
|
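Before moving on to erase.c, a hedged sketch of how a caller might drive the new jffs2_readdir() above: *int_off is the cursor into the in-core dirent list, *offset accumulates the d_off values handed back one entry per call, and ENOENT marks the end of the directory. The loop is illustrative only and assumes the same declarations (struct jffs2_inode, struct dirent, ENOERR) that the patch itself relies on.

    static int count_dir_entries(struct jffs2_inode *dir)
    {
        struct dirent ent;
        off_t pos = 0;      /* cumulative d_off cursor handed back in ent.d_off */
        off_t cookie = 0;   /* index into the dirent list, i.e. *int_off */
        int n = 0;

        while (jffs2_readdir(dir, &pos, &cookie, &ent) == ENOERR)
            n++;            /* ent.d_name / ent.d_type describe one entry */
        return n;           /* the loop ends when jffs2_readdir() returns ENOENT */
    }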
diff -Nupr old/fs/jffs2/erase.c new/fs/jffs2/erase.c
|
|
--- old/fs/jffs2/erase.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/erase.c 2022-05-10 16:09:47.150000000 +0800
|
|
@@ -10,16 +10,19 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/compiler.h>
|
|
-#include <linux/crc32.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/pagemap.h>
|
|
+#include "mtd_dev.h"
|
|
#include "nodelist.h"
|
|
+#include "los_crc32.h"
|
|
+
|
|
+struct erase_priv_struct {
|
|
+ struct jffs2_eraseblock *jeb;
|
|
+ struct jffs2_sb_info *c;
|
|
+};
|
|
|
|
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
|
|
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
|
|
@@ -29,50 +32,14 @@ static void jffs2_erase_block(struct jff
|
|
struct jffs2_eraseblock *jeb)
|
|
{
|
|
int ret;
|
|
- uint32_t bad_offset;
|
|
-#ifdef __ECOS
|
|
- ret = jffs2_flash_erase(c, jeb);
|
|
- if (!ret) {
|
|
- jffs2_erase_succeeded(c, jeb);
|
|
- return;
|
|
- }
|
|
- bad_offset = jeb->offset;
|
|
-#else /* Linux */
|
|
- struct erase_info *instr;
|
|
-
|
|
- jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
|
|
- __func__,
|
|
- jeb->offset, jeb->offset, jeb->offset + c->sector_size);
|
|
- instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
|
|
- if (!instr) {
|
|
- pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
|
|
- mutex_lock(&c->erase_free_sem);
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- list_move(&jeb->list, &c->erase_pending_list);
|
|
- c->erasing_size -= c->sector_size;
|
|
- c->dirty_size += c->sector_size;
|
|
- jeb->dirty_size = c->sector_size;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- mutex_unlock(&c->erase_free_sem);
|
|
- return;
|
|
- }
|
|
+ uint64_t bad_offset = 0;
|
|
|
|
- memset(instr, 0, sizeof(*instr));
|
|
-
|
|
- instr->addr = jeb->offset;
|
|
- instr->len = c->sector_size;
|
|
-
|
|
- ret = mtd_erase(c->mtd, instr);
|
|
+ ret = c->mtd->erase(c->mtd, jeb->offset, c->sector_size, &bad_offset);
|
|
if (!ret) {
|
|
jffs2_erase_succeeded(c, jeb);
|
|
- kfree(instr);
|
|
return;
|
|
}
|
|
|
|
- bad_offset = instr->fail_addr;
|
|
- kfree(instr);
|
|
-#endif /* __ECOS */
|
|
-
|
|
if (ret == -ENOMEM || ret == -EAGAIN) {
|
|
/* Erase failed immediately. Refile it on the list */
|
|
jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
|
|
@@ -168,29 +135,10 @@ static void jffs2_erase_succeeded(struct
|
|
jffs2_garbage_collect_trigger(c);
|
|
spin_unlock(&c->erase_completion_lock);
|
|
mutex_unlock(&c->erase_free_sem);
|
|
- wake_up(&c->erase_wait);
|
|
}
|
|
|
|
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
|
|
{
|
|
- /* For NAND, if the failure did not occur at the device level for a
|
|
- specific physical page, don't bother updating the bad block table. */
|
|
- if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
|
|
- /* We had a device-level failure to erase. Let's see if we've
|
|
- failed too many times. */
|
|
- if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
|
|
- /* We'd like to give this block another try. */
|
|
- mutex_lock(&c->erase_free_sem);
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- list_move(&jeb->list, &c->erase_pending_list);
|
|
- c->erasing_size -= c->sector_size;
|
|
- c->dirty_size += c->sector_size;
|
|
- jeb->dirty_size = c->sector_size;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- mutex_unlock(&c->erase_free_sem);
|
|
- return;
|
|
- }
|
|
- }
|
|
|
|
mutex_lock(&c->erase_free_sem);
|
|
spin_lock(&c->erase_completion_lock);
|
|
@@ -200,7 +148,6 @@ static void jffs2_erase_failed(struct jf
|
|
c->nr_erasing_blocks--;
|
|
spin_unlock(&c->erase_completion_lock);
|
|
mutex_unlock(&c->erase_free_sem);
|
|
- wake_up(&c->erase_wait);
|
|
}
|
|
|
|
/* Hmmm. Maybe we should accept the extra space it takes and make
|
|
@@ -315,40 +262,8 @@ static int jffs2_block_check_erase(struc
|
|
void *ebuf;
|
|
uint32_t ofs;
|
|
size_t retlen;
|
|
- int ret;
|
|
- unsigned long *wordebuf;
|
|
+ int ret = -EIO;
|
|
|
|
- ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
|
|
- &ebuf, NULL);
|
|
- if (ret != -EOPNOTSUPP) {
|
|
- if (ret) {
|
|
- jffs2_dbg(1, "MTD point failed %d\n", ret);
|
|
- goto do_flash_read;
|
|
- }
|
|
- if (retlen < c->sector_size) {
|
|
- /* Don't muck about if it won't let us point to the whole erase sector */
|
|
- jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
|
|
- retlen);
|
|
- mtd_unpoint(c->mtd, jeb->offset, retlen);
|
|
- goto do_flash_read;
|
|
- }
|
|
- wordebuf = ebuf-sizeof(*wordebuf);
|
|
- retlen /= sizeof(*wordebuf);
|
|
- do {
|
|
- if (*++wordebuf != ~0)
|
|
- break;
|
|
- } while(--retlen);
|
|
- mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
|
|
- if (retlen) {
|
|
- pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
|
|
- *wordebuf,
|
|
- jeb->offset +
|
|
- c->sector_size-retlen * sizeof(*wordebuf));
|
|
- return -EIO;
|
|
- }
|
|
- return 0;
|
|
- }
|
|
- do_flash_read:
|
|
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
|
|
if (!ebuf) {
|
|
pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
|
|
@@ -364,7 +279,7 @@ static int jffs2_block_check_erase(struc
|
|
|
|
*bad_offset = ofs;
|
|
|
|
- ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
|
|
+ ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
|
|
if (ret) {
|
|
pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
|
|
ofs, ret);
|
|
@@ -379,7 +294,7 @@ static int jffs2_block_check_erase(struc
|
|
}
|
|
for (i=0; i<readlen; i += sizeof(unsigned long)) {
|
|
/* It's OK. We know it's properly aligned */
|
|
- unsigned long *datum = ebuf + i;
|
|
+ unsigned long *datum = (unsigned long *)((char *)ebuf + i);
|
|
if (*datum + 1) {
|
|
*bad_offset += i;
|
|
pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
|
|
@@ -469,7 +384,6 @@ static void jffs2_mark_erased_block(stru
|
|
|
|
spin_unlock(&c->erase_completion_lock);
|
|
mutex_unlock(&c->erase_free_sem);
|
|
- wake_up(&c->erase_wait);
|
|
return;
|
|
|
|
filebad:
|
|
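With the mtd_point() fast path gone, jffs2_block_check_erase() above always reads the block back in PAGE_SIZE chunks and checks that every word still reads as erased flash, which is what the (*datum + 1) test does. The same check written out as a standalone helper, purely as a reading aid; the helper name is ours.

    /* returns 1 if the buffer still reads as erased flash (all bits set) */
    static int region_reads_erased(const void *buf, size_t len)
    {
        const unsigned long *w = (const unsigned long *)buf;
        size_t n = len / sizeof(*w);

        while (n--)
            if (*w++ != ~0UL)   /* same condition as the patch's (*datum + 1) */
                return 0;
        return 1;
    }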
diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
|
|
--- old/fs/jffs2/file.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/file.c 2022-05-10 09:43:14.250000000 +0800
|
|
@@ -9,335 +9,30 @@
|
|
* For licensing information, see the file 'LICENCE' in this directory.
|
|
*
|
|
*/
|
|
+#include "los_vm_common.h"
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/time.h>
|
|
-#include <linux/pagemap.h>
|
|
-#include <linux/highmem.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/jffs2.h>
|
|
#include "nodelist.h"
|
|
+#include "vfs_jffs2.h"
|
|
|
|
-static int jffs2_write_end(struct file *filp, struct address_space *mapping,
|
|
- loff_t pos, unsigned len, unsigned copied,
|
|
- struct page *pg, void *fsdata);
|
|
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
|
|
- loff_t pos, unsigned len, unsigned flags,
|
|
- struct page **pagep, void **fsdata);
|
|
-static int jffs2_readpage (struct file *filp, struct page *pg);
|
|
+static unsigned char gc_buffer[PAGE_SIZE]; //avoids malloc when user may be under memory pressure
|
|
|
|
-int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
|
|
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
|
|
+ struct jffs2_inode_info *f,
|
|
+ unsigned long offset,
|
|
+ unsigned long *priv)
|
|
{
|
|
- struct inode *inode = filp->f_mapping->host;
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
+ /* FIXME: This works only with one file system mounted at a time */
|
|
int ret;
|
|
-
|
|
- ret = file_write_and_wait_range(filp, start, end);
|
|
+ ret = jffs2_read_inode_range(c, f, gc_buffer,
|
|
+ offset & ~(PAGE_SIZE-1), PAGE_SIZE);
|
|
if (ret)
|
|
- return ret;
|
|
-
|
|
- inode_lock(inode);
|
|
- /* Trigger GC to flush any pending writes for this inode */
|
|
- jffs2_flush_wbuf_gc(c, inode->i_ino);
|
|
- inode_unlock(inode);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-const struct file_operations jffs2_file_operations =
|
|
-{
|
|
- .llseek = generic_file_llseek,
|
|
- .open = generic_file_open,
|
|
- .read_iter = generic_file_read_iter,
|
|
- .write_iter = generic_file_write_iter,
|
|
- .unlocked_ioctl=jffs2_ioctl,
|
|
- .mmap = generic_file_readonly_mmap,
|
|
- .fsync = jffs2_fsync,
|
|
- .splice_read = generic_file_splice_read,
|
|
- .splice_write = iter_file_splice_write,
|
|
-};
|
|
-
|
|
-/* jffs2_file_inode_operations */
|
|
-
|
|
-const struct inode_operations jffs2_file_inode_operations =
|
|
-{
|
|
- .get_acl = jffs2_get_acl,
|
|
- .set_acl = jffs2_set_acl,
|
|
- .setattr = jffs2_setattr,
|
|
- .listxattr = jffs2_listxattr,
|
|
-};
|
|
-
|
|
-const struct address_space_operations jffs2_file_address_operations =
|
|
-{
|
|
- .readpage = jffs2_readpage,
|
|
- .write_begin = jffs2_write_begin,
|
|
- .write_end = jffs2_write_end,
|
|
-};
|
|
-
|
|
-static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
|
|
-{
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- unsigned char *pg_buf;
|
|
- int ret;
|
|
-
|
|
- jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
|
|
- __func__, inode->i_ino, pg->index << PAGE_SHIFT);
|
|
-
|
|
- BUG_ON(!PageLocked(pg));
|
|
-
|
|
- pg_buf = kmap(pg);
|
|
- /* FIXME: Can kmap fail? */
|
|
-
|
|
- ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
|
|
- PAGE_SIZE);
|
|
-
|
|
- if (ret) {
|
|
- ClearPageUptodate(pg);
|
|
- SetPageError(pg);
|
|
- } else {
|
|
- SetPageUptodate(pg);
|
|
- ClearPageError(pg);
|
|
- }
|
|
-
|
|
- flush_dcache_page(pg);
|
|
- kunmap(pg);
|
|
-
|
|
- jffs2_dbg(2, "readpage finished\n");
|
|
- return ret;
|
|
-}
|
|
-
|
|
-int jffs2_do_readpage_unlock(void *data, struct page *pg)
|
|
-{
|
|
- int ret = jffs2_do_readpage_nolock(data, pg);
|
|
- unlock_page(pg);
|
|
- return ret;
|
|
-}
|
|
-
|
|
-
|
|
-static int jffs2_readpage (struct file *filp, struct page *pg)
|
|
-{
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
|
|
- int ret;
|
|
-
|
|
- mutex_lock(&f->sem);
|
|
- ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
|
|
- mutex_unlock(&f->sem);
|
|
- return ret;
|
|
+ return ERR_PTR(ret);
|
|
+ return gc_buffer;
|
|
}
|
|
|
|
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
|
|
- loff_t pos, unsigned len, unsigned flags,
|
|
- struct page **pagep, void **fsdata)
|
|
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
|
|
+ unsigned char *ptr,
|
|
+ unsigned long *priv)
|
|
{
|
|
- struct page *pg;
|
|
- struct inode *inode = mapping->host;
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- pgoff_t index = pos >> PAGE_SHIFT;
|
|
- uint32_t pageofs = index << PAGE_SHIFT;
|
|
- int ret = 0;
|
|
-
|
|
- jffs2_dbg(1, "%s()\n", __func__);
|
|
-
|
|
- if (pageofs > inode->i_size) {
|
|
- /* Make new hole frag from old EOF to new page */
|
|
- struct jffs2_raw_inode ri;
|
|
- struct jffs2_full_dnode *fn;
|
|
- uint32_t alloc_len;
|
|
-
|
|
- jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
|
|
- (unsigned int)inode->i_size, pageofs);
|
|
-
|
|
- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
|
|
- if (ret)
|
|
- goto out_err;
|
|
-
|
|
- mutex_lock(&f->sem);
|
|
- memset(&ri, 0, sizeof(ri));
|
|
-
|
|
- ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
- ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
|
|
- ri.totlen = cpu_to_je32(sizeof(ri));
|
|
- ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
|
|
-
|
|
- ri.ino = cpu_to_je32(f->inocache->ino);
|
|
- ri.version = cpu_to_je32(++f->highest_version);
|
|
- ri.mode = cpu_to_jemode(inode->i_mode);
|
|
- ri.uid = cpu_to_je16(i_uid_read(inode));
|
|
- ri.gid = cpu_to_je16(i_gid_read(inode));
|
|
- ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
|
|
- ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
|
|
- ri.offset = cpu_to_je32(inode->i_size);
|
|
- ri.dsize = cpu_to_je32(pageofs - inode->i_size);
|
|
- ri.csize = cpu_to_je32(0);
|
|
- ri.compr = JFFS2_COMPR_ZERO;
|
|
- ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
|
|
- ri.data_crc = cpu_to_je32(0);
|
|
-
|
|
- fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);
|
|
-
|
|
- if (IS_ERR(fn)) {
|
|
- ret = PTR_ERR(fn);
|
|
- jffs2_complete_reservation(c);
|
|
- mutex_unlock(&f->sem);
|
|
- goto out_err;
|
|
- }
|
|
- ret = jffs2_add_full_dnode_to_inode(c, f, fn);
|
|
- if (f->metadata) {
|
|
- jffs2_mark_node_obsolete(c, f->metadata->raw);
|
|
- jffs2_free_full_dnode(f->metadata);
|
|
- f->metadata = NULL;
|
|
- }
|
|
- if (ret) {
|
|
- jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n",
|
|
- ret);
|
|
- jffs2_mark_node_obsolete(c, fn->raw);
|
|
- jffs2_free_full_dnode(fn);
|
|
- jffs2_complete_reservation(c);
|
|
- mutex_unlock(&f->sem);
|
|
- goto out_err;
|
|
- }
|
|
- jffs2_complete_reservation(c);
|
|
- inode->i_size = pageofs;
|
|
- mutex_unlock(&f->sem);
|
|
- }
|
|
-
|
|
- /*
|
|
- * While getting a page and reading data in, lock c->alloc_sem until
|
|
- * the page is Uptodate. Otherwise GC task may attempt to read the same
|
|
- * page in read_cache_page(), which causes a deadlock.
|
|
- */
|
|
- mutex_lock(&c->alloc_sem);
|
|
- pg = grab_cache_page_write_begin(mapping, index, flags);
|
|
- if (!pg) {
|
|
- ret = -ENOMEM;
|
|
- goto release_sem;
|
|
- }
|
|
- *pagep = pg;
|
|
-
|
|
- /*
|
|
- * Read in the page if it wasn't already present. Cannot optimize away
|
|
- * the whole page write case until jffs2_write_end can handle the
|
|
- * case of a short-copy.
|
|
- */
|
|
- if (!PageUptodate(pg)) {
|
|
- mutex_lock(&f->sem);
|
|
- ret = jffs2_do_readpage_nolock(inode, pg);
|
|
- mutex_unlock(&f->sem);
|
|
- if (ret) {
|
|
- unlock_page(pg);
|
|
- put_page(pg);
|
|
- goto release_sem;
|
|
- }
|
|
- }
|
|
- jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
|
|
-
|
|
-release_sem:
|
|
- mutex_unlock(&c->alloc_sem);
|
|
-out_err:
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int jffs2_write_end(struct file *filp, struct address_space *mapping,
|
|
- loff_t pos, unsigned len, unsigned copied,
|
|
- struct page *pg, void *fsdata)
|
|
-{
|
|
- /* Actually commit the write from the page cache page we're looking at.
|
|
- * For now, we write the full page out each time. It sucks, but it's simple
|
|
- */
|
|
- struct inode *inode = mapping->host;
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- struct jffs2_raw_inode *ri;
|
|
- unsigned start = pos & (PAGE_SIZE - 1);
|
|
- unsigned end = start + copied;
|
|
- unsigned aligned_start = start & ~3;
|
|
- int ret = 0;
|
|
- uint32_t writtenlen = 0;
|
|
-
|
|
- jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
|
|
- __func__, inode->i_ino, pg->index << PAGE_SHIFT,
|
|
- start, end, pg->flags);
|
|
-
|
|
- /* We need to avoid deadlock with page_cache_read() in
|
|
- jffs2_garbage_collect_pass(). So the page must be
|
|
- up to date to prevent page_cache_read() from trying
|
|
- to re-lock it. */
|
|
- BUG_ON(!PageUptodate(pg));
|
|
-
|
|
- if (end == PAGE_SIZE) {
|
|
- /* When writing out the end of a page, write out the
|
|
- _whole_ page. This helps to reduce the number of
|
|
- nodes in files which have many short writes, like
|
|
- syslog files. */
|
|
- aligned_start = 0;
|
|
- }
|
|
-
|
|
- ri = jffs2_alloc_raw_inode();
|
|
-
|
|
- if (!ri) {
|
|
- jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
|
|
- __func__);
|
|
- unlock_page(pg);
|
|
- put_page(pg);
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- /* Set the fields that the generic jffs2_write_inode_range() code can't find */
|
|
- ri->ino = cpu_to_je32(inode->i_ino);
|
|
- ri->mode = cpu_to_jemode(inode->i_mode);
|
|
- ri->uid = cpu_to_je16(i_uid_read(inode));
|
|
- ri->gid = cpu_to_je16(i_gid_read(inode));
|
|
- ri->isize = cpu_to_je32((uint32_t)inode->i_size);
|
|
- ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());
|
|
-
|
|
- /* In 2.4, it was already kmapped by generic_file_write(). Doesn't
|
|
- hurt to do it again. The alternative is ifdefs, which are ugly. */
|
|
- kmap(pg);
|
|
-
|
|
- ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
|
|
- (pg->index << PAGE_SHIFT) + aligned_start,
|
|
- end - aligned_start, &writtenlen);
|
|
-
|
|
- kunmap(pg);
|
|
-
|
|
- if (ret) {
|
|
- /* There was an error writing. */
|
|
- SetPageError(pg);
|
|
- }
|
|
-
|
|
- /* Adjust writtenlen for the padding we did, so we don't confuse our caller */
|
|
- writtenlen -= min(writtenlen, (start - aligned_start));
|
|
-
|
|
- if (writtenlen) {
|
|
- if (inode->i_size < pos + writtenlen) {
|
|
- inode->i_size = pos + writtenlen;
|
|
- inode->i_blocks = (inode->i_size + 511) >> 9;
|
|
-
|
|
- inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
|
|
- }
|
|
- }
|
|
-
|
|
- jffs2_free_raw_inode(ri);
|
|
-
|
|
- if (start+writtenlen < end) {
|
|
- /* generic_file_write has written more to the page cache than we've
|
|
- actually written to the medium. Mark the page !Uptodate so that
|
|
- it gets reread */
|
|
- jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
|
|
- __func__);
|
|
- SetPageError(pg);
|
|
- ClearPageUptodate(pg);
|
|
- }
|
|
-
|
|
- jffs2_dbg(1, "%s() returning %d\n",
|
|
- __func__, writtenlen > 0 ? writtenlen : ret);
|
|
- unlock_page(pg);
|
|
- put_page(pg);
|
|
- return writtenlen > 0 ? writtenlen : ret;
|
|
+ /* Do nothing */
|
|
}
|
|
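jffs2_gc_fetch_page() above trades reentrancy for simplicity: it always reads one whole, page-aligned region of the file into the file-scope gc_buffer, hence the FIXME about a single mounted filesystem. A minimal sketch of the same fetch pattern with the page rounding spelled out; the function name is ours and the error handling mirrors the patch.

    static unsigned char *fetch_page_sketch(struct jffs2_sb_info *c,
                                            struct jffs2_inode_info *f,
                                            unsigned long offset)
    {
        unsigned long start = offset & ~(PAGE_SIZE - 1);  /* round down to page start */
        int err = jffs2_read_inode_range(c, f, gc_buffer, start, PAGE_SIZE);

        if (err)
            return ERR_PTR(err);
        return gc_buffer;
    }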
diff -Nupr old/fs/jffs2/fs.c new/fs/jffs2/fs.c
|
|
--- old/fs/jffs2/fs.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/fs.c 2022-05-10 16:13:37.830000000 +0800
|
|
@@ -10,136 +10,129 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
-#include <linux/capability.h>
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/sched.h>
|
|
-#include <linux/cred.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/fs_context.h>
|
|
-#include <linux/list.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include <linux/pagemap.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/vmalloc.h>
|
|
-#include <linux/vfs.h>
|
|
-#include <linux/crc32.h>
|
|
+#include <linux/delay.h>
|
|
#include "nodelist.h"
|
|
+#include "os-linux.h"
|
|
+#include "los_crc32.h"
|
|
+#include "jffs2_hash.h"
|
|
+#include "capability_type.h"
|
|
+#include "capability_api.h"
|
|
|
|
-static int jffs2_flash_setup(struct jffs2_sb_info *c);
|
|
-
|
|
-int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
|
|
+int jffs2_setattr (struct jffs2_inode *inode, struct IATTR *attr)
|
|
{
|
|
struct jffs2_full_dnode *old_metadata, *new_metadata;
|
|
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
struct jffs2_raw_inode *ri;
|
|
- union jffs2_device_node dev;
|
|
- unsigned char *mdata = NULL;
|
|
- int mdatalen = 0;
|
|
unsigned int ivalid;
|
|
+ mode_t tmp_mode;
|
|
+ uint c_uid = OsCurrUserGet()->effUserID;
|
|
+ uint c_gid = OsCurrUserGet()->effGid;
|
|
uint32_t alloclen;
|
|
int ret;
|
|
int alloc_type = ALLOC_NORMAL;
|
|
|
|
jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);
|
|
-
|
|
- /* Special cases - we don't want more than one data node
|
|
- for these types on the medium at any time. So setattr
|
|
- must read the original data associated with the node
|
|
- (i.e. the device numbers or the target name) and write
|
|
- it out again with the appropriate data attached */
|
|
- if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
|
|
- /* For these, we don't actually need to read the old node */
|
|
- mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
|
|
- mdata = (char *)&dev;
|
|
- jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
|
|
- __func__, mdatalen);
|
|
- } else if (S_ISLNK(inode->i_mode)) {
|
|
- mutex_lock(&f->sem);
|
|
- mdatalen = f->metadata->size;
|
|
- mdata = kmalloc(f->metadata->size, GFP_USER);
|
|
- if (!mdata) {
|
|
- mutex_unlock(&f->sem);
|
|
- return -ENOMEM;
|
|
- }
|
|
- ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
|
|
- if (ret) {
|
|
- mutex_unlock(&f->sem);
|
|
- kfree(mdata);
|
|
- return ret;
|
|
- }
|
|
- mutex_unlock(&f->sem);
|
|
- jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
|
|
- __func__, mdatalen);
|
|
- }
|
|
-
|
|
ri = jffs2_alloc_raw_inode();
|
|
if (!ri) {
|
|
- if (S_ISLNK(inode->i_mode))
|
|
- kfree(mdata);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
|
|
+ ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
|
|
+
|
|
if (ret) {
|
|
jffs2_free_raw_inode(ri);
|
|
- if (S_ISLNK(inode->i_mode))
|
|
- kfree(mdata);
|
|
return ret;
|
|
}
|
|
mutex_lock(&f->sem);
|
|
- ivalid = iattr->ia_valid;
|
|
+ ivalid = attr->attr_chg_valid;
|
|
+ tmp_mode = inode->i_mode;
|
|
|
|
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
|
|
- ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
|
|
- ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
|
|
+ ri->totlen = cpu_to_je32(sizeof(*ri));
|
|
+ ri->hdr_crc = cpu_to_je32(crc32(0, ri, (sizeof(struct jffs2_unknown_node)-4)));
|
|
|
|
ri->ino = cpu_to_je32(inode->i_ino);
|
|
ri->version = cpu_to_je32(++f->highest_version);
|
|
+ ri->uid = cpu_to_je16(inode->i_uid);
|
|
+ ri->gid = cpu_to_je16(inode->i_gid);
|
|
|
|
- ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
|
|
- from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
|
|
- ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
|
|
- from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));
|
|
+ if (ivalid & CHG_UID) {
|
|
+ if (((c_uid != inode->i_uid) || (attr->attr_chg_uid != inode->i_uid)) && (!IsCapPermit(CAP_CHOWN))) {
|
|
+ jffs2_complete_reservation(c);
|
|
+ jffs2_free_raw_inode(ri);
|
|
+ mutex_unlock(&f->sem);
|
|
+ return -EPERM;
|
|
+ } else {
|
|
+ ri->uid = cpu_to_je16(attr->attr_chg_uid);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ivalid & CHG_GID) {
|
|
+ if (((c_gid != inode->i_gid) || (attr->attr_chg_gid != inode->i_gid)) && (!IsCapPermit(CAP_CHOWN))) {
|
|
+ jffs2_complete_reservation(c);
|
|
+ jffs2_free_raw_inode(ri);
|
|
+ mutex_unlock(&f->sem);
|
|
+ return -EPERM;
|
|
+ } else {
|
|
+ ri->gid = cpu_to_je16(attr->attr_chg_gid);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ivalid & CHG_MODE) {
|
|
+ if (!IsCapPermit(CAP_FOWNER) && (c_uid != inode->i_uid)) {
|
|
+ jffs2_complete_reservation(c);
|
|
+ jffs2_free_raw_inode(ri);
|
|
+ mutex_unlock(&f->sem);
|
|
+ return -EPERM;
|
|
+ } else {
|
|
+ attr->attr_chg_mode &= ~S_IFMT; // strip any file-type bits from the requested mode
|
|
+ tmp_mode &= S_IFMT;
|
|
+ tmp_mode = attr->attr_chg_mode | tmp_mode; // re-apply the inode's original file type
|
|
+ }
|
|
+ }
|
|
|
|
- if (ivalid & ATTR_MODE)
|
|
- ri->mode = cpu_to_jemode(iattr->ia_mode);
|
|
- else
|
|
- ri->mode = cpu_to_jemode(inode->i_mode);
|
|
+ if (ivalid & CHG_ATIME) {
|
|
+ if ((c_uid != inode->i_uid) || (attr->attr_chg_uid != inode->i_uid)) {
|
|
+ return -EPERM;
|
|
+ } else {
|
|
+ ri->atime = cpu_to_je32(attr->attr_chg_atime);
|
|
+ }
|
|
+ } else {
|
|
+ ri->atime = cpu_to_je32(inode->i_atime);
|
|
+ }
|
|
|
|
+ if (ivalid & CHG_MTIME) {
|
|
+ if ((c_uid != inode->i_uid) || (attr->attr_chg_uid != inode->i_uid)) {
|
|
+ return -EPERM;
|
|
+ } else {
|
|
+ ri->mtime = cpu_to_je32(attr->attr_chg_mtime);
|
|
+ }
|
|
+ } else {
|
|
+ ri->mtime = cpu_to_je32(Jffs2CurSec());
|
|
+ }
|
|
+ ri->mode = cpu_to_jemode(tmp_mode);
|
|
|
|
- ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
|
|
- ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
|
|
- ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
|
|
- ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
|
|
+ ri->isize = cpu_to_je32((ivalid & CHG_SIZE) ? attr->attr_chg_size : inode->i_size);
|
|
+ ri->ctime = cpu_to_je32(Jffs2CurSec());
|
|
|
|
ri->offset = cpu_to_je32(0);
|
|
- ri->csize = ri->dsize = cpu_to_je32(mdatalen);
|
|
+ ri->csize = ri->dsize = cpu_to_je32(0);
|
|
ri->compr = JFFS2_COMPR_NONE;
|
|
- if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
|
|
+ if (ivalid & CHG_SIZE && inode->i_size < attr->attr_chg_size) {
|
|
/* It's an extension. Make it a hole node */
|
|
ri->compr = JFFS2_COMPR_ZERO;
|
|
- ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
|
|
+ ri->dsize = cpu_to_je32(attr->attr_chg_size - inode->i_size);
|
|
ri->offset = cpu_to_je32(inode->i_size);
|
|
- } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
|
|
+ } else if (ivalid & CHG_SIZE && !attr->attr_chg_size) {
|
|
/* For truncate-to-zero, treat it as deletion because
|
|
it'll always be obsoleting all previous nodes */
|
|
alloc_type = ALLOC_DELETION;
|
|
}
|
|
- ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
|
|
- if (mdatalen)
|
|
- ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
|
|
- else
|
|
- ri->data_crc = cpu_to_je32(0);
|
|
-
|
|
- new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
|
|
- if (S_ISLNK(inode->i_mode))
|
|
- kfree(mdata);
|
|
-
|
|
+ ri->node_crc = cpu_to_je32(crc32(0, ri, (sizeof(*ri)-8)));
|
|
+ ri->data_crc = cpu_to_je32(0);
|
|
+ new_metadata = jffs2_write_dnode(c, f, ri, NULL, 0, alloc_type);
|
|
if (IS_ERR(new_metadata)) {
|
|
jffs2_complete_reservation(c);
|
|
jffs2_free_raw_inode(ri);
|
|
@@ -147,23 +140,20 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
|
|
return PTR_ERR(new_metadata);
|
|
}
|
|
/* It worked. Update the inode */
|
|
- inode->i_atime = ITIME(je32_to_cpu(ri->atime));
|
|
- inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
|
|
- inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
|
|
+ inode->i_atime = je32_to_cpu(ri->atime);
|
|
+ inode->i_ctime = je32_to_cpu(ri->ctime);
|
|
+ inode->i_mtime = je32_to_cpu(ri->mtime);
|
|
inode->i_mode = jemode_to_cpu(ri->mode);
|
|
- i_uid_write(inode, je16_to_cpu(ri->uid));
|
|
- i_gid_write(inode, je16_to_cpu(ri->gid));
|
|
-
|
|
+ inode->i_uid = je16_to_cpu(ri->uid);
|
|
+ inode->i_gid = je16_to_cpu(ri->gid);
|
|
|
|
old_metadata = f->metadata;
|
|
+ if (ivalid & CHG_SIZE && inode->i_size > attr->attr_chg_size)
|
|
+ jffs2_truncate_fragtree (c, &f->fragtree, attr->attr_chg_size);
|
|
|
|
- if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
|
|
- jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
|
|
-
|
|
- if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
|
|
+ if (ivalid & CHG_SIZE && inode->i_size < attr->attr_chg_size) {
|
|
jffs2_add_full_dnode_to_inode(c, f, new_metadata);
|
|
- inode->i_size = iattr->ia_size;
|
|
- inode->i_blocks = (inode->i_size + 511) >> 9;
|
|
+ inode->i_size = attr->attr_chg_size;
|
|
f->metadata = NULL;
|
|
} else {
|
|
f->metadata = new_metadata;
|
|
@@ -182,315 +172,201 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
|
|
We are protected from a simultaneous write() extending i_size
|
|
back past iattr->ia_size, because do_truncate() holds the
|
|
generic inode semaphore. */
|
|
- if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
|
|
- truncate_setsize(inode, iattr->ia_size);
|
|
- inode->i_blocks = (inode->i_size + 511) >> 9;
|
|
+ if (ivalid & CHG_SIZE && inode->i_size > attr->attr_chg_size) {
|
|
+ inode->i_size = attr->attr_chg_size; // truncate_setsize
|
|
}
|
|
|
|
return 0;
|
|
}
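The CHG_MODE branch above deliberately preserves the inode's file type: only the permission and mode bits from the caller are applied, while the S_IFMT bits already on the inode are kept. A worked example of that merge with illustrative values; the variable names are ours.

    mode_t requested = S_IFDIR | 0644;   /* caller passed stray type bits        */
    mode_t cur_mode  = S_IFREG | 0755;   /* inode->i_mode before the change      */

    requested &= ~S_IFMT;                /* strip type bits        -> 0644       */
    cur_mode  &= S_IFMT;                 /* keep only the type     -> S_IFREG    */
    cur_mode  |= requested;              /* merged result          -> S_IFREG | 0644 */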
|
|
|
|
-int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
|
|
+static void jffs2_clear_inode (struct jffs2_inode *inode)
|
|
{
|
|
- struct inode *inode = d_inode(dentry);
|
|
- int rc;
|
|
+ /* We can forget about this inode for now - drop all
|
|
+ * the nodelists associated with it, etc.
|
|
+ */
|
|
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
+
|
|
+ jffs2_do_clear_inode(c, f);
|
|
+}
|
|
|
|
- rc = setattr_prepare(dentry, iattr);
|
|
- if (rc)
|
|
- return rc;
|
|
+static struct jffs2_inode *ilookup(struct super_block *sb, uint32_t ino)
|
|
+{
|
|
+ struct jffs2_inode *node = NULL;
|
|
|
|
- rc = jffs2_do_setattr(inode, iattr);
|
|
- if (!rc && (iattr->ia_valid & ATTR_MODE))
|
|
- rc = posix_acl_chmod(inode, inode->i_mode);
|
|
+ if (sb->s_root == NULL) {
|
|
+ return NULL;
|
|
+ }
|
|
|
|
- return rc;
|
|
+ // Check for this inode in the cache
|
|
+ Jffs2NodeLock();
|
|
+ (void)Jffs2HashGet(&sb->s_node_hash_lock, &sb->s_node_hash[0], sb, ino, &node);
|
|
+ Jffs2NodeUnlock();
|
|
+ return node;
|
|
}
|
|
|
|
-int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|
+struct jffs2_inode *new_inode(struct super_block *sb)
|
|
{
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
|
|
- unsigned long avail;
|
|
-
|
|
- buf->f_type = JFFS2_SUPER_MAGIC;
|
|
- buf->f_bsize = 1 << PAGE_SHIFT;
|
|
- buf->f_blocks = c->flash_size >> PAGE_SHIFT;
|
|
- buf->f_files = 0;
|
|
- buf->f_ffree = 0;
|
|
- buf->f_namelen = JFFS2_MAX_NAME_LEN;
|
|
- buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
|
|
- buf->f_fsid.val[1] = c->mtd->index;
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- avail = c->dirty_size + c->free_size;
|
|
- if (avail > c->sector_size * c->resv_blocks_write)
|
|
- avail -= c->sector_size * c->resv_blocks_write;
|
|
- else
|
|
- avail = 0;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
|
|
+ struct jffs2_inode *inode = NULL;
|
|
|
|
- return 0;
|
|
-}
|
|
+ inode = zalloc(sizeof (struct jffs2_inode));
|
|
+ if (inode == NULL)
|
|
+ return 0;
|
|
|
|
+ D2(PRINTK("malloc new_inode %x ####################################\n",
|
|
+ inode));
|
|
|
|
-void jffs2_evict_inode (struct inode *inode)
|
|
-{
|
|
- /* We can forget about this inode for now - drop all
|
|
- * the nodelists associated with it, etc.
|
|
- */
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
+ inode->i_sb = sb;
|
|
+ inode->i_ino = 1;
|
|
+ inode->i_nlink = 1; // Let JFFS2 manage the link count
|
|
+ inode->i_size = 0;
|
|
+ LOS_ListInit((&(inode->i_hashlist)));
|
|
|
|
- jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
|
|
- __func__, inode->i_ino, inode->i_mode);
|
|
- truncate_inode_pages_final(&inode->i_data);
|
|
- clear_inode(inode);
|
|
- jffs2_do_clear_inode(c, f);
|
|
+ return inode;
|
|
}
|
|
|
|
-struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
|
|
+struct jffs2_inode *jffs2_iget(struct super_block *sb, uint32_t ino)
|
|
{
|
|
struct jffs2_inode_info *f;
|
|
struct jffs2_sb_info *c;
|
|
struct jffs2_raw_inode latest_node;
|
|
- union jffs2_device_node jdev;
|
|
- struct inode *inode;
|
|
- dev_t rdev = 0;
|
|
+ struct jffs2_inode *inode;
|
|
int ret;
|
|
|
|
- jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);
|
|
-
|
|
- inode = iget_locked(sb, ino);
|
|
- if (!inode)
|
|
- return ERR_PTR(-ENOMEM);
|
|
- if (!(inode->i_state & I_NEW))
|
|
+ Jffs2NodeLock();
|
|
+ inode = ilookup(sb, ino);
|
|
+ if (inode) {
|
|
+ Jffs2NodeUnlock();
|
|
return inode;
|
|
+ }
|
|
+ inode = new_inode(sb);
|
|
+ if (inode == NULL) {
|
|
+ Jffs2NodeUnlock();
|
|
+ return (struct jffs2_inode *)-ENOMEM;
|
|
+ }
|
|
|
|
+ inode->i_ino = ino;
|
|
f = JFFS2_INODE_INFO(inode);
|
|
c = JFFS2_SB_INFO(inode->i_sb);
|
|
|
|
- jffs2_init_inode_info(f);
|
|
- mutex_lock(&f->sem);
|
|
+ (void)mutex_init(&f->sem);
|
|
+ (void)mutex_lock(&f->sem);
|
|
|
|
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
|
|
- if (ret)
|
|
- goto error;
|
|
+ if (ret) {
|
|
+ (void)mutex_unlock(&f->sem);
|
|
+ inode->i_nlink = 0;
|
|
+ free(inode);
|
|
+ Jffs2NodeUnlock();
|
|
+ return (struct jffs2_inode *)ret;
|
|
+ }
|
|
|
|
inode->i_mode = jemode_to_cpu(latest_node.mode);
|
|
- i_uid_write(inode, je16_to_cpu(latest_node.uid));
|
|
- i_gid_write(inode, je16_to_cpu(latest_node.gid));
|
|
+ inode->i_uid = je16_to_cpu(latest_node.uid);
|
|
+ inode->i_gid = je16_to_cpu(latest_node.gid);
|
|
inode->i_size = je32_to_cpu(latest_node.isize);
|
|
- inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
|
|
- inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
|
|
- inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
|
|
-
|
|
- set_nlink(inode, f->inocache->pino_nlink);
|
|
-
|
|
- inode->i_blocks = (inode->i_size + 511) >> 9;
|
|
+ inode->i_atime = je32_to_cpu(latest_node.atime);
|
|
+ inode->i_mtime = je32_to_cpu(latest_node.mtime);
|
|
+ inode->i_ctime = je32_to_cpu(latest_node.ctime);
|
|
+ inode->i_nlink = f->inocache->pino_nlink;
|
|
|
|
- switch (inode->i_mode & S_IFMT) {
|
|
+ (void)mutex_unlock(&f->sem);
|
|
|
|
- case S_IFLNK:
|
|
- inode->i_op = &jffs2_symlink_inode_operations;
|
|
- inode->i_link = f->target;
|
|
- break;
|
|
-
|
|
- case S_IFDIR:
|
|
- {
|
|
- struct jffs2_full_dirent *fd;
|
|
- set_nlink(inode, 2); /* parent and '.' */
|
|
-
|
|
- for (fd=f->dents; fd; fd = fd->next) {
|
|
- if (fd->type == DT_DIR && fd->ino)
|
|
- inc_nlink(inode);
|
|
- }
|
|
- /* Root dir gets i_nlink 3 for some reason */
|
|
- if (inode->i_ino == 1)
|
|
- inc_nlink(inode);
|
|
-
|
|
- inode->i_op = &jffs2_dir_inode_operations;
|
|
- inode->i_fop = &jffs2_dir_operations;
|
|
- break;
|
|
- }
|
|
- case S_IFREG:
|
|
- inode->i_op = &jffs2_file_inode_operations;
|
|
- inode->i_fop = &jffs2_file_operations;
|
|
- inode->i_mapping->a_ops = &jffs2_file_address_operations;
|
|
- inode->i_mapping->nrpages = 0;
|
|
- break;
|
|
-
|
|
- case S_IFBLK:
|
|
- case S_IFCHR:
|
|
- /* Read the device numbers from the media */
|
|
- if (f->metadata->size != sizeof(jdev.old_id) &&
|
|
- f->metadata->size != sizeof(jdev.new_id)) {
|
|
- pr_notice("Device node has strange size %d\n",
|
|
- f->metadata->size);
|
|
- goto error_io;
|
|
- }
|
|
- jffs2_dbg(1, "Reading device numbers from flash\n");
|
|
- ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
|
|
- if (ret < 0) {
|
|
- /* Eep */
|
|
- pr_notice("Read device numbers for inode %lu failed\n",
|
|
- (unsigned long)inode->i_ino);
|
|
- goto error;
|
|
- }
|
|
- if (f->metadata->size == sizeof(jdev.old_id))
|
|
- rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
|
|
- else
|
|
- rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
|
|
- fallthrough;
|
|
-
|
|
- case S_IFSOCK:
|
|
- case S_IFIFO:
|
|
- inode->i_op = &jffs2_file_inode_operations;
|
|
- init_special_inode(inode, inode->i_mode, rdev);
|
|
- break;
|
|
-
|
|
- default:
|
|
- pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
|
|
- __func__, inode->i_mode, (unsigned long)inode->i_ino);
|
|
- }
|
|
-
|
|
- mutex_unlock(&f->sem);
|
|
+ (void)Jffs2HashInsert(&sb->s_node_hash_lock, &sb->s_node_hash[0], inode, ino);
|
|
|
|
jffs2_dbg(1, "jffs2_read_inode() returning\n");
|
|
- unlock_new_inode(inode);
|
|
- return inode;
|
|
+ Jffs2NodeUnlock();
|
|
|
|
-error_io:
|
|
- ret = -EIO;
|
|
-error:
|
|
- mutex_unlock(&f->sem);
|
|
- iget_failed(inode);
|
|
- return ERR_PTR(ret);
|
|
+ return inode;
|
|
}
|
|
|
|
-void jffs2_dirty_inode(struct inode *inode, int flags)
|
|
-{
|
|
- struct iattr iattr;
|
|
|
|
- if (!(inode->i_state & I_DIRTY_DATASYNC)) {
|
|
- jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
|
|
- __func__, inode->i_ino);
|
|
- return;
|
|
- }
|
|
-
|
|
- jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
|
|
- __func__, inode->i_ino);
|
|
-
|
|
- iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
|
|
- iattr.ia_mode = inode->i_mode;
|
|
- iattr.ia_uid = inode->i_uid;
|
|
- iattr.ia_gid = inode->i_gid;
|
|
- iattr.ia_atime = inode->i_atime;
|
|
- iattr.ia_mtime = inode->i_mtime;
|
|
- iattr.ia_ctime = inode->i_ctime;
|
|
-
|
|
- jffs2_do_setattr(inode, &iattr);
|
|
-}
|
|
+// -------------------------------------------------------------------------
|
|
+// Release an in-core inode. In this port the node is freed only once its link
|
|
+// count has dropped to zero; otherwise -EBUSY is returned and the node is kept.
|
|
|
|
-int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
|
|
+int jffs2_iput(struct jffs2_inode *i)
|
|
{
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
|
|
-
|
|
- if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
|
|
- return -EROFS;
|
|
-
|
|
- /* We stop if it was running, then restart if it needs to.
|
|
- This also catches the case where it was stopped and this
|
|
- is just a remount to restart it.
|
|
- Flush the writebuffer, if neccecary, else we loose it */
|
|
- if (!sb_rdonly(sb)) {
|
|
- jffs2_stop_garbage_collect_thread(c);
|
|
- mutex_lock(&c->alloc_sem);
|
|
- jffs2_flush_wbuf_pad(c);
|
|
- mutex_unlock(&c->alloc_sem);
|
|
- }
|
|
-
|
|
- if (!(fc->sb_flags & SB_RDONLY))
|
|
- jffs2_start_garbage_collect_thread(c);
|
|
+ // Called in jffs2_find
|
|
+ // (and jffs2_open and jffs2_ops_mkdir?)
|
|
+ // super.c jffs2_fill_super,
|
|
+ // and gc.c jffs2_garbage_collect_pass
|
|
+ struct jffs2_inode_info *f = NULL;
|
|
+
|
|
+ Jffs2NodeLock();
|
|
+ if (!i || i->i_nlink) {
|
|
+ // and let it fault...
|
|
+ Jffs2NodeUnlock();
|
|
+ return -EBUSY;
|
|
+ }
|
|
+
|
|
+ jffs2_clear_inode(i);
|
|
+ f = JFFS2_INODE_INFO(i);
|
|
+ (void)mutex_destroy(&(f->sem));
|
|
+ (void)Jffs2HashRemove(&i->i_sb->s_node_hash_lock, i);
|
|
+ (void)memset_s(i, sizeof(*i), 0x5a, sizeof(*i));
|
|
+ free(i);
|
|
+ Jffs2NodeUnlock();
|
|
|
|
- fc->sb_flags |= SB_NOATIME;
|
|
return 0;
|
|
}
|
|
|
|
+
|
|
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
|
|
fill in the raw_inode while you're at it. */
|
|
-struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
|
|
+struct jffs2_inode *jffs2_new_inode (struct jffs2_inode *dir_i, int mode, struct jffs2_raw_inode *ri)
|
|
{
|
|
- struct inode *inode;
|
|
+ struct jffs2_inode *inode;
|
|
struct super_block *sb = dir_i->i_sb;
|
|
struct jffs2_sb_info *c;
|
|
struct jffs2_inode_info *f;
|
|
int ret;
|
|
|
|
- jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
|
|
- __func__, dir_i->i_ino, mode);
|
|
-
|
|
c = JFFS2_SB_INFO(sb);
|
|
|
|
+ Jffs2NodeLock();
|
|
inode = new_inode(sb);
|
|
|
|
if (!inode)
|
|
- return ERR_PTR(-ENOMEM);
|
|
+ return (struct jffs2_inode *)-ENOMEM;
|
|
|
|
f = JFFS2_INODE_INFO(inode);
|
|
- jffs2_init_inode_info(f);
|
|
- mutex_lock(&f->sem);
|
|
+ (void)mutex_init(&f->sem);
|
|
+ (void)mutex_lock(&f->sem);
|
|
|
|
memset(ri, 0, sizeof(*ri));
|
|
/* Set OS-specific defaults for new inodes */
|
|
- ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));
|
|
+ ri->uid = cpu_to_je16(OsCurrUserGet()->effUserID);
|
|
+ ri->gid = cpu_to_je16(OsCurrUserGet()->effGid);
|
|
|
|
- if (dir_i->i_mode & S_ISGID) {
|
|
- ri->gid = cpu_to_je16(i_gid_read(dir_i));
|
|
- if (S_ISDIR(mode))
|
|
- mode |= S_ISGID;
|
|
- } else {
|
|
- ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
|
|
- }
|
|
-
|
|
- /* POSIX ACLs have to be processed now, at least partly.
|
|
- The umask is only applied if there's no default ACL */
|
|
- ret = jffs2_init_acl_pre(dir_i, inode, &mode);
|
|
- if (ret) {
|
|
- mutex_unlock(&f->sem);
|
|
- make_bad_inode(inode);
|
|
- iput(inode);
|
|
- return ERR_PTR(ret);
|
|
- }
|
|
ret = jffs2_do_new_inode (c, f, mode, ri);
|
|
if (ret) {
|
|
- mutex_unlock(&f->sem);
|
|
- make_bad_inode(inode);
|
|
- iput(inode);
|
|
- return ERR_PTR(ret);
|
|
+ mutex_unlock(&(f->sem));
|
|
+ jffs2_clear_inode(inode);
|
|
+ (void)mutex_destroy(&(f->sem));
|
|
+ (void)memset_s(inode, sizeof(*inode), 0x6a, sizeof(*inode));
|
|
+ free(inode);
|
|
+ Jffs2NodeUnlock();
|
|
+ return (struct jffs2_inode *)ret;
|
|
+
|
|
}
|
|
- set_nlink(inode, 1);
|
|
+ inode->i_nlink = 1;
|
|
inode->i_ino = je32_to_cpu(ri->ino);
|
|
inode->i_mode = jemode_to_cpu(ri->mode);
|
|
- i_gid_write(inode, je16_to_cpu(ri->gid));
|
|
- i_uid_write(inode, je16_to_cpu(ri->uid));
|
|
- inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
|
|
- ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
|
|
+ inode->i_gid = je16_to_cpu(ri->gid);
|
|
+ inode->i_uid = je16_to_cpu(ri->uid);
|
|
+ inode->i_atime = inode->i_ctime = inode->i_mtime = Jffs2CurSec();
|
|
+ ri->atime = ri->mtime = ri->ctime = cpu_to_je32(inode->i_mtime);
|
|
|
|
- inode->i_blocks = 0;
|
|
inode->i_size = 0;
|
|
|
|
- if (insert_inode_locked(inode) < 0) {
|
|
- mutex_unlock(&f->sem);
|
|
- make_bad_inode(inode);
|
|
- iput(inode);
|
|
- return ERR_PTR(-EINVAL);
|
|
- }
|
|
+ (void)Jffs2HashInsert(&sb->s_node_hash_lock, &sb->s_node_hash[0], inode, inode->i_ino);
|
|
+ Jffs2NodeUnlock();
|
|
|
|
return inode;
|
|
}
|
|
|
|
-static int calculate_inocache_hashsize(uint32_t flash_size)
|
|
+int calculate_inocache_hashsize(uint32_t flash_size)
|
|
{
|
|
/*
|
|
* Pick a inocache hash size based on the size of the medium.
|
|
@@ -510,118 +386,17 @@ static int calculate_inocache_hashsize(uint32_t flash_size)
|
|
return hashsize;
|
|
}
|
|
|
|
-int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
|
|
-{
|
|
- struct jffs2_sb_info *c;
|
|
- struct inode *root_i;
|
|
- int ret;
|
|
- size_t blocks;
|
|
-
|
|
- c = JFFS2_SB_INFO(sb);
|
|
-
|
|
- /* Do not support the MLC nand */
|
|
- if (c->mtd->type == MTD_MLCNANDFLASH)
|
|
- return -EINVAL;
|
|
-
|
|
-#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
|
|
- if (c->mtd->type == MTD_NANDFLASH) {
|
|
- errorf(fc, "Cannot operate on NAND flash unless jffs2 NAND support is compiled in");
|
|
- return -EINVAL;
|
|
- }
|
|
- if (c->mtd->type == MTD_DATAFLASH) {
|
|
- errorf(fc, "Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in");
|
|
- return -EINVAL;
|
|
- }
|
|
-#endif
|
|
-
|
|
- c->flash_size = c->mtd->size;
|
|
- c->sector_size = c->mtd->erasesize;
|
|
- blocks = c->flash_size / c->sector_size;
|
|
-
|
|
- /*
|
|
- * Size alignment check
|
|
- */
|
|
- if ((c->sector_size * blocks) != c->flash_size) {
|
|
- c->flash_size = c->sector_size * blocks;
|
|
- infof(fc, "Flash size not aligned to erasesize, reducing to %dKiB",
|
|
- c->flash_size / 1024);
|
|
- }
|
|
-
|
|
- if (c->flash_size < 5*c->sector_size) {
|
|
- errorf(fc, "Too few erase blocks (%d)",
|
|
- c->flash_size / c->sector_size);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
|
|
-
|
|
- /* NAND (or other bizarre) flash... do setup accordingly */
|
|
- ret = jffs2_flash_setup(c);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
|
|
- c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
|
|
- if (!c->inocache_list) {
|
|
- ret = -ENOMEM;
|
|
- goto out_wbuf;
|
|
- }
|
|
-
|
|
- jffs2_init_xattr_subsystem(c);
|
|
-
|
|
- if ((ret = jffs2_do_mount_fs(c)))
|
|
- goto out_inohash;
|
|
-
|
|
- jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
|
|
- root_i = jffs2_iget(sb, 1);
|
|
- if (IS_ERR(root_i)) {
|
|
- jffs2_dbg(1, "get root inode failed\n");
|
|
- ret = PTR_ERR(root_i);
|
|
- goto out_root;
|
|
- }
|
|
-
|
|
- ret = -ENOMEM;
|
|
-
|
|
- jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
|
|
- sb->s_root = d_make_root(root_i);
|
|
- if (!sb->s_root)
|
|
- goto out_root;
|
|
-
|
|
- sb->s_maxbytes = 0xFFFFFFFF;
|
|
- sb->s_blocksize = PAGE_SIZE;
|
|
- sb->s_blocksize_bits = PAGE_SHIFT;
|
|
- sb->s_magic = JFFS2_SUPER_MAGIC;
|
|
- sb->s_time_min = 0;
|
|
- sb->s_time_max = U32_MAX;
|
|
-
|
|
- if (!sb_rdonly(sb))
|
|
- jffs2_start_garbage_collect_thread(c);
|
|
- return 0;
|
|
-
|
|
-out_root:
|
|
- jffs2_free_ino_caches(c);
|
|
- jffs2_free_raw_node_refs(c);
|
|
- kvfree(c->blocks);
|
|
- jffs2_clear_xattr_subsystem(c);
|
|
- jffs2_sum_exit(c);
|
|
- out_inohash:
|
|
- kfree(c->inocache_list);
|
|
- out_wbuf:
|
|
- jffs2_flash_cleanup(c);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
|
|
struct jffs2_inode_info *f)
|
|
{
|
|
- iput(OFNI_EDONI_2SFFJ(f));
|
|
+ struct jffs2_inode *node = OFNI_EDONI_2SFFJ(f);
|
|
+ jffs2_iput(node);
|
|
}
|
|
|
|
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
|
|
int inum, int unlinked)
|
|
{
|
|
- struct inode *inode;
|
|
+ struct jffs2_inode *inode;
|
|
struct jffs2_inode_cache *ic;
|
|
|
|
if (unlinked) {
|
|
@@ -669,72 +444,9 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
|
|
Just iget() it, and if read_inode() is necessary that's OK.
|
|
*/
|
|
inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
|
|
- if (IS_ERR(inode))
|
|
- return ERR_CAST(inode);
|
|
- }
|
|
- if (is_bad_inode(inode)) {
|
|
- pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
|
|
- inum, unlinked);
|
|
- /* NB. This will happen again. We need to do something appropriate here. */
|
|
- iput(inode);
|
|
- return ERR_PTR(-EIO);
|
|
+ if (inode <= 0)
|
|
+ return (struct jffs2_inode_info *)inode;
|
|
}
|
|
|
|
return JFFS2_INODE_INFO(inode);
|
|
}
|
|
-
|
|
-static int jffs2_flash_setup(struct jffs2_sb_info *c) {
|
|
- int ret = 0;
|
|
-
|
|
- if (jffs2_cleanmarker_oob(c)) {
|
|
- /* NAND flash... do setup accordingly */
|
|
- ret = jffs2_nand_flash_setup(c);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
-
|
|
- /* and Dataflash */
|
|
- if (jffs2_dataflash(c)) {
|
|
- ret = jffs2_dataflash_setup(c);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
-
|
|
- /* and Intel "Sibley" flash */
|
|
- if (jffs2_nor_wbuf_flash(c)) {
|
|
- ret = jffs2_nor_wbuf_flash_setup(c);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
-
|
|
- /* and an UBI volume */
|
|
- if (jffs2_ubivol(c)) {
|
|
- ret = jffs2_ubivol_setup(c);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
|
|
-
|
|
- if (jffs2_cleanmarker_oob(c)) {
|
|
- jffs2_nand_flash_cleanup(c);
|
|
- }
|
|
-
|
|
- /* and DataFlash */
|
|
- if (jffs2_dataflash(c)) {
|
|
- jffs2_dataflash_cleanup(c);
|
|
- }
|
|
-
|
|
- /* and Intel "Sibley" flash */
|
|
- if (jffs2_nor_wbuf_flash(c)) {
|
|
- jffs2_nor_wbuf_flash_cleanup(c);
|
|
- }
|
|
-
|
|
- /* and an UBI volume */
|
|
- if (jffs2_ubivol(c)) {
|
|
- jffs2_ubivol_cleanup(c);
|
|
- }
|
|
-}
|
|
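Editor's note: the block removed above is the old jffs2_do_fill_super() geometry setup, which rounded the flash size down to a whole number of erase blocks and refused to mount with fewer than five of them. A minimal standalone sketch of that check, using illustrative names rather than the real jffs2_sb_info fields:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: mirrors the alignment and minimum-block checks performed by
 * the removed fill_super code. Returns 0 if the geometry is usable. */
static int check_flash_geometry(uint32_t *flash_size, uint32_t sector_size)
{
	uint32_t blocks = *flash_size / sector_size;

	if (blocks * sector_size != *flash_size) {
		/* Trim the usable size to a whole number of erase blocks. */
		*flash_size = blocks * sector_size;
		printf("Flash size not aligned to erasesize, reducing to %uKiB\n",
		       *flash_size / 1024);
	}

	/* JFFS2 needs spare blocks for garbage collection to make progress. */
	if (*flash_size < 5 * sector_size)
		return -1;

	return 0;
}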
diff -Nupr old/fs/jffs2/gc.c new/fs/jffs2/gc.c
|
|
--- old/fs/jffs2/gc.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/gc.c 2022-05-10 16:11:42.090000000 +0800
|
|
@@ -10,17 +10,17 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/crc32.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/semaphore.h>
|
|
#include <linux/compiler.h>
|
|
#include <linux/stat.h>
|
|
+#include "mtd_dev.h"
|
|
#include "nodelist.h"
|
|
#include "compr.h"
|
|
+#include "los_crc32.h"
|
|
|
|
static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
|
|
struct jffs2_inode_cache *ic,
|
|
@@ -43,7 +43,7 @@ static int jffs2_garbage_collect_live(st
|
|
/* Called with erase_completion_lock held */
|
|
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
|
|
{
|
|
- struct jffs2_eraseblock *ret;
|
|
+ struct jffs2_eraseblock *ret = NULL;
|
|
struct list_head *nextlist = NULL;
|
|
int n = jiffies % 128;
|
|
|
|
@@ -131,62 +131,40 @@ int jffs2_garbage_collect_pass(struct jf
|
|
int ret = 0, inum, nlink;
|
|
int xattr = 0;
|
|
|
|
- if (mutex_lock_interruptible(&c->alloc_sem))
|
|
+ if (mutex_lock(&c->alloc_sem))
|
|
return -EINTR;
|
|
|
|
-
|
|
for (;;) {
|
|
- /* We can't start doing GC until we've finished checking
|
|
- the node CRCs etc. */
|
|
- int bucket, want_ino;
|
|
-
|
|
spin_lock(&c->erase_completion_lock);
|
|
if (!c->unchecked_size)
|
|
break;
|
|
+
|
|
+ /* We can't start doing GC yet. We haven't finished checking
|
|
+ the node CRCs etc. Do it now. */
|
|
+
|
|
+ /* checked_ino is protected by the alloc_sem */
|
|
+ if (c->checked_ino > c->highest_ino && xattr) {
|
|
+ pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
|
|
+ c->unchecked_size);
|
|
+ jffs2_dbg_dump_block_lists_nolock(c);
|
|
+ spin_unlock(&c->erase_completion_lock);
|
|
+ mutex_unlock(&c->alloc_sem);
|
|
+ return -ENOSPC;
|
|
+ }
|
|
+
|
|
spin_unlock(&c->erase_completion_lock);
|
|
|
|
if (!xattr)
|
|
xattr = jffs2_verify_xattr(c);
|
|
|
|
spin_lock(&c->inocache_lock);
|
|
- /* Instead of doing the inodes in numeric order, doing a lookup
|
|
- * in the hash for each possible number, just walk the hash
|
|
- * buckets of *existing* inodes. This means that we process
|
|
- * them out-of-order, but it can be a lot faster if there's
|
|
- * a sparse inode# space. Which there often is. */
|
|
- want_ino = c->check_ino;
|
|
- for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) {
|
|
- for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
|
|
- if (ic->ino < want_ino)
|
|
- continue;
|
|
-
|
|
- if (ic->state != INO_STATE_CHECKEDABSENT &&
|
|
- ic->state != INO_STATE_PRESENT)
|
|
- goto got_next; /* with inocache_lock held */
|
|
|
|
- jffs2_dbg(1, "Skipping ino #%u already checked\n",
|
|
- ic->ino);
|
|
- }
|
|
- want_ino = 0;
|
|
- }
|
|
-
|
|
- /* Point c->check_ino past the end of the last bucket. */
|
|
- c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) &
|
|
- ~c->inocache_hashsize) - 1;
|
|
-
|
|
- spin_unlock(&c->inocache_lock);
|
|
-
|
|
- pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
|
|
- c->unchecked_size);
|
|
- jffs2_dbg_dump_block_lists_nolock(c);
|
|
- mutex_unlock(&c->alloc_sem);
|
|
- return -ENOSPC;
|
|
+ ic = jffs2_get_ino_cache(c, c->checked_ino++);
|
|
|
|
- got_next:
|
|
- /* For next time round the loop, we want c->checked_ino to indicate
|
|
- * the *next* one we want to check. And since we're walking the
|
|
- * buckets rather than doing it sequentially, it's: */
|
|
- c->check_ino = ic->ino + c->inocache_hashsize;
|
|
+ if (!ic) {
|
|
+ spin_unlock(&c->inocache_lock);
|
|
+ continue;
|
|
+ }
|
|
|
|
if (!ic->pino_nlink) {
|
|
jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
|
|
@@ -198,6 +176,8 @@ int jffs2_garbage_collect_pass(struct jf
|
|
switch(ic->state) {
|
|
case INO_STATE_CHECKEDABSENT:
|
|
case INO_STATE_PRESENT:
|
|
+ jffs2_dbg(1, "Skipping ino #%u already checked\n",
|
|
+ ic->ino);
|
|
spin_unlock(&c->inocache_lock);
|
|
continue;
|
|
|
|
@@ -207,6 +187,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
ic->ino, ic->state);
|
|
spin_unlock(&c->inocache_lock);
|
|
BUG();
|
|
+ break;
|
|
|
|
case INO_STATE_READING:
|
|
/* We need to wait for it to finish, lest we move on
|
|
@@ -216,7 +197,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
ic->ino);
|
|
/* We need to come back again for the _same_ inode. We've
|
|
made no progress in this case, but that should be OK */
|
|
- c->check_ino = ic->ino;
|
|
+ c->checked_ino--;
|
|
|
|
mutex_unlock(&c->alloc_sem);
|
|
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
|
|
@@ -224,6 +205,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
|
|
default:
|
|
BUG();
|
|
+ break;
|
|
|
|
case INO_STATE_UNCHECKED:
|
|
;
|
|
@@ -290,7 +272,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
raw = jeb->gc_node;
|
|
gcblock_dirty = jeb->dirty_size;
|
|
|
|
- while(ref_obsolete(raw)) {
|
|
+ while(raw && ref_obsolete(raw)) {
|
|
jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
|
|
ref_offset(raw));
|
|
raw = ref_next(raw);
|
|
@@ -310,7 +292,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
|
|
ref_offset(raw));
|
|
|
|
- if (!raw->next_in_ino) {
|
|
+	if (raw && !raw->next_in_ino) {
|
|
/* Inode-less node. Clean marker, snapshot or something like that */
|
|
spin_unlock(&c->erase_completion_lock);
|
|
if (ref_flags(raw) == REF_PRISTINE) {
|
|
@@ -368,7 +350,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
We can just copy any pristine nodes, but have
|
|
to prevent anyone else from doing read_inode() while
|
|
we're at it, so we set the state accordingly */
|
|
- if (ref_flags(raw) == REF_PRISTINE)
|
|
+ if (raw && ref_flags(raw) == REF_PRISTINE)
|
|
ic->state = INO_STATE_GC;
|
|
else {
|
|
jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
|
|
@@ -393,6 +375,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
mutex_unlock(&c->alloc_sem);
|
|
spin_unlock(&c->inocache_lock);
|
|
BUG();
|
|
+ break;
|
|
|
|
case INO_STATE_READING:
|
|
/* Someone's currently trying to read it. We must wait for
|
|
@@ -430,7 +413,6 @@ int jffs2_garbage_collect_pass(struct jf
|
|
|
|
spin_lock(&c->inocache_lock);
|
|
ic->state = INO_STATE_CHECKEDABSENT;
|
|
- wake_up(&c->inocache_wq);
|
|
|
|
if (ret != -EBADFD) {
|
|
spin_unlock(&c->inocache_lock);
|
|
@@ -460,9 +442,7 @@ int jffs2_garbage_collect_pass(struct jf
|
|
ret = 0;
|
|
goto release_sem;
|
|
}
|
|
-
|
|
ret = jffs2_garbage_collect_live(c, jeb, raw, f);
|
|
-
|
|
jffs2_gc_release_inode(c, f);
|
|
|
|
test_gcnode:
|
|
@@ -541,7 +521,7 @@ static int jffs2_garbage_collect_live(st
|
|
break; /* We've found them all */
|
|
}
|
|
}
|
|
- if (fn) {
|
|
+ if (fn != NULL && frag != NULL) {
|
|
if (ref_flags(raw) == REF_PRISTINE) {
|
|
ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
|
|
if (!ret) {
|
|
@@ -552,7 +532,7 @@ static int jffs2_garbage_collect_live(st
|
|
goto upnout;
|
|
}
|
|
/* We found a datanode. Do the GC */
|
|
- if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
|
|
+ if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
|
|
/* It crosses a page boundary. Therefore, it must be a hole. */
|
|
ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
|
|
} else {
|
|
@@ -635,6 +615,7 @@ static int jffs2_garbage_collect_pristin
|
|
if (je32_to_cpu(node->u.hdr_crc) != crc) {
|
|
pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
|
|
@@ -645,6 +626,7 @@ static int jffs2_garbage_collect_pristin
|
|
pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
ref_offset(raw), je32_to_cpu(node->i.node_crc),
|
|
crc);
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
|
|
@@ -654,6 +636,7 @@ static int jffs2_garbage_collect_pristin
|
|
pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
ref_offset(raw),
|
|
je32_to_cpu(node->i.data_crc), crc);
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
}
|
|
@@ -665,12 +648,14 @@ static int jffs2_garbage_collect_pristin
|
|
pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
ref_offset(raw),
|
|
je32_to_cpu(node->d.node_crc), crc);
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
|
|
- if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
|
|
+ if (strnlen((const char *)node->d.name, node->d.nsize) != node->d.nsize) {
|
|
pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
|
|
ref_offset(raw));
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
|
|
@@ -680,6 +665,7 @@ static int jffs2_garbage_collect_pristin
|
|
pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
ref_offset(raw),
|
|
je32_to_cpu(node->d.name_crc), crc);
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
}
|
|
@@ -689,6 +675,7 @@ static int jffs2_garbage_collect_pristin
|
|
if (ic) {
|
|
pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
|
|
ref_offset(raw), je16_to_cpu(node->u.nodetype));
|
|
+ jffs2_dbg_dump_node(c, ref_offset(raw));
|
|
goto bail;
|
|
}
|
|
}
|
|
@@ -697,7 +684,7 @@ static int jffs2_garbage_collect_pristin
|
|
retry:
|
|
phys_ofs = write_ofs(c);
|
|
|
|
- ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
|
|
+ ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (const u_char *)node);
|
|
|
|
if (ret || (retlen != rawlen)) {
|
|
pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
|
|
@@ -761,7 +748,7 @@ static int jffs2_garbage_collect_metadat
|
|
struct jffs2_full_dnode *new_fn;
|
|
struct jffs2_raw_inode ri;
|
|
struct jffs2_node_frag *last_frag;
|
|
- union jffs2_device_node dev;
|
|
+ jint16_t dev;
|
|
char *mdata = NULL;
|
|
int mdatalen = 0;
|
|
uint32_t alloclen, ilen;
|
|
@@ -770,8 +757,9 @@ static int jffs2_garbage_collect_metadat
|
|
if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
|
|
S_ISCHR(JFFS2_F_I_MODE(f)) ) {
|
|
/* For these, we don't actually need to read the old node */
|
|
- mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
|
|
+ dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | JFFS2_F_I_RDEV_MIN(f)));
|
|
mdata = (char *)&dev;
|
|
+ mdatalen = sizeof(dev);
|
|
jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
|
|
__func__, mdatalen);
|
|
} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
|
|
@@ -781,7 +769,7 @@ static int jffs2_garbage_collect_metadat
|
|
pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
|
|
return -ENOMEM;
|
|
}
|
|
- ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
|
|
+ ret = jffs2_read_dnode(c, f, fn, (unsigned char *)mdata, 0, mdatalen);
|
|
if (ret) {
|
|
pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
|
|
ret);
|
|
@@ -831,7 +819,7 @@ static int jffs2_garbage_collect_metadat
|
|
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
|
|
ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
|
|
|
|
- new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
|
|
+ new_fn = jffs2_write_dnode(c, f, &ri, (unsigned char *)mdata, mdatalen, ALLOC_GC);
|
|
|
|
if (IS_ERR(new_fn)) {
|
|
pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
|
|
@@ -857,7 +845,7 @@ static int jffs2_garbage_collect_dirent(
|
|
|
|
rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
|
|
- rd.nsize = strlen(fd->name);
|
|
+ rd.nsize = strlen((const char *)fd->name);
|
|
rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
|
|
rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));
|
|
|
|
@@ -908,7 +896,7 @@ static int jffs2_garbage_collect_deletio
|
|
struct jffs2_raw_node_ref *raw;
|
|
int ret;
|
|
size_t retlen;
|
|
- int name_len = strlen(fd->name);
|
|
+ int name_len = strlen((const char *)fd->name);
|
|
uint32_t name_crc = crc32(0, fd->name, name_len);
|
|
uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
|
|
|
|
@@ -1053,6 +1041,7 @@ static int jffs2_garbage_collect_hole(st
|
|
pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
|
|
__func__, ref_offset(fn->raw),
|
|
je32_to_cpu(ri.node_crc), crc);
|
|
+ jffs2_dbg_dump_node(c, ref_offset(fn->raw));
|
|
/* FIXME: We could possibly deal with this by writing new holes for each frag */
|
|
pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
|
|
start, end, f->inocache->ino);
|
|
@@ -1165,13 +1154,12 @@ static int jffs2_garbage_collect_dnode(s
|
|
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
|
|
uint32_t start, uint32_t end)
|
|
{
|
|
- struct inode *inode = OFNI_EDONI_2SFFJ(f);
|
|
struct jffs2_full_dnode *new_fn;
|
|
struct jffs2_raw_inode ri;
|
|
uint32_t alloclen, offset, orig_end, orig_start;
|
|
int ret = 0;
|
|
unsigned char *comprbuf = NULL, *writebuf;
|
|
- struct page *page;
|
|
+ unsigned long pg;
|
|
unsigned char *pg_ptr;
|
|
|
|
memset(&ri, 0, sizeof(ri));
|
|
@@ -1193,8 +1181,8 @@ static int jffs2_garbage_collect_dnode(s
|
|
struct jffs2_node_frag *frag;
|
|
uint32_t min, max;
|
|
|
|
- min = start & ~(PAGE_SIZE-1);
|
|
- max = min + PAGE_SIZE;
|
|
+ min = start & ~(PAGE_CACHE_SIZE-1);
|
|
+ max = min + PAGE_CACHE_SIZE;
|
|
|
|
frag = jffs2_lookup_node_frag(&f->fragtree, start);
|
|
|
|
@@ -1203,7 +1191,7 @@ static int jffs2_garbage_collect_dnode(s
|
|
BUG_ON(frag->ofs != start);
|
|
|
|
/* First grow down... */
|
|
- while((frag = frag_prev(frag)) && frag->ofs >= min) {
|
|
+ while(frag && (frag = frag_prev(frag)) && frag->ofs >= min) {
|
|
|
|
/* If the previous frag doesn't even reach the beginning, there's
|
|
excessive fragmentation. Just merge. */
|
|
@@ -1259,7 +1247,7 @@ static int jffs2_garbage_collect_dnode(s
|
|
/* Find last frag which is actually part of the node we're to GC. */
|
|
frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
|
|
|
|
- while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
|
|
+ while(frag && (frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
|
|
|
|
/* If the previous frag doesn't even reach the beginning, there's lots
|
|
of fragmentation. Just merge. */
|
|
@@ -1317,27 +1305,21 @@ static int jffs2_garbage_collect_dnode(s
|
|
BUG_ON(start > orig_start);
|
|
}
|
|
|
|
- /* The rules state that we must obtain the page lock *before* f->sem, so
|
|
- * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
|
|
- * actually going to *change* so we're safe; we only allow reading.
|
|
- *
|
|
- * It is important to note that jffs2_write_begin() will ensure that its
|
|
- * page is marked Uptodate before allocating space. That means that if we
|
|
- * end up here trying to GC the *same* page that jffs2_write_begin() is
|
|
- * trying to write out, read_cache_page() will not deadlock. */
|
|
- mutex_unlock(&f->sem);
|
|
- page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
|
|
- jffs2_do_readpage_unlock, inode);
|
|
- if (IS_ERR(page)) {
|
|
+ /* First, use readpage() to read the appropriate page into the page cache */
|
|
+ /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
|
|
+ * triggered garbage collection in the first place?
|
|
+ * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
|
|
+ * page OK. We'll actually write it out again in commit_write, which is a little
|
|
+ * suboptimal, but at least we're correct.
|
|
+ */
|
|
+ pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
|
|
+
|
|
+ if (IS_ERR(pg_ptr)) {
|
|
pr_warn("read_cache_page() returned error: %ld\n",
|
|
- PTR_ERR(page));
|
|
- mutex_lock(&f->sem);
|
|
- return PTR_ERR(page);
|
|
+ PTR_ERR(pg_ptr));
|
|
+ return PTR_ERR(pg_ptr);
|
|
}
|
|
|
|
- pg_ptr = kmap(page);
|
|
- mutex_lock(&f->sem);
|
|
-
|
|
offset = start;
|
|
while(offset < orig_end) {
|
|
uint32_t datalen;
|
|
@@ -1355,7 +1337,7 @@ static int jffs2_garbage_collect_dnode(s
|
|
cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
|
|
datalen = end - offset;
|
|
|
|
- writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
|
|
+ writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
|
|
|
|
comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
|
|
|
|
@@ -1400,7 +1382,6 @@ static int jffs2_garbage_collect_dnode(s
|
|
}
|
|
}
|
|
|
|
- kunmap(page);
|
|
- put_page(page);
|
|
+ jffs2_gc_release_page(c, pg_ptr, &pg);
|
|
return ret;
|
|
}
|
|
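Editor's note: the gc.c hunks above drop the hash-bucket walk over existing inodes and drive the CRC-checking phase with a plain sequential counter, c->checked_ino. A rough sketch of that control flow, assuming a lookup helper that returns NULL for inode numbers that do not exist (all names here are illustrative, not the real API):

#include <stdint.h>
#include <stddef.h>

struct ino_cache;                                  /* stands in for jffs2_inode_cache */
struct ino_cache *get_ino_cache(uint32_t ino);     /* assumed lookup helper */
int check_inode_crc(struct ino_cache *ic);         /* assumed CRC-check helper */

/* Advance the sequential scan by one inode; gaps in the inode number space
 * are simply skipped, which is what the patched loop does with 'continue'. */
static int check_next_inode(uint32_t *checked_ino, uint32_t highest_ino)
{
	while (*checked_ino <= highest_ino) {
		struct ino_cache *ic = get_ino_cache((*checked_ino)++);
		if (!ic)
			continue;
		return check_inode_crc(ic);
	}
	return -1;	/* every inode has been visited */
}

When an inode is still being read, the real code steps c->checked_ino back by one and retries the same inode after sleeping, which is why the counter is incremented before the state switch.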
diff -Nupr old/fs/jffs2/ioctl.c new/fs/jffs2/ioctl.c
|
|
--- old/fs/jffs2/ioctl.c 2022-05-09 17:15:24.350000000 +0800
|
|
+++ new/fs/jffs2/ioctl.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,22 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2001-2007 Red Hat, Inc.
|
|
- * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
|
|
- *
|
|
- * Created by David Woodhouse <dwmw2@infradead.org>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#include <linux/fs.h>
|
|
-#include "nodelist.h"
|
|
-
|
|
-long jffs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|
-{
|
|
- /* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
|
|
- will include compression support etc. */
|
|
- return -ENOTTY;
|
|
-}
|
|
-
|
|
diff -Nupr old/fs/jffs2/jffs2_fs_i.h new/fs/jffs2/jffs2_fs_i.h
|
|
--- old/fs/jffs2/jffs2_fs_i.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/jffs2_fs_i.h 2022-05-09 20:50:05.810000000 +0800
|
|
@@ -14,8 +14,13 @@
|
|
#define _JFFS2_FS_I
|
|
|
|
#include <linux/rbtree.h>
|
|
-#include <linux/posix_acl.h>
|
|
-#include <linux/mutex.h>
|
|
+#include <linux/kernel.h>
|
|
+
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+extern "C" {
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
|
|
struct jffs2_inode_info {
|
|
/* We need an internal mutex similar to inode->i_mutex.
|
|
@@ -24,7 +29,7 @@ struct jffs2_inode_info {
|
|
before letting GC proceed. Or we'd have to put ugliness
|
|
into the GC code so it didn't attempt to obtain the i_mutex
|
|
for the inode(s) which are already locked */
|
|
- struct mutex sem;
|
|
+ struct pthread_mutex sem;
|
|
|
|
/* The highest (datanode) version number used for this ino */
|
|
uint32_t highest_version;
|
|
@@ -50,7 +55,29 @@ struct jffs2_inode_info {
|
|
|
|
uint16_t flags;
|
|
uint8_t usercompr;
|
|
- struct inode vfs_inode;
|
|
};
|
|
|
|
+struct super_block;
|
|
+
|
|
+struct jffs2_inode {
|
|
+ uint32_t i_ino;
|
|
+ mode_t i_mode;
|
|
+ nlink_t i_nlink;
|
|
+ uid_t i_uid;
|
|
+ gid_t i_gid;
|
|
+ time_t i_atime;
|
|
+ time_t i_mtime;
|
|
+ time_t i_ctime;
|
|
+ off_t i_size;
|
|
+ struct super_block *i_sb;
|
|
+ LOS_DL_LIST i_hashlist;
|
|
+ struct jffs2_inode_info jffs2_i;
|
|
+};
|
|
+
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+}
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
#endif /* _JFFS2_FS_I */
|
|
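Editor's note: with the Linux VFS inode removed, the port embeds jffs2_inode_info inside its own struct jffs2_inode (the jffs2_i member above), and the JFFS2_INODE_INFO()/OFNI_EDONI_2SFFJ() macros defined later in this patch convert between the two by pointer arithmetic. A self-contained illustration of that embed-and-recover pattern, using generic types rather than the real structures:

#include <stddef.h>
#include <stdio.h>

struct inner { int version; };

struct outer {
	int ino;
	struct inner priv;	/* embedded, like jffs2_i in struct jffs2_inode */
};

/* Same trick as OFNI_EDONI_2SFFJ: subtract the member offset from the
 * member pointer to get back to the enclosing object. */
#define OUTER_OF(p) ((struct outer *)((char *)(p) - offsetof(struct outer, priv)))

int main(void)
{
	struct outer o = { .ino = 42 };
	struct inner *ip = &o.priv;	/* what JFFS2_INODE_INFO() would hand out */

	printf("ino=%d\n", OUTER_OF(ip)->ino);	/* prints 42 */
	return 0;
}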
diff -Nupr old/fs/jffs2/jffs2_fs_sb.h new/fs/jffs2/jffs2_fs_sb.h
|
|
--- old/fs/jffs2/jffs2_fs_sb.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/jffs2_fs_sb.h 2022-05-09 20:49:43.100000000 +0800
|
|
@@ -17,11 +17,18 @@
|
|
#include <linux/spinlock.h>
|
|
#include <linux/workqueue.h>
|
|
#include <linux/completion.h>
|
|
-#include <linux/mutex.h>
|
|
#include <linux/timer.h>
|
|
#include <linux/wait.h>
|
|
#include <linux/list.h>
|
|
#include <linux/rwsem.h>
|
|
+#include "vfs_jffs2.h"
|
|
+#include "mtd_dev.h"
|
|
+
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+extern "C" {
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
|
|
#define JFFS2_SB_FLAG_RO 1
|
|
#define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */
|
|
@@ -47,10 +54,10 @@ struct jffs2_mount_opts {
|
|
Nee jffs_control
|
|
*/
|
|
struct jffs2_sb_info {
|
|
- struct mtd_info *mtd;
|
|
+ struct MtdDev *mtd;
|
|
|
|
uint32_t highest_ino;
|
|
- uint32_t check_ino; /* *NEXT* inode to be checked */
|
|
+ uint32_t checked_ino;
|
|
|
|
unsigned int flags;
|
|
|
|
@@ -58,7 +65,7 @@ struct jffs2_sb_info {
|
|
struct completion gc_thread_start; /* GC thread start completion */
|
|
struct completion gc_thread_exit; /* GC thread exit completion port */
|
|
|
|
- struct mutex alloc_sem; /* Used to protect all the following
|
|
+ struct pthread_mutex alloc_sem; /* Used to protect all the following
|
|
fields, and also to protect against
|
|
out-of-order writing of nodes. And GC. */
|
|
uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER
|
|
@@ -120,7 +127,7 @@ struct jffs2_sb_info {
|
|
/* Sem to allow jffs2_garbage_collect_deletion_dirent to
|
|
drop the erase_completion_lock while it's holding a pointer
|
|
to an obsoleted node. I don't like this. Alternatives welcomed. */
|
|
- struct mutex erase_free_sem;
|
|
+ struct pthread_mutex erase_free_sem;
|
|
|
|
uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
|
|
|
|
@@ -160,4 +167,27 @@ struct jffs2_sb_info {
|
|
void *os_priv;
|
|
};
|
|
|
|
+struct super_block {
|
|
+ struct jffs2_sb_info jffs2_sb;
|
|
+ LIST_HEAD s_node_hash[JFFS2_NODE_HASH_BUCKETS];
|
|
+ LosMux s_node_hash_lock;
|
|
+ struct jffs2_inode *s_root;
|
|
+ void *s_dev;
|
|
+
|
|
+ UINT32 s_lock; /* Lock the inode cache */
|
|
+ EVENT_CB_S s_gc_thread_flags; /* Communication with the gcthread */
|
|
+ unsigned int s_gc_thread;
|
|
+ unsigned long s_mount_flags;
|
|
+};
|
|
+
|
|
+#define JFFS2_SB_INFO(sb) (&(sb)->jffs2_sb)
|
|
+#define OFNI_BS_2SFFJ(c) \
|
|
+ ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->jffs2_sb)) ) )
|
|
+
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+}
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
#endif /* _JFFS2_FS_SB */
|
|
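Editor's note: this header also switches alloc_sem and erase_free_sem from struct mutex to struct pthread_mutex, so the locking presumably ends up backed by pthread mutexes rather than kernel mutexes. A generic sketch of what such a compatibility wrapper could look like; the wrapper names and the zero-on-success convention are assumptions, not taken from the patch:

#include <pthread.h>

static pthread_mutex_t alloc_sem = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success, matching how the patched gc.c tests mutex_lock(). */
static inline int mutex_lock_compat(pthread_mutex_t *m)
{
	return pthread_mutex_lock(m);
}

static inline void mutex_unlock_compat(pthread_mutex_t *m)
{
	pthread_mutex_unlock(m);
}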
diff -Nupr old/fs/jffs2/malloc.c new/fs/jffs2/malloc.c
|
|
--- old/fs/jffs2/malloc.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/malloc.c 2022-05-10 09:43:16.720000000 +0800
|
|
@@ -9,111 +9,31 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/init.h>
|
|
-#include <linux/jffs2.h>
|
|
+#include <stdlib.h>
|
|
#include "nodelist.h"
|
|
|
|
-/* These are initialised to NULL in the kernel startup code.
|
|
- If you're porting to other operating systems, beware */
|
|
-static struct kmem_cache *full_dnode_slab;
|
|
-static struct kmem_cache *raw_dirent_slab;
|
|
-static struct kmem_cache *raw_inode_slab;
|
|
-static struct kmem_cache *tmp_dnode_info_slab;
|
|
-static struct kmem_cache *raw_node_ref_slab;
|
|
-static struct kmem_cache *node_frag_slab;
|
|
-static struct kmem_cache *inode_cache_slab;
|
|
-#ifdef CONFIG_JFFS2_FS_XATTR
|
|
-static struct kmem_cache *xattr_datum_cache;
|
|
-static struct kmem_cache *xattr_ref_cache;
|
|
+#if !defined(JFFS2NUM_FS_JFFS2_RAW_NODE_REF_CACHE_POOL_SIZE)
|
|
+# define JFFS2NUM_FS_JFFS2_RAW_NODE_REF_CACHE_POOL_SIZE 0
|
|
#endif
|
|
|
|
int __init jffs2_create_slab_caches(void)
|
|
{
|
|
- full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
|
|
- sizeof(struct jffs2_full_dnode),
|
|
- 0, 0, NULL);
|
|
- if (!full_dnode_slab)
|
|
- goto err;
|
|
-
|
|
- raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
|
|
- sizeof(struct jffs2_raw_dirent),
|
|
- 0, SLAB_HWCACHE_ALIGN, NULL);
|
|
- if (!raw_dirent_slab)
|
|
- goto err;
|
|
-
|
|
- raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
|
|
- sizeof(struct jffs2_raw_inode),
|
|
- 0, SLAB_HWCACHE_ALIGN, NULL);
|
|
- if (!raw_inode_slab)
|
|
- goto err;
|
|
-
|
|
- tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
|
|
- sizeof(struct jffs2_tmp_dnode_info),
|
|
- 0, 0, NULL);
|
|
- if (!tmp_dnode_info_slab)
|
|
- goto err;
|
|
-
|
|
- raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
|
|
- sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
|
|
- 0, 0, NULL);
|
|
- if (!raw_node_ref_slab)
|
|
- goto err;
|
|
-
|
|
- node_frag_slab = kmem_cache_create("jffs2_node_frag",
|
|
- sizeof(struct jffs2_node_frag),
|
|
- 0, 0, NULL);
|
|
- if (!node_frag_slab)
|
|
- goto err;
|
|
-
|
|
- inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
|
|
- sizeof(struct jffs2_inode_cache),
|
|
- 0, 0, NULL);
|
|
- if (!inode_cache_slab)
|
|
- goto err;
|
|
-
|
|
-#ifdef CONFIG_JFFS2_FS_XATTR
|
|
- xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
|
|
- sizeof(struct jffs2_xattr_datum),
|
|
- 0, 0, NULL);
|
|
- if (!xattr_datum_cache)
|
|
- goto err;
|
|
-
|
|
- xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
|
|
- sizeof(struct jffs2_xattr_ref),
|
|
- 0, 0, NULL);
|
|
- if (!xattr_ref_cache)
|
|
- goto err;
|
|
-#endif
|
|
-
|
|
return 0;
|
|
- err:
|
|
- jffs2_destroy_slab_caches();
|
|
- return -ENOMEM;
|
|
+
|
|
}
|
|
|
|
void jffs2_destroy_slab_caches(void)
|
|
{
|
|
- kmem_cache_destroy(full_dnode_slab);
|
|
- kmem_cache_destroy(raw_dirent_slab);
|
|
- kmem_cache_destroy(raw_inode_slab);
|
|
- kmem_cache_destroy(tmp_dnode_info_slab);
|
|
- kmem_cache_destroy(raw_node_ref_slab);
|
|
- kmem_cache_destroy(node_frag_slab);
|
|
- kmem_cache_destroy(inode_cache_slab);
|
|
-#ifdef CONFIG_JFFS2_FS_XATTR
|
|
- kmem_cache_destroy(xattr_datum_cache);
|
|
- kmem_cache_destroy(xattr_ref_cache);
|
|
-#endif
|
|
+ return;
|
|
}
|
|
|
|
+
|
|
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
|
|
{
|
|
struct jffs2_full_dirent *ret;
|
|
- ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
|
|
+ ret = zalloc(sizeof(struct jffs2_full_dirent) + namesize);
|
|
dbg_memalloc("%p\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -127,7 +47,7 @@ void jffs2_free_full_dirent(struct jffs2
|
|
struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
|
|
{
|
|
struct jffs2_full_dnode *ret;
|
|
- ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
|
|
+ ret = zalloc(sizeof(struct jffs2_full_dnode));
|
|
dbg_memalloc("%p\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -135,13 +55,13 @@ struct jffs2_full_dnode *jffs2_alloc_ful
|
|
void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(full_dnode_slab, x);
|
|
+ free(x);
|
|
}
|
|
|
|
struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
|
|
{
|
|
struct jffs2_raw_dirent *ret;
|
|
- ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
|
|
+ ret = zalloc(sizeof(struct jffs2_raw_dirent));
|
|
dbg_memalloc("%p\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -149,13 +69,13 @@ struct jffs2_raw_dirent *jffs2_alloc_raw
|
|
void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(raw_dirent_slab, x);
|
|
+ free(x);
|
|
}
|
|
|
|
struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
|
|
{
|
|
struct jffs2_raw_inode *ret;
|
|
- ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
|
|
+ ret = zalloc(sizeof(struct jffs2_raw_inode));
|
|
dbg_memalloc("%p\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -163,13 +83,13 @@ struct jffs2_raw_inode *jffs2_alloc_raw_
|
|
void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(raw_inode_slab, x);
|
|
+ free(x);
|
|
}
|
|
|
|
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
|
|
{
|
|
struct jffs2_tmp_dnode_info *ret;
|
|
- ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
|
|
+ ret = zalloc(sizeof(struct jffs2_tmp_dnode_info));
|
|
dbg_memalloc("%p\n",
|
|
ret);
|
|
return ret;
|
|
@@ -178,14 +98,14 @@ struct jffs2_tmp_dnode_info *jffs2_alloc
|
|
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(tmp_dnode_info_slab, x);
|
|
+ free(x);
|
|
}
|
|
|
|
static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
|
|
{
|
|
struct jffs2_raw_node_ref *ret;
|
|
|
|
- ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
|
|
+ ret = malloc(sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK+1));
|
|
if (ret) {
|
|
int i = 0;
|
|
for (i=0; i < REFS_PER_BLOCK; i++) {
|
|
@@ -242,13 +162,13 @@ int jffs2_prealloc_raw_node_refs(struct
|
|
void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(raw_node_ref_slab, x);
|
|
+ free(x);
|
|
}
|
|
|
|
struct jffs2_node_frag *jffs2_alloc_node_frag(void)
|
|
{
|
|
struct jffs2_node_frag *ret;
|
|
- ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
|
|
+ ret = malloc(sizeof(struct jffs2_node_frag));
|
|
dbg_memalloc("%p\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -256,13 +176,14 @@ struct jffs2_node_frag *jffs2_alloc_node
|
|
void jffs2_free_node_frag(struct jffs2_node_frag *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(node_frag_slab, x);
|
|
+ free(x);
|
|
}
|
|
|
|
+
|
|
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
|
|
{
|
|
struct jffs2_inode_cache *ret;
|
|
- ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
|
|
+	ret = zalloc(sizeof(struct jffs2_inode_cache));
|
|
dbg_memalloc("%p\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -270,14 +191,14 @@ struct jffs2_inode_cache *jffs2_alloc_in
|
|
void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
|
|
{
|
|
dbg_memalloc("%p\n", x);
|
|
- kmem_cache_free(inode_cache_slab, x);
|
|
+ kfree(x);
|
|
}
|
|
|
|
#ifdef CONFIG_JFFS2_FS_XATTR
|
|
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
|
|
{
|
|
struct jffs2_xattr_datum *xd;
|
|
- xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
|
|
+ xd = malloc(sizeof(struct jffs2_xattr_datum));
|
|
dbg_memalloc("%p\n", xd);
|
|
if (!xd)
|
|
return NULL;
|
|
@@ -291,13 +212,13 @@ struct jffs2_xattr_datum *jffs2_alloc_xa
|
|
void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
|
|
{
|
|
dbg_memalloc("%p\n", xd);
|
|
- kmem_cache_free(xattr_datum_cache, xd);
|
|
+ kfree(xd);
|
|
}
|
|
|
|
struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
|
|
{
|
|
struct jffs2_xattr_ref *ref;
|
|
- ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
|
|
+ ref = malloc(sizeof(struct jffs2_xattr_ref));
|
|
dbg_memalloc("%p\n", ref);
|
|
if (!ref)
|
|
return NULL;
|
|
@@ -310,6 +231,6 @@ struct jffs2_xattr_ref *jffs2_alloc_xatt
|
|
void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
|
|
{
|
|
dbg_memalloc("%p\n", ref);
|
|
- kmem_cache_free(xattr_ref_cache, ref);
|
|
+ kfree(ref);
|
|
}
|
|
#endif
|
|
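Editor's note: malloc.c now backs every JFFS2 object with plain malloc()/zalloc() instead of dedicated slab caches. The one structured allocation that remains is the refblock: an array of REFS_PER_BLOCK raw node refs plus one extra tail slot that links to the next block (nodelist.c below checks that slot against REF_LINK_NODE when freeing). A generic sketch of that arrays-chained-through-a-tail-slot layout, with made-up names and sizes:

#include <stdlib.h>

#define ITEMS_PER_BLOCK 255	/* stands in for REFS_PER_BLOCK */

struct item { void *payload; };

/* Allocate one block and, if there is a previous block, use its spare tail
 * slot to point at the new one, forming a simple chain of arrays. */
static struct item *alloc_item_block(struct item *prev_block)
{
	struct item *blk = calloc(ITEMS_PER_BLOCK + 1, sizeof(struct item));

	if (blk && prev_block)
		prev_block[ITEMS_PER_BLOCK].payload = blk;
	return blk;
}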
diff -Nupr old/fs/jffs2/nodelist.c new/fs/jffs2/nodelist.c
|
|
--- old/fs/jffs2/nodelist.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/nodelist.c 2022-05-09 20:37:35.680000000 +0800
|
|
@@ -9,16 +9,15 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/fs.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/rbtree.h>
|
|
-#include <linux/crc32.h>
|
|
#include <linux/pagemap.h>
|
|
+#include <mtd_dev.h>
|
|
#include "nodelist.h"
|
|
+#include "jffs2.h"
|
|
+#include "los_crc32.h"
|
|
|
|
static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c,
|
|
struct jffs2_node_frag *this);
|
|
@@ -30,7 +29,7 @@ void jffs2_add_fd_to_list(struct jffs2_s
|
|
dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino);
|
|
|
|
while ((*prev) && (*prev)->nhash <= new->nhash) {
|
|
- if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
|
|
+ if ((*prev)->nhash == new->nhash && !strcmp((const char *)((*prev)->name), (const char *)new->name)) {
|
|
/* Duplicate. Free one */
|
|
if (new->version < (*prev)->version) {
|
|
dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
|
|
@@ -41,7 +40,7 @@ void jffs2_add_fd_to_list(struct jffs2_s
|
|
dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
|
|
(*prev)->name, (*prev)->ino);
|
|
new->next = (*prev)->next;
|
|
- /* It may have been a 'placeholder' deletion dirent,
|
|
+ /* It may have been a 'placeholder' deletion dirent,
|
|
if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */
|
|
if ((*prev)->raw)
|
|
jffs2_mark_node_obsolete(c, ((*prev)->raw));
|
|
@@ -65,13 +64,14 @@ uint32_t jffs2_truncate_fragtree(struct
|
|
/* We know frag->ofs <= size. That's what lookup does for us */
|
|
if (frag && frag->ofs != size) {
|
|
if (frag->ofs+frag->size > size) {
|
|
+ dbg_fragtree("truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size);
|
|
frag->size = size - frag->ofs;
|
|
}
|
|
frag = frag_next(frag);
|
|
}
|
|
while (frag && frag->ofs >= size) {
|
|
struct jffs2_node_frag *next = frag_next(frag);
|
|
-
|
|
+ dbg_fragtree("removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size);
|
|
frag_erase(frag, list);
|
|
jffs2_obsolete_node_frag(c, frag);
|
|
frag = next;
|
|
@@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct
|
|
|
|
/* If the last fragment starts at the RAM page boundary, it is
|
|
* REF_PRISTINE irrespective of its size. */
|
|
- if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
|
|
+ if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
|
|
dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
|
|
frag->ofs, frag->ofs + frag->size);
|
|
frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
|
|
@@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(st
|
|
If so, both 'this' and the new node get marked REF_NORMAL so
|
|
the GC can take a look.
|
|
*/
|
|
- if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
|
|
+ if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
|
|
if (this->node)
|
|
mark_ref_normal(this->node->raw);
|
|
mark_ref_normal(newfrag->node->raw);
|
|
@@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct
|
|
|
|
/* If we now share a page with other nodes, mark either previous
|
|
or next node REF_NORMAL, as appropriate. */
|
|
- if (newfrag->ofs & (PAGE_SIZE-1)) {
|
|
+ if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
|
|
struct jffs2_node_frag *prev = frag_prev(newfrag);
|
|
|
|
mark_ref_normal(fn->raw);
|
|
@@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct
|
|
mark_ref_normal(prev->node->raw);
|
|
}
|
|
|
|
- if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
|
|
+ if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
|
|
struct jffs2_node_frag *next = frag_next(newfrag);
|
|
|
|
if (next) {
|
|
@@ -401,7 +401,7 @@ int jffs2_add_full_dnode_to_inode(struct
|
|
}
|
|
}
|
|
jffs2_dbg_fragtree_paranoia_check_nolock(f);
|
|
-
|
|
+ jffs2_dbg_dump_fragtree_nolock(f);
|
|
return 0;
|
|
}
|
|
|
|
@@ -409,7 +409,6 @@ void jffs2_set_inocache_state(struct jff
|
|
{
|
|
spin_lock(&c->inocache_lock);
|
|
ic->state = state;
|
|
- wake_up(&c->inocache_wq);
|
|
spin_unlock(&c->inocache_lock);
|
|
}
|
|
|
|
@@ -505,8 +504,12 @@ void jffs2_free_raw_node_refs(struct jff
|
|
{
|
|
int i;
|
|
struct jffs2_raw_node_ref *this, *next;
|
|
+ struct super_block *sb = NULL;
|
|
+ struct MtdNorDev *device = NULL;
|
|
+ sb = OFNI_BS_2SFFJ(c);
|
|
+ device = (struct MtdNorDev*)(sb->s_dev);
|
|
|
|
- for (i=0; i<c->nr_blocks; i++) {
|
|
+ for (i=device->blockStart; i<c->nr_blocks+device->blockStart; i++) {
|
|
this = c->blocks[i].first_node;
|
|
while (this) {
|
|
if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
|
|
@@ -536,14 +539,22 @@ struct jffs2_node_frag *jffs2_lookup_nod
|
|
while(next) {
|
|
frag = rb_entry(next, struct jffs2_node_frag, rb);
|
|
|
|
+ dbg_fragtree2("considering frag %#04x-%#04x (%p). left %p, right %p\n",
|
|
+ frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right);
|
|
if (frag->ofs + frag->size <= offset) {
|
|
+ dbg_fragtree2("going right from frag %#04x-%#04x, before the region we care about\n",
|
|
+ frag->ofs, frag->ofs+frag->size);
|
|
/* Remember the closest smaller match on the way down */
|
|
if (!prev || frag->ofs > prev->ofs)
|
|
prev = frag;
|
|
next = frag->rb.rb_right;
|
|
} else if (frag->ofs > offset) {
|
|
+ dbg_fragtree2("going left from frag %#04x-%#04x, after the region we care about\n",
|
|
+ frag->ofs, frag->ofs+frag->size);
|
|
next = frag->rb.rb_left;
|
|
} else {
|
|
+ dbg_fragtree2("returning frag %#04x-%#04x, matched\n",
|
|
+ frag->ofs, frag->ofs+frag->size);
|
|
return frag;
|
|
}
|
|
}
|
|
@@ -564,10 +575,12 @@ struct jffs2_node_frag *jffs2_lookup_nod
|
|
they're killed. */
|
|
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
|
|
{
|
|
- struct jffs2_node_frag *frag, *next;
|
|
+ struct jffs2_node_frag *frag;
|
|
+ struct rb_node *tn,*next;
|
|
|
|
dbg_fragtree("killing\n");
|
|
- rbtree_postorder_for_each_entry_safe(frag, next, root, rb) {
|
|
+ RB_POSTORDER_FOREACH_SAFE(tn, linux_root, (struct linux_root *)root, next) {
|
|
+ frag = (struct jffs2_node_frag *)tn;
|
|
if (frag->node && !(--frag->node->frags)) {
|
|
/* Not a hole, and it's the final remaining frag
|
|
of this node. Free the node */
|
|
@@ -604,7 +617,7 @@ struct jffs2_raw_node_ref *jffs2_link_no
|
|
ref++;
|
|
}
|
|
|
|
- dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
|
|
+ dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
|
|
ref->flash_offset, ofs, ref->next_in_ino, len);
|
|
|
|
ref->flash_offset = ofs;
|
|
@@ -617,7 +630,7 @@ struct jffs2_raw_node_ref *jffs2_link_no
|
|
|
|
JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
|
|
ref, ref_offset(ref), ref_offset(ref)+len,
|
|
- ref_offset(jeb->last_node),
|
|
+ ref_offset(jeb->last_node),
|
|
ref_offset(jeb->last_node)+last_len);
|
|
BUG();
|
|
}
|
|
@@ -734,7 +747,7 @@ uint32_t __jffs2_ref_totlen(struct jffs2
|
|
pr_crit("next %p (0x%08x-0x%08x)\n",
|
|
ref_next(ref), ref_offset(ref_next(ref)),
|
|
ref_offset(ref_next(ref)) + ref->__totlen);
|
|
- } else
|
|
+ } else
|
|
pr_crit("No next ref. jeb->last_node is %p\n",
|
|
jeb->last_node);
|
|
|
|
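Editor's note: the dbg_fragtree2() lines added above trace jffs2_lookup_node_frag()'s descent through the fragment tree: it returns the fragment covering the requested offset, or else the closest fragment that ends at or before it. The same search written against a plain binary search tree, with a simplified node type instead of the real rbtree:

#include <stdint.h>
#include <stddef.h>

struct frag {
	uint32_t ofs, size;
	struct frag *left, *right;	/* simplified; the real tree is an rbtree */
};

static struct frag *lookup_frag(struct frag *node, uint32_t offset)
{
	struct frag *prev = NULL;	/* closest smaller match seen so far */

	while (node) {
		if (node->ofs + node->size <= offset) {
			/* Entirely before the offset: remember it, go right. */
			if (!prev || node->ofs > prev->ofs)
				prev = node;
			node = node->right;
		} else if (node->ofs > offset) {
			/* Entirely after the offset: go left. */
			node = node->left;
		} else {
			return node;	/* this fragment covers the offset */
		}
	}
	return prev;	/* may be NULL if offset precedes every fragment */
}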
diff -Nupr old/fs/jffs2/nodelist.h new/fs/jffs2/nodelist.h
|
|
--- old/fs/jffs2/nodelist.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/nodelist.h 2022-05-09 20:36:25.460000000 +0800
|
|
@@ -12,20 +12,28 @@
|
|
#ifndef __JFFS2_NODELIST_H__
|
|
#define __JFFS2_NODELIST_H__
|
|
|
|
-#include <linux/fs.h>
|
|
+#include <linux/stat.h>
|
|
#include <linux/types.h>
|
|
-#include <linux/jffs2.h>
|
|
+#include <linux/list.h>
|
|
+#include "jffs2.h"
|
|
#include "jffs2_fs_sb.h"
|
|
#include "jffs2_fs_i.h"
|
|
#include "xattr.h"
|
|
#include "acl.h"
|
|
#include "summary.h"
|
|
-
|
|
-#ifdef __ECOS
|
|
-#include "os-ecos.h"
|
|
-#else
|
|
+#include "vfs_jffs2.h"
|
|
#include "os-linux.h"
|
|
-#endif
|
|
+
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+extern "C" {
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
+struct kvec {
|
|
+ void *iov_base;
|
|
+ long iov_len;
|
|
+};
|
|
|
|
#define JFFS2_NATIVE_ENDIAN
|
|
|
|
@@ -193,6 +201,8 @@ struct jffs2_inode_cache {
|
|
#define INO_STATE_READING 5 /* In read_inode() */
|
|
#define INO_STATE_CLEARING 6 /* In clear_inode() */
|
|
|
|
+#define INOCACHE_HASHSIZE 128
|
|
+
|
|
#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
|
|
#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
|
|
|
|
@@ -250,10 +260,7 @@ struct jffs2_readinode_info
|
|
|
|
struct jffs2_full_dirent
|
|
{
|
|
- union {
|
|
- struct jffs2_raw_node_ref *raw;
|
|
- struct jffs2_inode_cache *ic; /* Just during part of build */
|
|
- };
|
|
+ struct jffs2_raw_node_ref *raw;
|
|
struct jffs2_full_dirent *next;
|
|
uint32_t version;
|
|
uint32_t ino; /* == zero for unlink */
|
|
@@ -313,34 +320,26 @@ static inline int jffs2_blocks_use_vmall
|
|
|
|
#define PAD(x) (((x)+3)&~3)
|
|
|
|
-static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev)
|
|
-{
|
|
- if (old_valid_dev(rdev)) {
|
|
- jdev->old_id = cpu_to_je16(old_encode_dev(rdev));
|
|
- return sizeof(jdev->old_id);
|
|
- } else {
|
|
- jdev->new_id = cpu_to_je32(new_encode_dev(rdev));
|
|
- return sizeof(jdev->new_id);
|
|
- }
|
|
-}
|
|
|
|
static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
|
|
{
|
|
- struct rb_node *node = rb_first(root);
|
|
+ struct rb_node *node = root->rb_node;
|
|
|
|
if (!node)
|
|
return NULL;
|
|
-
|
|
+ while(node->rb_left)
|
|
+ node = node->rb_left;
|
|
return rb_entry(node, struct jffs2_node_frag, rb);
|
|
}
|
|
|
|
static inline struct jffs2_node_frag *frag_last(struct rb_root *root)
|
|
{
|
|
- struct rb_node *node = rb_last(root);
|
|
+ struct rb_node *node = root->rb_node;
|
|
|
|
if (!node)
|
|
return NULL;
|
|
-
|
|
+ while(node->rb_right)
|
|
+ node = node->rb_right;
|
|
return rb_entry(node, struct jffs2_node_frag, rb);
|
|
}
|
|
|
|
@@ -404,8 +403,9 @@ struct jffs2_full_dirent *jffs2_write_di
|
|
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
|
|
struct jffs2_raw_inode *ri, unsigned char *buf,
|
|
uint32_t offset, uint32_t writelen, uint32_t *retlen);
|
|
-int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f,
|
|
- struct jffs2_raw_inode *ri, const struct qstr *qstr);
|
|
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
|
|
+ struct jffs2_inode_info *f, struct jffs2_raw_inode *ri,
|
|
+ const char *name, int namelen);
|
|
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name,
|
|
int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
|
|
int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino,
|
|
@@ -481,4 +481,10 @@ int jffs2_write_nand_cleanmarker(struct
|
|
|
|
#include "debug.h"
|
|
|
|
+#ifdef __cplusplus
|
|
+#if __cplusplus
|
|
+}
|
|
+#endif /* __cplusplus */
|
|
+#endif /* __cplusplus */
|
|
+
|
|
#endif /* __JFFS2_NODELIST_H__ */
|
|
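Editor's note: frag_first() and frag_last() above no longer call rb_first()/rb_last() and instead walk from the root to the leftmost or rightmost node themselves. The walk in isolation, on a simplified node type:

#include <stddef.h>

struct node { struct node *left, *right; };

/* Leftmost node holds the smallest key, rightmost the largest. */
static struct node *tree_first(struct node *root)
{
	if (!root)
		return NULL;
	while (root->left)
		root = root->left;
	return root;
}

static struct node *tree_last(struct node *root)
{
	if (!root)
		return NULL;
	while (root->right)
		root = root->right;
	return root;
}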
diff -Nupr old/fs/jffs2/nodemgmt.c new/fs/jffs2/nodemgmt.c
|
|
--- old/fs/jffs2/nodemgmt.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/nodemgmt.c 2022-05-09 20:35:50.910000000 +0800
|
|
@@ -9,46 +9,14 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/compiler.h>
|
|
-#include <linux/sched/signal.h>
|
|
+#include <linux/sched.h> /* For cond_resched() */
|
|
+#include <linux/semaphore.h>
|
|
+#include <mtd_dev.h>
|
|
#include "nodelist.h"
|
|
#include "debug.h"
|
|
|
|
-/*
|
|
- * Check whether the user is allowed to write.
|
|
- */
|
|
-static int jffs2_rp_can_write(struct jffs2_sb_info *c)
|
|
-{
|
|
- uint32_t avail;
|
|
- struct jffs2_mount_opts *opts = &c->mount_opts;
|
|
-
|
|
- avail = c->dirty_size + c->free_size + c->unchecked_size +
|
|
- c->erasing_size - c->resv_blocks_write * c->sector_size
|
|
- - c->nospc_dirty_size;
|
|
-
|
|
- if (avail < 2 * opts->rp_size)
|
|
- jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
|
|
- "erasing_size %u, unchecked_size %u, "
|
|
- "nr_erasing_blocks %u, avail %u, resrv %u\n",
|
|
- opts->rp_size, c->dirty_size, c->free_size,
|
|
- c->erasing_size, c->unchecked_size,
|
|
- c->nr_erasing_blocks, avail, c->nospc_dirty_size);
|
|
-
|
|
- if (avail > opts->rp_size)
|
|
- return 1;
|
|
-
|
|
- /* Always allow root */
|
|
- if (capable(CAP_SYS_RESOURCE))
|
|
- return 1;
|
|
-
|
|
- jffs2_dbg(1, "forbid writing\n");
|
|
- return 0;
|
|
-}
|
|
-
|
|
/**
|
|
* jffs2_reserve_space - request physical space to write nodes to flash
|
|
* @c: superblock info
|
|
@@ -57,8 +25,8 @@ static int jffs2_rp_can_write(struct jff
|
|
* @prio: Allocation type - ALLOC_{NORMAL,DELETION}
|
|
*
|
|
* Requests a block of physical space on the flash. Returns zero for success
|
|
- * and puts 'len' into the appropriate place, or returns -ENOSPC or other
|
|
- * error if appropriate. Doesn't return len since that's
|
|
+ * and puts 'len' into the appropriate place, or returns -ENOSPC or other
|
|
+ * error if appropriate. Doesn't return len since that's
|
|
*
|
|
* If it returns zero, jffs2_reserve_space() also downs the per-filesystem
|
|
* allocation semaphore, to prevent more than one allocation from being
|
|
@@ -86,15 +54,6 @@ int jffs2_reserve_space(struct jffs2_sb_
|
|
|
|
spin_lock(&c->erase_completion_lock);
|
|
|
|
- /*
|
|
- * Check if the free space is greater then size of the reserved pool.
|
|
- * If not, only allow root to proceed with writing.
|
|
- */
|
|
- if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
|
|
- ret = -ENOSPC;
|
|
- goto out;
|
|
- }
|
|
-
|
|
/* this needs a little more thought (true <tglx> :)) */
|
|
while(ret == -EAGAIN) {
|
|
while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
|
|
@@ -165,24 +124,7 @@ int jffs2_reserve_space(struct jffs2_sb_
|
|
spin_unlock(&c->erase_completion_lock);
|
|
|
|
ret = jffs2_garbage_collect_pass(c);
|
|
-
|
|
- if (ret == -EAGAIN) {
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- if (c->nr_erasing_blocks &&
|
|
- list_empty(&c->erase_pending_list) &&
|
|
- list_empty(&c->erase_complete_list)) {
|
|
- DECLARE_WAITQUEUE(wait, current);
|
|
- set_current_state(TASK_UNINTERRUPTIBLE);
|
|
- add_wait_queue(&c->erase_wait, &wait);
|
|
- jffs2_dbg(1, "%s waiting for erase to complete\n",
|
|
- __func__);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- schedule();
|
|
- remove_wait_queue(&c->erase_wait, &wait);
|
|
- } else
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- } else if (ret)
|
|
+ if (ret)
|
|
return ret;
|
|
|
|
cond_resched();
|
|
@@ -200,7 +142,6 @@ int jffs2_reserve_space(struct jffs2_sb_
|
|
}
|
|
}
|
|
|
|
-out:
|
|
spin_unlock(&c->erase_completion_lock);
|
|
if (!ret)
|
|
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
|
|
@@ -509,7 +450,7 @@ struct jffs2_raw_node_ref *jffs2_add_phy
|
|
jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
|
|
__func__, ofs & ~3, ofs & 3, len);
|
|
#if 1
|
|
- /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
|
|
+ /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
|
|
if c->nextblock is set. Note that wbuf.c will file obsolete nodes
|
|
even after refiling c->nextblock */
|
|
if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
|
|
@@ -584,6 +525,8 @@ void jffs2_mark_node_obsolete(struct jff
|
|
int ret, addedsize;
|
|
size_t retlen;
|
|
uint32_t freed_len;
|
|
+ struct super_block *sb;
|
|
+ struct MtdNorDev *device;
|
|
|
|
if(unlikely(!ref)) {
|
|
pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
|
|
@@ -595,9 +538,10 @@ void jffs2_mark_node_obsolete(struct jff
|
|
return;
|
|
}
|
|
blocknr = ref->flash_offset / c->sector_size;
|
|
- if (blocknr >= c->nr_blocks) {
|
|
- pr_notice("raw node at 0x%08x is off the end of device!\n",
|
|
- ref->flash_offset);
|
|
+ sb = OFNI_BS_2SFFJ(c);
|
|
+ device = (struct MtdNorDev*)(sb->s_dev);
|
|
+ if (blocknr >= c->nr_blocks +device->blockStart) {
|
|
+ pr_notice("raw node at 0x%08x is off the end of device!\n",ref->flash_offset);
|
|
BUG();
|
|
}
|
|
jeb = &c->blocks[blocknr];
|
|
@@ -778,7 +722,7 @@ void jffs2_mark_node_obsolete(struct jff
|
|
}
|
|
/* XXX FIXME: This is ugly now */
|
|
n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
|
|
- ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
|
|
+ ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (const u_char *)&n);
|
|
if (ret) {
|
|
pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
|
|
ref_offset(ref), ret);
|
|
@@ -846,8 +790,8 @@ int jffs2_thread_should_wake(struct jffs
|
|
return 1;
|
|
|
|
if (c->unchecked_size) {
|
|
- jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
|
|
- c->unchecked_size, c->check_ino);
|
|
+ jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
|
|
+ c->unchecked_size, c->checked_ino);
|
|
return 1;
|
|
}
|
|
|
|
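Editor's note: jffs2_mark_node_obsolete() above now validates the block number against a partition whose first block is device->blockStart rather than 0, since the port mounts a slice of the MTD device. A small sketch of that offset-aware range check; it also checks the lower bound, which the patch leaves implicit, and the parameter names are illustrative:

#include <stdint.h>

/* Is this flash offset inside the mounted partition's erase-block range?
 * block_start mirrors device->blockStart, nr_blocks mirrors c->nr_blocks. */
static int block_in_range(uint32_t flash_offset, uint32_t sector_size,
			  uint32_t block_start, uint32_t nr_blocks)
{
	uint32_t blocknr = flash_offset / sector_size;

	return blocknr >= block_start && blocknr < block_start + nr_blocks;
}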
diff -Nupr old/fs/jffs2/os-linux.h new/fs/jffs2/os-linux.h
|
|
--- old/fs/jffs2/os-linux.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/os-linux.h 2022-05-09 20:33:21.200000000 +0800
|
|
@@ -12,59 +12,57 @@
|
|
#ifndef __JFFS2_OS_LINUX_H__
|
|
#define __JFFS2_OS_LINUX_H__
|
|
|
|
+#include <dirent.h>
|
|
+#include "fs/fs.h"
|
|
+#include "jffs2.h"
|
|
+#include "jffs2_fs_sb.h"
|
|
+
|
|
+
|
|
+/* jffs2 debug output option */
|
|
+#define CONFIG_JFFS2_FS_DEBUG 0 /* 1 or 2 */
|
|
+
|
|
+/* jffs2 gc thread section */
|
|
+#define JFFS2_GC_THREAD_PRIORITY 10 /* GC thread's priority */
|
|
+
|
|
+/* zlib section */
|
|
+#define CONFIG_JFFS2_ZLIB
|
|
+#define CONFIG_JFFS2_RTIME
|
|
+#define CONFIG_JFFS2_RUBIN
|
|
+
|
|
/* JFFS2 uses Linux mode bits natively -- no need for conversion */
|
|
#define os_to_jffs2_mode(x) (x)
|
|
#define jffs2_to_os_mode(x) (x)
|
|
|
|
+#ifndef BUG_ON
|
|
+#define BUG_ON(x) do {if (unlikely(x)) BUG();} while (0)
|
|
+#endif
|
|
+
|
|
struct kstatfs;
|
|
struct kvec;
|
|
|
|
-#define JFFS2_INODE_INFO(i) (container_of(i, struct jffs2_inode_info, vfs_inode))
|
|
-#define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode)
|
|
-#define JFFS2_SB_INFO(sb) (sb->s_fs_info)
|
|
-#define OFNI_BS_2SFFJ(c) ((struct super_block *)c->os_priv)
|
|
|
|
+#define JFFS2_INODE_INFO(i) (&(i)->jffs2_i)
|
|
+#define OFNI_EDONI_2SFFJ(f) \
|
|
+ ((struct jffs2_inode *) (((char *)f) - ((char *)(&((struct jffs2_inode *)NULL)->jffs2_i))))
|
|
|
|
#define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size)
|
|
#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
|
|
-#define JFFS2_F_I_UID(f) (i_uid_read(OFNI_EDONI_2SFFJ(f)))
|
|
-#define JFFS2_F_I_GID(f) (i_gid_read(OFNI_EDONI_2SFFJ(f)))
|
|
-#define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev)
|
|
-
|
|
-#define JFFS2_CLAMP_TIME(t) ((uint32_t)clamp_t(time64_t, (t), 0, U32_MAX))
|
|
-#define ITIME(sec) ((struct timespec64){sec, 0})
|
|
-#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
|
|
-#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
|
|
-#define JFFS2_F_I_CTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_ctime)
|
|
-#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
|
|
-#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
|
|
-#define sleep_on_spinunlock(wq, s) \
|
|
- do { \
|
|
- DECLARE_WAITQUEUE(__wait, current); \
|
|
- add_wait_queue((wq), &__wait); \
|
|
- set_current_state(TASK_UNINTERRUPTIBLE); \
|
|
- spin_unlock(s); \
|
|
- schedule(); \
|
|
- remove_wait_queue((wq), &__wait); \
|
|
- } while(0)
|
|
-
|
|
-static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
|
|
-{
|
|
- f->highest_version = 0;
|
|
- f->fragtree = RB_ROOT;
|
|
- f->metadata = NULL;
|
|
- f->dents = NULL;
|
|
- f->target = NULL;
|
|
- f->flags = 0;
|
|
- f->usercompr = 0;
|
|
-}
|
|
+#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
|
|
+#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
|
|
+#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime)
|
|
+#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime)
|
|
+#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime)
|
|
+
|
|
+#define ITIME(sec) ((struct timespec){sec, 0})
|
|
+#define I_SEC(tv) ((tv).tv_sec)
|
|
|
|
+#define sleep_on_spinunlock(wq, sl) do {spin_unlock(sl); msleep(100);} while (0)
|
|
|
|
-#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY)
|
|
+#define jffs2_is_readonly(c) (0)
|
|
|
|
#define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
|
|
-#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
|
|
|
|
+#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
|
|
|
|
#ifdef CONFIG_JFFS2_SUMMARY
|
|
#define jffs2_can_mark_obsolete(c) (0)
|
|
@@ -77,10 +75,10 @@ static inline void jffs2_init_inode_info
|
|
#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
|
|
|
|
#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf)
|
|
-#define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf))
|
|
-#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; })
|
|
+#define jffs2_flash_read(c, ofs, len, retlen, buf) jffs2_flash_direct_read(c, ofs, len, retlen, buf)
|
|
+#define jffs2_flush_wbuf_pad(c) (c=c)
|
|
#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; })
|
|
-#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
|
|
+#define jffs2_write_nand_badblock(c,jeb,p) (0)
|
|
#define jffs2_nand_flash_setup(c) (0)
|
|
#define jffs2_nand_flash_cleanup(c) do {} while(0)
|
|
#define jffs2_wbuf_dirty(c) (0)
|
|
@@ -100,7 +98,8 @@ static inline void jffs2_init_inode_info
|
|
|
|
#else /* NAND and/or ECC'd NOR support present */
|
|
|
|
-#define jffs2_is_writebuffered(c) (c->wbuf != NULL)
|
|
+/* current not support */
|
|
+#define jffs2_is_writebuffered(c) (0)
|
|
|
|
#ifdef CONFIG_JFFS2_SUMMARY
|
|
#define jffs2_can_mark_obsolete(c) (0)
|
|
@@ -142,38 +141,28 @@ void jffs2_dirty_trigger(struct jffs2_sb
|
|
#endif /* WRITEBUFFER */
|
|
|
|
/* background.c */
|
|
-int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
|
|
+void jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
|
|
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
|
|
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c);
|
|
|
|
/* dir.c */
|
|
-extern const struct file_operations jffs2_dir_operations;
|
|
-extern const struct inode_operations jffs2_dir_inode_operations;
|
|
-
|
|
-/* file.c */
|
|
-extern const struct file_operations jffs2_file_operations;
|
|
-extern const struct inode_operations jffs2_file_inode_operations;
|
|
-extern const struct address_space_operations jffs2_file_address_operations;
|
|
-int jffs2_fsync(struct file *, loff_t, loff_t, int);
|
|
-int jffs2_do_readpage_unlock(void *data, struct page *pg);
|
|
-
|
|
-/* ioctl.c */
|
|
-long jffs2_ioctl(struct file *, unsigned int, unsigned long);
|
|
-
|
|
-/* symlink.c */
|
|
-extern const struct inode_operations jffs2_symlink_inode_operations;
|
|
+struct jffs2_inode *jffs2_lookup(struct jffs2_inode *dir_i, const unsigned char *name, int namelen);
|
|
+int jffs2_create(struct jffs2_inode *dir_i, const unsigned char *d_name, int mode, struct jffs2_inode **new_i);
|
|
+int jffs2_mkdir (struct jffs2_inode *dir_i, const unsigned char *d_name, int mode, struct jffs2_inode **new_i);
|
|
+int jffs2_link (struct jffs2_inode *old_d_inode, struct jffs2_inode *dir_i, const unsigned char *d_name);
|
|
+int jffs2_symlink(struct jffs2_inode *dir_i, struct jffs2_inode **d_inode, const unsigned char *d_name, const char *target);
|
|
+int jffs2_unlink(struct jffs2_inode *dir_i, struct jffs2_inode *d_inode, const unsigned char *d_name);
|
|
+int jffs2_rmdir (struct jffs2_inode *dir_i, struct jffs2_inode *d_inode, const unsigned char *d_name);
|
|
+int jffs2_rename (struct jffs2_inode *old_dir_i, struct jffs2_inode *d_inode, const unsigned char *old_d_name,
|
|
+ struct jffs2_inode *new_dir_i, const unsigned char *new_d_name);
|
|
+int jffs2_readdir(struct jffs2_inode *inode, off_t *offset, off_t *int_off, struct dirent *ent);
|
|
|
|
/* fs.c */
|
|
-int jffs2_setattr (struct dentry *, struct iattr *);
|
|
-int jffs2_do_setattr (struct inode *, struct iattr *);
|
|
-struct inode *jffs2_iget(struct super_block *, unsigned long);
|
|
-void jffs2_evict_inode (struct inode *);
|
|
-void jffs2_dirty_inode(struct inode *inode, int flags);
|
|
-struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode,
|
|
- struct jffs2_raw_inode *ri);
|
|
-int jffs2_statfs (struct dentry *, struct kstatfs *);
|
|
-int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc);
|
|
-int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc);
|
|
+int jffs2_setattr (struct jffs2_inode *inode, struct IATTR *attr);
|
|
+struct jffs2_inode *jffs2_iget(struct super_block *sb, uint32_t ino);
|
|
+int jffs2_iput(struct jffs2_inode * i);
|
|
+struct jffs2_inode *jffs2_new_inode (struct jffs2_inode *dir_i, int mode, struct jffs2_raw_inode *ri);
|
|
+
|
|
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
|
|
struct jffs2_inode_info *f);
|
|
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
|
|
@@ -183,15 +172,25 @@ unsigned char *jffs2_gc_fetch_page(struc
|
|
struct jffs2_inode_info *f,
|
|
unsigned long offset,
|
|
unsigned long *priv);
|
|
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
|
|
+ unsigned char *pg,
|
|
+ unsigned long *priv);
|
|
void jffs2_flash_cleanup(struct jffs2_sb_info *c);
|
|
|
|
+int calculate_inocache_hashsize(uint32_t flash_size);
|
|
|
|
/* writev.c */
|
|
int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
|
|
unsigned long count, loff_t to, size_t *retlen);
|
|
int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
|
|
size_t *retlen, const u_char *buf);
|
|
+int jffs2_flash_direct_read(struct jffs2_sb_info *c, loff_t ofs, size_t len,
|
|
+ size_t *retlen, const char *buf);
|
|
|
|
-#endif /* __JFFS2_OS_LINUX_H__ */
|
|
+/* super.c */
|
|
+int jffs2_fill_super(struct super_block *sb);
|
|
+int jffs2_mount(int part_no, struct jffs2_inode **root_node, unsigned long mountflags);
|
|
+int jffs2_umount(struct jffs2_inode *root_node);
|
|
|
|
+#endif /* __JFFS2_OS_LINUX_H__ */
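
For orientation, a minimal caller sketch for the two entry points declared just above. Only the prototypes come from this header; the wrapper name jffs2_mount_then_umount, the flag value and the printf reporting are illustrative assumptions, and the mount-flag semantics are inferred from the jffs2_mount() body later in this patch.

#include <stdio.h>

struct jffs2_inode;                     /* opaque at this level */
int jffs2_mount(int part_no, struct jffs2_inode **root_node, unsigned long mountflags);
int jffs2_umount(struct jffs2_inode *root_node);

/* Hypothetical caller: mount partition 'part_no', then unmount it again. */
static int jffs2_mount_then_umount(int part_no, unsigned long mountflags)
{
	struct jffs2_inode *root = NULL;
	int ret;

	ret = jffs2_mount(part_no, &root, mountflags);  /* scans the medium, builds caches */
	if (ret) {
		printf("jffs2_mount(%d) failed: %d\n", part_no, ret);
		return ret;
	}
	/* ... hand 'root' to the VFS layer and do file operations here ... */
	return jffs2_umount(root);                      /* stops GC, frees cached dirents */
}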
|
|
|
|
diff -Nupr old/fs/jffs2/read.c new/fs/jffs2/read.c
|
|
--- old/fs/jffs2/read.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/read.c 2022-05-09 20:27:15.580000000 +0800
|
|
@@ -9,16 +9,15 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/crc32.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/compiler.h>
|
|
+#include <mtd_dev.h>
|
|
#include "nodelist.h"
|
|
#include "compr.h"
|
|
+#include "los_crc32.h"
|
|
+#include "user_copy.h"
|
|
|
|
int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
|
|
struct jffs2_full_dnode *fd, unsigned char *buf,
|
|
@@ -57,6 +56,7 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
if (crc != je32_to_cpu(ri->node_crc)) {
|
|
pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n",
|
|
je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
|
|
+ jffs2_dbg_dump_node(c, ref_offset(fd->raw));
|
|
ret = -EIO;
|
|
goto out_ri;
|
|
}
|
|
@@ -75,9 +75,8 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
goto out_ri;
|
|
});
|
|
|
|
-
|
|
if (ri->compr == JFFS2_COMPR_ZERO) {
|
|
- memset(buf, 0, len);
|
|
+ ret = LOS_UserMemClear(buf, len);
|
|
goto out_ri;
|
|
}
|
|
|
|
@@ -88,7 +87,11 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy
|
|
*/
|
|
if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
|
|
- readbuf = buf;
|
|
+ readbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
|
|
+ if (!readbuf) {
|
|
+ ret = -ENOMEM;
|
|
+ goto out_ri;
|
|
+ }
|
|
} else {
|
|
readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL);
|
|
if (!readbuf) {
|
|
@@ -97,14 +100,10 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
}
|
|
}
|
|
if (ri->compr != JFFS2_COMPR_NONE) {
|
|
- if (len < je32_to_cpu(ri->dsize)) {
|
|
- decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
|
|
- if (!decomprbuf) {
|
|
- ret = -ENOMEM;
|
|
- goto out_readbuf;
|
|
- }
|
|
- } else {
|
|
- decomprbuf = buf;
|
|
+ decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
|
|
+ if (!decomprbuf) {
|
|
+ ret = -ENOMEM;
|
|
+ goto out_readbuf;
|
|
}
|
|
} else {
|
|
decomprbuf = readbuf;
|
|
@@ -113,7 +112,7 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
|
|
readbuf);
|
|
ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
|
|
- je32_to_cpu(ri->csize), &readlen, readbuf);
|
|
+ je32_to_cpu(ri->csize), &readlen, (char *)readbuf);
|
|
|
|
if (!ret && readlen != je32_to_cpu(ri->csize))
|
|
ret = -EIO;
|
|
@@ -124,6 +123,7 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
if (crc != je32_to_cpu(ri->data_crc)) {
|
|
pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n",
|
|
je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
|
|
+ jffs2_dbg_dump_node(c, ref_offset(fd->raw));
|
|
ret = -EIO;
|
|
goto out_decomprbuf;
|
|
}
|
|
@@ -139,8 +139,8 @@ int jffs2_read_dnode(struct jffs2_sb_inf
|
|
}
|
|
}
|
|
|
|
- if (len < je32_to_cpu(ri->dsize)) {
|
|
- memcpy(buf, decomprbuf+ofs, len);
|
|
+ if (LOS_CopyFromKernel(buf, len, decomprbuf + ofs, len) != 0) {
|
|
+ ret = -EFAULT;
|
|
}
|
|
out_decomprbuf:
|
|
if(decomprbuf != buf && decomprbuf != readbuf)
|
|
@@ -184,7 +184,10 @@ int jffs2_read_inode_range(struct jffs2_
|
|
}
|
|
jffs2_dbg(1, "Filling non-frag hole from %d-%d\n",
|
|
offset, offset + holesize);
|
|
- memset(buf, 0, holesize);
|
|
+ ret = LOS_UserMemClear(buf, holesize);
|
|
+ if (ret != 0) {
|
|
+ return ret;
|
|
+ }
|
|
buf += holesize;
|
|
offset += holesize;
|
|
continue;
|
|
@@ -193,7 +196,10 @@ int jffs2_read_inode_range(struct jffs2_
|
|
jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n",
|
|
offset, holeend, frag->ofs,
|
|
frag->ofs + frag->size);
|
|
- memset(buf, 0, holeend - offset);
|
|
+ ret = LOS_UserMemClear(buf, holeend - offset);
|
|
+ if (ret != 0) {
|
|
+ return ret;
|
|
+ }
|
|
buf += holeend - offset;
|
|
offset = holeend;
|
|
frag = frag_next(frag);
|
|
@@ -214,7 +220,7 @@ int jffs2_read_inode_range(struct jffs2_
|
|
if (ret) {
|
|
jffs2_dbg(1, "%s(): error %d\n",
|
|
__func__, ret);
|
|
- memset(buf, 0, readlen);
|
|
+ (void)LOS_UserMemClear(buf, readlen);
|
|
return ret;
|
|
}
|
|
buf += readlen;
|
|
@@ -226,3 +232,15 @@ int jffs2_read_inode_range(struct jffs2_
|
|
return 0;
|
|
}
|
|
|
|
+int jffs2_flash_direct_read(struct jffs2_sb_info *c, loff_t ofs, size_t len,
|
|
+ size_t *retlen, const char *buf)
|
|
+{
|
|
+ int ret;
|
|
+ ret = c->mtd->read(c->mtd, ofs, len, (char *)buf);
|
|
+ if (ret >= 0) {
|
|
+ *retlen = ret;
|
|
+ return 0;
|
|
+ }
|
|
+ *retlen = 0;
|
|
+ return ret;
|
|
+}
|
|
\ No newline at end of file
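
The helper added above maps the MtdDev read result (a byte count on success, a negative errno on failure) onto the (return value, *retlen) pair that the jffs2_flash_read() macro in os-linux.h now resolves to. A small sketch of how a caller is meant to combine the two values; the name read_exact is hypothetical and the types are taken from the function above.

/* Sketch only: strict wrapper around the helper above, treating a short
 * read as an I/O error, exactly as the callers in read.c do. */
static int read_exact(struct jffs2_sb_info *c, loff_t ofs, size_t len, char *buf)
{
	size_t retlen = 0;
	int ret = jffs2_flash_direct_read(c, ofs, len, &retlen, buf);

	if (ret)
		return ret;      /* driver reported a negative errno */
	if (retlen != len)
		return -EIO;     /* short read: buffer contents cannot be trusted */
	return 0;
}

Note also that read.c now treats the caller's buffer as user memory, which is why the memset()/memcpy() calls in the hunks above were replaced with LOS_UserMemClear()/LOS_CopyFromKernel().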
|
|
diff -Nupr old/fs/jffs2/readinode.c new/fs/jffs2/readinode.c
|
|
--- old/fs/jffs2/readinode.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/readinode.c 2022-05-09 20:26:31.030000000 +0800
|
|
@@ -9,17 +9,18 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/fs.h>
|
|
-#include <linux/crc32.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/semaphore.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/compiler.h>
|
|
+#include <mtd_dev.h>
|
|
#include "nodelist.h"
|
|
+#include "os-linux.h"
|
|
+#include "los_crc32.h"
|
|
|
|
/*
|
|
* Check the data CRC of the node.
|
|
@@ -31,9 +32,9 @@
|
|
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
|
|
{
|
|
struct jffs2_raw_node_ref *ref = tn->fn->raw;
|
|
- int err = 0, pointed = 0;
|
|
+ int err = 0;
|
|
struct jffs2_eraseblock *jeb;
|
|
- unsigned char *buffer;
|
|
+ unsigned char *buffer = NULL;
|
|
uint32_t crc, ofs, len;
|
|
size_t retlen;
|
|
|
|
@@ -61,48 +62,28 @@ static int check_node_data(struct jffs2_
|
|
dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
|
|
ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
|
|
|
|
-#ifndef __ECOS
|
|
- /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(),
|
|
- * adding and jffs2_flash_read_end() interface. */
|
|
- err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
|
|
- if (!err && retlen < len) {
|
|
- JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
|
|
- mtd_unpoint(c->mtd, ofs, retlen);
|
|
- } else if (err) {
|
|
- if (err != -EOPNOTSUPP)
|
|
- JFFS2_WARNING("MTD point failed: error code %d.\n", err);
|
|
- } else
|
|
- pointed = 1; /* succefully pointed to device */
|
|
-#endif
|
|
-
|
|
- if (!pointed) {
|
|
- buffer = kmalloc(len, GFP_KERNEL);
|
|
- if (unlikely(!buffer))
|
|
- return -ENOMEM;
|
|
+ buffer = kmalloc(len, GFP_KERNEL);
|
|
+ if (unlikely(!buffer))
|
|
+ return -ENOMEM;
|
|
|
|
- /* TODO: this is very frequent pattern, make it a separate
|
|
- * routine */
|
|
- err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
|
|
- if (err) {
|
|
- JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
|
|
- goto free_out;
|
|
- }
|
|
+ /* TODO: this is very frequent pattern, make it a separate
|
|
+ * routine */
|
|
+ err = jffs2_flash_read(c, ofs, len, &retlen, (char *)buffer);
|
|
+ if (err) {
|
|
+ JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
|
|
+ goto free_out;
|
|
+ }
|
|
|
|
- if (retlen != len) {
|
|
- JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
|
|
- err = -EIO;
|
|
- goto free_out;
|
|
- }
|
|
+ if (retlen != len) {
|
|
+ JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
|
|
+ err = -EIO;
|
|
+ goto free_out;
|
|
}
|
|
|
|
/* Continue calculating CRC */
|
|
crc = crc32(tn->partial_crc, buffer, len);
|
|
- if(!pointed)
|
|
- kfree(buffer);
|
|
-#ifndef __ECOS
|
|
- else
|
|
- mtd_unpoint(c->mtd, ofs, len);
|
|
-#endif
|
|
+
|
|
+ kfree(buffer);
|
|
|
|
if (crc != tn->data_crc) {
|
|
JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
|
|
@@ -133,12 +114,7 @@ adj_acc:
|
|
return 0;
|
|
|
|
free_out:
|
|
- if(!pointed)
|
|
- kfree(buffer);
|
|
-#ifndef __ECOS
|
|
- else
|
|
- mtd_unpoint(c->mtd, ofs, len);
|
|
-#endif
|
|
+ kfree(buffer);
|
|
return err;
|
|
}
|
|
|
|
@@ -415,8 +391,12 @@ static void eat_last(struct rb_root *roo
|
|
link = &parent->rb_right;
|
|
|
|
*link = node->rb_left;
|
|
- if (node->rb_left)
|
|
- node->rb_left->__rb_parent_color = node->__rb_parent_color;
|
|
+ if (node->rb_left) {
|
|
+ node->rb_left->rb_parent_color = node->rb_parent_color;
|
|
+ // set child parent only
|
|
+ rb_parent(node->rb_left) = parent;
|
|
+ node->rb_left = NULL;
|
|
+ }
|
|
}
|
|
|
|
/* We put the version tree in reverse order, so we can use the same eat_last()
|
|
@@ -464,8 +444,8 @@ static int jffs2_build_inode_fragtree(st
|
|
#ifdef JFFS2_DBG_READINODE_MESSAGES
|
|
this = tn_last(&rii->tn_root);
|
|
while (this) {
|
|
- dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
|
|
- this->fn->ofs+this->fn->size, this->overlapped);
|
|
+ dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d,left %p,right %p ,parent %p\n", this, this->version, this->fn->ofs,
|
|
+ this->fn->ofs+this->fn->size, this->overlapped,this->rb.rb_left,this->rb.rb_right,rb_parent(&(this->rb)));
|
|
this = tn_prev(this);
|
|
}
|
|
#endif
|
|
@@ -543,11 +523,13 @@ static int jffs2_build_inode_fragtree(st
|
|
|
|
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
|
|
{
|
|
- struct jffs2_tmp_dnode_info *tn, *next;
|
|
+ struct jffs2_tmp_dnode_info *tn;
|
|
+ struct rb_node *rbn,*next;
|
|
|
|
- rbtree_postorder_for_each_entry_safe(tn, next, list, rb) {
|
|
- jffs2_free_full_dnode(tn->fn);
|
|
- jffs2_free_tmp_dnode_info(tn);
|
|
+ RB_POSTORDER_FOREACH_SAFE(rbn, linux_root, (struct linux_root *)list, next) {
|
|
+ tn = (struct jffs2_tmp_dnode_info *)rbn;
|
|
+ jffs2_free_full_dnode(tn->fn);
|
|
+ jffs2_free_tmp_dnode_info(tn);
|
|
}
|
|
|
|
*list = RB_ROOT;
|
|
@@ -659,7 +641,7 @@ static inline int read_direntry(struct j
|
|
int already = read - sizeof(*rd);
|
|
|
|
err = jffs2_flash_read(c, (ref_offset(ref)) + read,
|
|
- rd->nsize - already, &read, &fd->name[already]);
|
|
+ rd->nsize - already, &read, (char *)&fd->name[already]);
|
|
if (unlikely(read != rd->nsize - already) && likely(!err)) {
|
|
jffs2_free_full_dirent(fd);
|
|
JFFS2_ERROR("short read: wanted %d bytes, got %zd\n",
|
|
@@ -690,7 +672,7 @@ static inline int read_direntry(struct j
|
|
#endif
|
|
}
|
|
|
|
- fd->nhash = full_name_hash(NULL, fd->name, rd->nsize);
|
|
+ fd->nhash = full_name_hash(fd->name, rd->nsize);
|
|
fd->next = NULL;
|
|
fd->name[rd->nsize] = '\0';
|
|
|
|
@@ -956,7 +938,7 @@ static int read_more(struct jffs2_sb_inf
|
|
|
|
dbg_readinode("read more %d bytes\n", to_read);
|
|
|
|
- err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
|
|
+ err = jffs2_flash_read(c, offs, to_read, &retlen, (char *)(buf + *rdlen));
|
|
if (err) {
|
|
JFFS2_ERROR("can not read %d bytes from 0x%08x, "
|
|
"error code: %d.\n", to_read, offs, err);
|
|
@@ -1042,7 +1024,7 @@ static int jffs2_get_inode_nodes(struct
|
|
dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));
|
|
|
|
/* FIXME: point() */
|
|
- err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
|
|
+ err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, (char *)buf);
|
|
if (err) {
|
|
JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
|
|
goto free_out;
|
|
@@ -1079,6 +1061,7 @@ static int jffs2_get_inode_nodes(struct
|
|
|
|
case JFFS2_NODETYPE_DIRENT:
|
|
|
|
+ dbg_readinode("node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref));
|
|
if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
|
|
len < sizeof(struct jffs2_raw_dirent)) {
|
|
err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
|
|
@@ -1094,6 +1077,7 @@ static int jffs2_get_inode_nodes(struct
|
|
|
|
case JFFS2_NODETYPE_INODE:
|
|
|
|
+ dbg_readinode("node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref));
|
|
if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
|
|
len < sizeof(struct jffs2_raw_inode)) {
|
|
err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
|
|
@@ -1289,7 +1273,7 @@ static int jffs2_do_read_inode_internal(
|
|
dbg_readinode("symlink's target '%s' cached\n", f->target);
|
|
}
|
|
|
|
- fallthrough;
|
|
+ /* fall through... */
|
|
|
|
case S_IFBLK:
|
|
case S_IFCHR:
|
|
@@ -1315,7 +1299,7 @@ static int jffs2_do_read_inode_internal(
|
|
/* OK. We're happy */
|
|
f->metadata = frag_first(&f->fragtree)->node;
|
|
jffs2_free_node_frag(frag_first(&f->fragtree));
|
|
- f->fragtree = RB_ROOT;
|
|
+ f->fragtree.rb_node = NULL;
|
|
break;
|
|
}
|
|
if (f->inocache->state == INO_STATE_READING)
|
|
@@ -1362,6 +1346,7 @@ int jffs2_do_read_inode(struct jffs2_sb_
|
|
break;
|
|
|
|
default:
|
|
+ JFFS2_ERROR("Unknown f->inocache->state %d!\n", f->inocache->state);
|
|
BUG();
|
|
}
|
|
}
|
|
@@ -1375,14 +1360,13 @@ int jffs2_do_read_inode(struct jffs2_sb_
|
|
return -ENOMEM;
|
|
}
|
|
dbg_readinode("creating inocache for root inode\n");
|
|
- memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
|
|
f->inocache->ino = f->inocache->pino_nlink = 1;
|
|
f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
|
|
f->inocache->state = INO_STATE_READING;
|
|
jffs2_add_ino_cache(c, f->inocache);
|
|
}
|
|
if (!f->inocache) {
|
|
- JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
|
|
+	JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);

|
|
return -ENOENT;
|
|
}
|
|
|
|
@@ -1430,6 +1414,11 @@ void jffs2_do_clear_inode(struct jffs2_s
|
|
|
|
jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
|
|
|
|
+ if (f->target) {
|
|
+ kfree(f->target);
|
|
+ f->target = NULL;
|
|
+ }
|
|
+
|
|
fds = f->dents;
|
|
while(fds) {
|
|
fd = fds;
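
check_node_data() above drops the mtd_point() fast path entirely, so every data-CRC check now goes through a temporary kernel buffer. Condensed into a stand-alone sketch for readability; the helper name crc_check_span is hypothetical, the calls mirror the hunk.

/* Sketch of the simplified CRC path: read the span into a scratch buffer,
 * continue the partial CRC over it, compare, then free the buffer. */
static int crc_check_span(struct jffs2_sb_info *c, uint32_t ofs, uint32_t len,
			  uint32_t partial_crc, uint32_t expected_crc)
{
	size_t retlen = 0;
	unsigned char *buffer = kmalloc(len, GFP_KERNEL);
	int err = 0;

	if (!buffer)
		return -ENOMEM;

	err = jffs2_flash_read(c, ofs, len, &retlen, (char *)buffer);
	if (!err && retlen != len)
		err = -EIO;                                    /* short read */
	if (!err && crc32(partial_crc, buffer, len) != expected_crc)
		err = -EIO;                                    /* wrong data CRC */

	kfree(buffer);
	return err;
}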
|
|
diff -Nupr old/fs/jffs2/scan.c new/fs/jffs2/scan.c
|
|
--- old/fs/jffs2/scan.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/scan.c 2022-05-09 20:23:02.230000000 +0800
|
|
@@ -9,18 +9,17 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
#include <linux/kernel.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/crc32.h>
|
|
#include <linux/compiler.h>
|
|
#include "nodelist.h"
|
|
#include "summary.h"
|
|
#include "debug.h"
|
|
+#include "mtd_dev.h"
|
|
+#include "los_typedef.h"
|
|
+#include "los_crc32.h"
|
|
|
|
#define DEFAULT_EMPTY_SCAN_SIZE 256
|
|
|
|
@@ -74,7 +73,7 @@ static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
|
|
return ret;
|
|
if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
|
|
return ret;
|
|
- /* Turned wasted size into dirty, since we apparently
|
|
+ /* Turned wasted size into dirty, since we apparently
|
|
think it's recoverable now. */
|
|
jeb->dirty_size += jeb->wasted_size;
|
|
c->dirty_size += jeb->wasted_size;
|
|
@@ -95,40 +94,26 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
|
|
unsigned char *flashbuf = NULL;
|
|
uint32_t buf_size = 0;
|
|
struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
|
|
-#ifndef __ECOS
|
|
- size_t pointlen, try_size;
|
|
-
|
|
- ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
|
|
- (void **)&flashbuf, NULL);
|
|
- if (!ret && pointlen < c->mtd->size) {
|
|
- /* Don't muck about if it won't let us point to the whole flash */
|
|
- jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
|
|
- pointlen);
|
|
- mtd_unpoint(c->mtd, 0, pointlen);
|
|
- flashbuf = NULL;
|
|
- }
|
|
- if (ret && ret != -EOPNOTSUPP)
|
|
- jffs2_dbg(1, "MTD point failed %d\n", ret);
|
|
-#endif
|
|
+ struct super_block *sb = NULL;
|
|
+ struct MtdNorDev *device = NULL;
|
|
+
|
|
if (!flashbuf) {
|
|
/* For NAND it's quicker to read a whole eraseblock at a time,
|
|
apparently */
|
|
if (jffs2_cleanmarker_oob(c))
|
|
- try_size = c->sector_size;
|
|
+ buf_size = c->sector_size;
|
|
else
|
|
- try_size = PAGE_SIZE;
|
|
+ buf_size = PAGE_SIZE;
|
|
|
|
jffs2_dbg(1, "Trying to allocate readbuf of %zu "
|
|
- "bytes\n", try_size);
|
|
+ "bytes\n", buf_size);
|
|
|
|
- flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
|
|
+ flashbuf = kmalloc(buf_size, GFP_KERNEL);
|
|
if (!flashbuf)
|
|
return -ENOMEM;
|
|
|
|
jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
|
|
- try_size);
|
|
-
|
|
- buf_size = (uint32_t)try_size;
|
|
+ buf_size);
|
|
}
|
|
|
|
if (jffs2_sum_active()) {
|
|
@@ -140,7 +125,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
|
|
}
|
|
}
|
|
|
|
- for (i=0; i<c->nr_blocks; i++) {
|
|
+ sb = OFNI_BS_2SFFJ(c);
|
|
+ device = (struct MtdNorDev*)(sb->s_dev);
|
|
+ for (i=device->blockStart; i<c->nr_blocks + device->blockStart; i++) {
|
|
struct jffs2_eraseblock *jeb = &c->blocks[i];
|
|
|
|
cond_resched();
|
|
@@ -269,14 +256,10 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
|
|
ret = -EIO;
|
|
goto out;
|
|
}
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- jffs2_garbage_collect_trigger(c);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
}
|
|
ret = 0;
|
|
out:
|
|
- jffs2_sum_reset_collected(s);
|
|
- kfree(s);
|
|
+ kfree(flashbuf);
|
|
out_buf:
|
|
if (buf_size)
|
|
kfree(flashbuf);
|
|
@@ -413,7 +396,7 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
|
|
if (!ref)
|
|
return -ENOMEM;
|
|
|
|
- /* BEFORE jffs2_build_xattr_subsystem() called,
|
|
+ /* BEFORE jffs2_build_xattr_subsystem() called,
|
|
* and AFTER xattr_ref is marked as a dead xref,
|
|
* ref->xid is used to store 32bit xid, xd is not used
|
|
* ref->ino is used to store 32bit inode-number, ic is not used
|
|
@@ -486,10 +469,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
struct jffs2_sum_marker *sm;
|
|
void *sumptr = NULL;
|
|
uint32_t sumlen;
|
|
-
|
|
+
|
|
if (!buf_size) {
|
|
/* XIP case. Just look, point at the summary if it's there */
|
|
- sm = (void *)buf + c->sector_size - sizeof(*sm);
|
|
+ sm = (struct jffs2_sum_marker *)((uint8_t *)buf + c->sector_size - sizeof(*sm));
|
|
if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
|
|
sumptr = buf + je32_to_cpu(sm->offset);
|
|
sumlen = c->sector_size - je32_to_cpu(sm->offset);
|
|
@@ -502,13 +485,13 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
buf_len = sizeof(*sm);
|
|
|
|
/* Read as much as we want into the _end_ of the preallocated buffer */
|
|
- err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
|
|
+ err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
|
|
jeb->offset + c->sector_size - buf_len,
|
|
- buf_len);
|
|
+ buf_len);
|
|
if (err)
|
|
return err;
|
|
|
|
- sm = (void *)buf + buf_size - sizeof(*sm);
|
|
+ sm = (struct jffs2_sum_marker *)((uint8_t *)buf + buf_size - sizeof(*sm));
|
|
if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
|
|
sumlen = c->sector_size - je32_to_cpu(sm->offset);
|
|
sumptr = buf + buf_size - sumlen;
|
|
@@ -523,18 +506,15 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
sumptr = kmalloc(sumlen, GFP_KERNEL);
|
|
if (!sumptr)
|
|
return -ENOMEM;
|
|
- memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
|
|
+ memcpy((uint8_t *)sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
|
|
}
|
|
if (buf_len < sumlen) {
|
|
/* Need to read more so that the entire summary node is present */
|
|
- err = jffs2_fill_scan_buf(c, sumptr,
|
|
+ err = jffs2_fill_scan_buf(c, sumptr,
|
|
jeb->offset + c->sector_size - sumlen,
|
|
- sumlen - buf_len);
|
|
- if (err) {
|
|
- if (sumlen > buf_size)
|
|
- kfree(sumptr);
|
|
+ sumlen - buf_len);
|
|
+ if (err)
|
|
return err;
|
|
- }
|
|
}
|
|
}
|
|
|
|
@@ -545,7 +525,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
|
|
if (buf_size && sumlen > buf_size)
|
|
kfree(sumptr);
|
|
- /* If it returns with a real error, bail.
|
|
+ /* If it returns with a real error, bail.
|
|
If it returns positive, that's a block classification
|
|
(i.e. BLK_STATE_xxx) so return that too.
|
|
If it returns zero, fall through to full scan. */
|
|
@@ -607,7 +587,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
/* Now ofs is a complete physical flash offset as it always was... */
|
|
ofs += jeb->offset;
|
|
|
|
- noise = 10;
|
|
+ noise = 1;
|
|
|
|
dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);
|
|
|
|
@@ -700,7 +680,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
scan_end = buf_len;
|
|
goto more_empty;
|
|
}
|
|
-
|
|
+
|
|
/* See how much more there is to read in this eraseblock... */
|
|
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
|
|
if (!buf_len) {
|
|
@@ -950,7 +930,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
|
|
jeb->offset, jeb->free_size, jeb->dirty_size,
|
|
jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
|
|
-
|
|
+
|
|
/* mark_node_obsolete can add to wasted !! */
|
|
if (jeb->wasted_size) {
|
|
jeb->dirty_size += jeb->wasted_size;
|
|
@@ -978,7 +958,6 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin
|
|
pr_notice("%s(): allocation of inode cache failed\n", __func__);
|
|
return NULL;
|
|
}
|
|
- memset(ic, 0, sizeof(*ic));
|
|
|
|
ic->ino = ino;
|
|
ic->nodes = (void *)ic;
|
|
@@ -1069,7 +1048,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
pseudo_random += je32_to_cpu(rd->version);
|
|
|
|
/* Should never happen. Did. (OLPC trac #4184)*/
|
|
- checkedlen = strnlen(rd->name, rd->nsize);
|
|
+ checkedlen = strnlen((const char *)rd->name, rd->nsize);
|
|
if (checkedlen < rd->nsize) {
|
|
pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
|
|
ofs, checkedlen);
|
|
@@ -1081,7 +1060,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
memcpy(&fd->name, rd->name, checkedlen);
|
|
fd->name[checkedlen] = 0;
|
|
|
|
- crc = crc32(0, fd->name, checkedlen);
|
|
+ crc = crc32(0, fd->name, rd->nsize);
|
|
if (crc != je32_to_cpu(rd->name_crc)) {
|
|
pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
__func__, ofs, je32_to_cpu(rd->name_crc), crc);
|
|
@@ -1106,7 +1085,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
fd->next = NULL;
|
|
fd->version = je32_to_cpu(rd->version);
|
|
fd->ino = je32_to_cpu(rd->ino);
|
|
- fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
|
|
+ fd->nhash = full_name_hash(fd->name, checkedlen);
|
|
fd->type = rd->type;
|
|
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
|
|
|
|
|
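
In the jffs2_scan_medium() hunk above, the eraseblock loop is now offset by the partition's first block instead of always starting at zero, and the scan buffer is a plain kmalloc() of either one sector or PAGE_SIZE. A hypothetical helper that makes the new block window explicit; the field names come from the hunk, the callback is purely illustrative.

/* Sketch: visit only the eraseblocks that belong to this partition,
 * mirroring the new loop bounds in jffs2_scan_medium(). */
static void for_each_partition_block(struct jffs2_sb_info *c,
				     const struct MtdNorDev *device,
				     void (*visit)(struct jffs2_eraseblock *jeb))
{
	uint32_t i;

	for (i = device->blockStart; i < c->nr_blocks + device->blockStart; i++)
		visit(&c->blocks[i]);
}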
diff -Nupr old/fs/jffs2/security.c new/fs/jffs2/security.c
|
|
--- old/fs/jffs2/security.c 2022-05-09 17:15:24.350000000 +0800
|
|
+++ new/fs/jffs2/security.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,72 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2006 NEC Corporation
|
|
- *
|
|
- * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/time.h>
|
|
-#include <linux/pagemap.h>
|
|
-#include <linux/highmem.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/xattr.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include <linux/security.h>
|
|
-#include "nodelist.h"
|
|
-
|
|
-/* ---- Initial Security Label(s) Attachment callback --- */
|
|
-static int jffs2_initxattrs(struct inode *inode,
|
|
- const struct xattr *xattr_array, void *fs_info)
|
|
-{
|
|
- const struct xattr *xattr;
|
|
- int err = 0;
|
|
-
|
|
- for (xattr = xattr_array; xattr->name != NULL; xattr++) {
|
|
- err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
|
|
- xattr->name, xattr->value,
|
|
- xattr->value_len, 0);
|
|
- if (err < 0)
|
|
- break;
|
|
- }
|
|
- return err;
|
|
-}
|
|
-
|
|
-/* ---- Initial Security Label(s) Attachment ----------- */
|
|
-int jffs2_init_security(struct inode *inode, struct inode *dir,
|
|
- const struct qstr *qstr)
|
|
-{
|
|
- return security_inode_init_security(inode, dir, qstr,
|
|
- &jffs2_initxattrs, NULL);
|
|
-}
|
|
-
|
|
-/* ---- XATTR Handler for "security.*" ----------------- */
|
|
-static int jffs2_security_getxattr(const struct xattr_handler *handler,
|
|
- struct dentry *unused, struct inode *inode,
|
|
- const char *name, void *buffer, size_t size)
|
|
-{
|
|
- return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY,
|
|
- name, buffer, size);
|
|
-}
|
|
-
|
|
-static int jffs2_security_setxattr(const struct xattr_handler *handler,
|
|
- struct dentry *unused, struct inode *inode,
|
|
- const char *name, const void *buffer,
|
|
- size_t size, int flags)
|
|
-{
|
|
- return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
|
|
- name, buffer, size, flags);
|
|
-}
|
|
-
|
|
-const struct xattr_handler jffs2_security_xattr_handler = {
|
|
- .prefix = XATTR_SECURITY_PREFIX,
|
|
- .set = jffs2_security_setxattr,
|
|
- .get = jffs2_security_getxattr
|
|
-};
|
|
diff -Nupr old/fs/jffs2/summary.c new/fs/jffs2/summary.c
|
|
--- old/fs/jffs2/summary.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/summary.c 2022-05-09 20:13:24.440000000 +0800
|
|
@@ -10,16 +10,20 @@
|
|
* For licensing information, see the file 'LICENCE' in this directory.
|
|
*
|
|
*/
|
|
+#include "summary.h"
|
|
|
|
+#ifdef CONFIG_JFFS2_SUMMARY
|
|
+
|
|
+#ifndef pr_fmt
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
+#endif
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
+#include <mtd_dev.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/crc32.h>
|
|
+#include "los_crc32.h"
|
|
#include <linux/compiler.h>
|
|
-#include <linux/vmalloc.h>
|
|
#include "nodelist.h"
|
|
#include "debug.h"
|
|
|
|
@@ -388,11 +392,25 @@ static int jffs2_sum_process_sum_data(st
|
|
{
|
|
struct jffs2_inode_cache *ic;
|
|
struct jffs2_full_dirent *fd;
|
|
- void *sp;
|
|
+ uintptr_t sp;
|
|
int i, ino;
|
|
int err;
|
|
|
|
- sp = summary->sum;
|
|
+ sp = (uintptr_t)summary->sum;
|
|
+
|
|
+#if 0
|
|
+ PRINTK("summary: %x %x %d %d %x %x %d %x %x %p %p\n",
|
|
+ je16_to_cpu(summary->magic),
|
|
+ je16_to_cpu(summary->nodetype),
|
|
+ je32_to_cpu(summary->totlen),
|
|
+ je32_to_cpu(summary->hdr_crc),
|
|
+ je32_to_cpu(summary->sum_num),
|
|
+ je32_to_cpu(summary->cln_mkr),
|
|
+ je32_to_cpu(summary->padded),
|
|
+ je32_to_cpu(summary->sum_crc),
|
|
+ je32_to_cpu(summary->node_crc),
|
|
+ sp, summary->sum);
|
|
+#endif
|
|
|
|
for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
|
|
dbg_summary("processing summary index %d\n", i);
|
|
@@ -404,10 +422,12 @@ static int jffs2_sum_process_sum_data(st
|
|
if (err)
|
|
return err;
|
|
|
|
+ //PRINTK("sum type %d \n", je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype));
|
|
+
|
|
switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
|
|
case JFFS2_NODETYPE_INODE: {
|
|
struct jffs2_sum_inode_flash *spi;
|
|
- spi = sp;
|
|
+ spi = (struct jffs2_sum_inode_flash *)sp;
|
|
|
|
ino = je32_to_cpu(spi->inode);
|
|
|
|
@@ -428,13 +448,29 @@ static int jffs2_sum_process_sum_data(st
|
|
|
|
sp += JFFS2_SUMMARY_INODE_SIZE;
|
|
|
|
+ //PRINTK("1 sp + %d %p\n", JFFS2_SUMMARY_INODE_SIZE, sp);
|
|
+
|
|
break;
|
|
}
|
|
|
|
case JFFS2_NODETYPE_DIRENT: {
|
|
struct jffs2_sum_dirent_flash *spd;
|
|
int checkedlen;
|
|
- spd = sp;
|
|
+ spd = (struct jffs2_sum_dirent_flash *)sp;
|
|
+
|
|
+
|
|
+#if 0
|
|
+ PRINTK("dir: %x %d %d %d %d %d %d %d %d\n",
|
|
+ je16_to_cpu(spd->nodetype),
|
|
+ je32_to_cpu(spd->totlen),
|
|
+ je32_to_cpu(spd->offset),
|
|
+ je32_to_cpu(spd->pino),
|
|
+ je32_to_cpu(spd->version),
|
|
+ je32_to_cpu(spd->ino),
|
|
+ spd->nsize,
|
|
+ spd->type,
|
|
+ spd->name);
|
|
+#endif
|
|
|
|
dbg_summary("Dirent at 0x%08x-0x%08x\n",
|
|
jeb->offset + je32_to_cpu(spd->offset),
|
|
@@ -442,7 +478,7 @@ static int jffs2_sum_process_sum_data(st
|
|
|
|
|
|
/* This should never happen, but https://dev.laptop.org/ticket/4184 */
|
|
- checkedlen = strnlen(spd->name, spd->nsize);
|
|
+ checkedlen = strnlen((const char *)spd->name, spd->nsize);
|
|
if (!checkedlen) {
|
|
pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n",
|
|
jeb->offset +
|
|
@@ -463,6 +499,7 @@ static int jffs2_sum_process_sum_data(st
|
|
|
|
memcpy(&fd->name, spd->name, checkedlen);
|
|
fd->name[checkedlen] = 0;
|
|
+ //PRINTK("add %s \n", fd->name);
|
|
|
|
ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
|
|
if (!ic) {
|
|
@@ -476,15 +513,19 @@ static int jffs2_sum_process_sum_data(st
|
|
fd->next = NULL;
|
|
fd->version = je32_to_cpu(spd->version);
|
|
fd->ino = je32_to_cpu(spd->ino);
|
|
- fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
|
|
+ fd->nhash = full_name_hash((const unsigned char *)fd->name, checkedlen);
|
|
fd->type = spd->type;
|
|
|
|
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
|
|
|
|
*pseudo_random += je32_to_cpu(spd->version);
|
|
|
|
+ //PRINTK("2 sp before add %p\n", sp);
|
|
+
|
|
sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize);
|
|
|
|
+ //PRINTK("2 sp + %d %p\n", JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize), sp);
|
|
+
|
|
break;
|
|
}
|
|
#ifdef CONFIG_JFFS2_FS_XATTR
|
|
@@ -493,7 +534,7 @@ static int jffs2_sum_process_sum_data(st
|
|
struct jffs2_sum_xattr_flash *spx;
|
|
|
|
spx = (struct jffs2_sum_xattr_flash *)sp;
|
|
- dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
|
|
+ dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
|
|
jeb->offset + je32_to_cpu(spx->offset),
|
|
jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen),
|
|
je32_to_cpu(spx->xid), je32_to_cpu(spx->version));
|
|
@@ -526,7 +567,7 @@ static int jffs2_sum_process_sum_data(st
|
|
spr = (struct jffs2_sum_xref_flash *)sp;
|
|
dbg_summary("xref at %#08x-%#08x\n",
|
|
jeb->offset + je32_to_cpu(spr->offset),
|
|
- jeb->offset + je32_to_cpu(spr->offset) +
|
|
+ jeb->offset + je32_to_cpu(spr->offset) +
|
|
(uint32_t)PAD(sizeof(struct jffs2_raw_xref)));
|
|
|
|
ref = jffs2_alloc_xattr_ref();
|
|
@@ -679,7 +720,7 @@ static int jffs2_sum_write_data(struct j
|
|
struct jffs2_sum_marker *sm;
|
|
struct kvec vecs[2];
|
|
uint32_t sum_ofs;
|
|
- void *wpage;
|
|
+ uintptr_t wpage;
|
|
int ret;
|
|
size_t retlen;
|
|
|
|
@@ -713,14 +754,14 @@ static int jffs2_sum_write_data(struct j
|
|
isum.padded = cpu_to_je32(c->summary->sum_padded);
|
|
isum.cln_mkr = cpu_to_je32(c->cleanmarker_size);
|
|
isum.sum_num = cpu_to_je32(c->summary->sum_num);
|
|
- wpage = c->summary->sum_buf;
|
|
+ wpage = (uintptr_t)c->summary->sum_buf;
|
|
|
|
while (c->summary->sum_num) {
|
|
temp = c->summary->sum_list_head;
|
|
|
|
switch (je16_to_cpu(temp->u.nodetype)) {
|
|
case JFFS2_NODETYPE_INODE: {
|
|
- struct jffs2_sum_inode_flash *sino_ptr = wpage;
|
|
+ struct jffs2_sum_inode_flash *sino_ptr = (struct jffs2_sum_inode_flash *)wpage;
|
|
|
|
sino_ptr->nodetype = temp->i.nodetype;
|
|
sino_ptr->inode = temp->i.inode;
|
|
@@ -734,7 +775,7 @@ static int jffs2_sum_write_data(struct j
|
|
}
|
|
|
|
case JFFS2_NODETYPE_DIRENT: {
|
|
- struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;
|
|
+ struct jffs2_sum_dirent_flash *sdrnt_ptr = (struct jffs2_sum_dirent_flash *)wpage;
|
|
|
|
sdrnt_ptr->nodetype = temp->d.nodetype;
|
|
sdrnt_ptr->totlen = temp->d.totlen;
|
|
@@ -802,7 +843,7 @@ static int jffs2_sum_write_data(struct j
|
|
|
|
wpage += padsize;
|
|
|
|
- sm = wpage;
|
|
+ sm = (struct jffs2_sum_marker *)wpage;
|
|
sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
|
|
sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);
|
|
|
|
@@ -847,7 +888,7 @@ static int jffs2_sum_write_data(struct j
|
|
/* Write out summary information - called from jffs2_do_reserve_space */
|
|
|
|
int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
|
|
- __must_hold(&c->erase_completion_block)
|
|
+ //__must_hold(&c->erase_completion_block)
|
|
{
|
|
int datasize, infosize, padsize;
|
|
struct jffs2_eraseblock *jeb;
|
|
@@ -875,3 +916,5 @@ int jffs2_sum_write_sumnode(struct jffs2
|
|
spin_lock(&c->erase_completion_lock);
|
|
return ret;
|
|
}
|
|
+
|
|
+#endif
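
A note on the sp and wpage changes above: the original code did pointer arithmetic on void * cursors, which is a GCC extension, so the port switches both cursors to uintptr_t and casts to the concrete record type at every step. A stand-alone sketch of that walking pattern, using a hypothetical fixed-size record header for illustration only.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical record header, for illustration only. */
struct rec {
	uint16_t nodetype;
	uint16_t totlen;    /* total record length, header included */
};

/* Walk a packed sequence of variable-length records with an integer
 * cursor, the way summary.c now advances 'sp' and 'wpage'. */
static void walk_records(const void *base, size_t total)
{
	uintptr_t sp = (uintptr_t)base;
	const uintptr_t end = sp + total;

	while (sp + sizeof(struct rec) <= end) {
		const struct rec *r = (const struct rec *)sp;

		if (r->totlen < sizeof(struct rec) || sp + r->totlen > end)
			break;              /* malformed record: stop walking */
		/* ... dispatch on r->nodetype here ... */
		sp += r->totlen;            /* advance by the record's own size */
	}
}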
|
|
diff -Nupr old/fs/jffs2/summary.h new/fs/jffs2/summary.h
|
|
--- old/fs/jffs2/summary.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/summary.h 2022-05-09 20:35:43.430000000 +0800
|
|
@@ -19,8 +19,9 @@
|
|
anyway. */
|
|
#define MAX_SUMMARY_SIZE 65536
|
|
|
|
-#include <linux/uio.h>
|
|
-#include <linux/jffs2.h>
|
|
+#include <sys/uio.h>
|
|
+#include <linux/types.h>
|
|
+#include "jffs2.h"
|
|
|
|
#define BLK_STATE_ALLFF 0
|
|
#define BLK_STATE_CLEAN 1
|
|
@@ -169,6 +170,10 @@ struct jffs2_sum_marker
|
|
|
|
#define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker))
|
|
|
|
+#ifdef LOSCFG_FS_JFFS2_SUMMARY
|
|
+#define CONFIG_JFFS2_SUMMARY
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */
|
|
|
|
#define jffs2_sum_active() (1)
|
|
diff -Nupr old/fs/jffs2/super.c new/fs/jffs2/super.c
|
|
--- old/fs/jffs2/super.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/super.c 2022-05-09 20:09:32.170000000 +0800
|
|
@@ -9,433 +9,188 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/module.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/init.h>
|
|
-#include <linux/list.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/err.h>
|
|
-#include <linux/mount.h>
|
|
-#include <linux/fs_context.h>
|
|
-#include <linux/fs_parser.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/pagemap.h>
|
|
-#include <linux/mtd/super.h>
|
|
-#include <linux/ctype.h>
|
|
-#include <linux/namei.h>
|
|
-#include <linux/seq_file.h>
|
|
-#include <linux/exportfs.h>
|
|
-#include "compr.h"
|
|
+#include "jffs2.h"
|
|
#include "nodelist.h"
|
|
+#include "jffs2_fs_sb.h"
|
|
+#include "mtd_dev.h"
|
|
+#include "mtd_partition.h"
|
|
+#include "compr.h"
|
|
+#include "jffs2_hash.h"
|
|
|
|
-static void jffs2_put_super(struct super_block *);
|
|
-
|
|
-static struct kmem_cache *jffs2_inode_cachep;
|
|
+static unsigned char jffs2_mounted_number = 0; /* a counter to track the number of jffs2 instances mounted */
|
|
+struct MtdNorDev jffs2_dev_list[CONFIG_MTD_PATTITION_NUM];
|
|
|
|
-static struct inode *jffs2_alloc_inode(struct super_block *sb)
|
|
+/*
|
|
+ * fill in the superblock
|
|
+ */
|
|
+int jffs2_fill_super(struct super_block *sb)
|
|
{
|
|
- struct jffs2_inode_info *f;
|
|
-
|
|
- f = kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
|
|
- if (!f)
|
|
- return NULL;
|
|
- return &f->vfs_inode;
|
|
-}
|
|
+ int ret;
|
|
+ struct jffs2_sb_info *c;
|
|
+ struct MtdNorDev *device;
|
|
|
|
-static void jffs2_free_inode(struct inode *inode)
|
|
-{
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
+ c = JFFS2_SB_INFO(sb);
|
|
+ device = (struct MtdNorDev*)(sb->s_dev);
|
|
|
|
- kfree(f->target);
|
|
- kmem_cache_free(jffs2_inode_cachep, f);
|
|
-}
|
|
+ (void)mutex_init(&c->alloc_sem);
|
|
+ (void)mutex_init(&c->erase_free_sem);
|
|
+ spin_lock_init(&c->erase_completion_lock);
|
|
+ spin_lock_init(&c->inocache_lock);
|
|
|
|
-static void jffs2_i_init_once(void *foo)
|
|
-{
|
|
- struct jffs2_inode_info *f = foo;
|
|
+ /* sector size is the erase block size */
|
|
+ c->sector_size = device->blockSize;
|
|
+ c->flash_size = (device->blockEnd - device->blockStart + 1) * device->blockSize;
|
|
+ c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
|
|
|
|
- mutex_init(&f->sem);
|
|
- inode_init_once(&f->vfs_inode);
|
|
-}
|
|
+ ret = jffs2_do_mount_fs(c);
|
|
+ if (ret) {
|
|
+ (void)mutex_destroy(&c->alloc_sem);
|
|
+ (void)mutex_destroy(&c->erase_free_sem);
|
|
+ return ret;
|
|
+ }
|
|
+ D1(printk(KERN_DEBUG "jffs2_fill_super(): Getting root inode\n"));
|
|
+
|
|
+ sb->s_root = jffs2_iget(sb, 1);
|
|
+
|
|
+ if (IS_ERR(sb->s_root)) {
|
|
+ D1(printk(KERN_WARNING "get root inode failed\n"));
|
|
+ ret = PTR_ERR(sb->s_root);
|
|
+ sb->s_root = NULL;
|
|
+ jffs2_free_ino_caches(c);
|
|
+ jffs2_free_raw_node_refs(c);
|
|
+ free(c->blocks);
|
|
+ (void)mutex_destroy(&c->alloc_sem);
|
|
+ (void)mutex_destroy(&c->erase_free_sem);
|
|
|
|
-static const char *jffs2_compr_name(unsigned int compr)
|
|
-{
|
|
- switch (compr) {
|
|
- case JFFS2_COMPR_MODE_NONE:
|
|
- return "none";
|
|
-#ifdef CONFIG_JFFS2_LZO
|
|
- case JFFS2_COMPR_MODE_FORCELZO:
|
|
- return "lzo";
|
|
-#endif
|
|
-#ifdef CONFIG_JFFS2_ZLIB
|
|
- case JFFS2_COMPR_MODE_FORCEZLIB:
|
|
- return "zlib";
|
|
-#endif
|
|
- default:
|
|
- /* should never happen; programmer error */
|
|
- WARN_ON(1);
|
|
- return "";
|
|
+ return ret;
|
|
}
|
|
-}
|
|
-
|
|
-static int jffs2_show_options(struct seq_file *s, struct dentry *root)
|
|
-{
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(root->d_sb);
|
|
- struct jffs2_mount_opts *opts = &c->mount_opts;
|
|
-
|
|
- if (opts->override_compr)
|
|
- seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
|
|
- if (opts->set_rp_size)
|
|
- seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int jffs2_sync_fs(struct super_block *sb, int wait)
|
|
-{
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
|
|
-
|
|
-#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
|
|
- if (jffs2_is_writebuffered(c))
|
|
- cancel_delayed_work_sync(&c->wbuf_dwork);
|
|
-#endif
|
|
-
|
|
- mutex_lock(&c->alloc_sem);
|
|
- jffs2_flush_wbuf_pad(c);
|
|
- mutex_unlock(&c->alloc_sem);
|
|
return 0;
|
|
}
|
|
|
|
-static struct inode *jffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
|
|
- uint32_t generation)
|
|
+int jffs2_mount(int part_no, struct jffs2_inode **root_node, unsigned long mountflags)
|
|
{
|
|
- /* We don't care about i_generation. We'll destroy the flash
|
|
- before we start re-using inode numbers anyway. And even
|
|
- if that wasn't true, we'd have other problems...*/
|
|
- return jffs2_iget(sb, ino);
|
|
-}
|
|
-
|
|
-static struct dentry *jffs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
|
|
- int fh_len, int fh_type)
|
|
-{
|
|
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
|
|
- jffs2_nfs_get_inode);
|
|
-}
|
|
-
|
|
-static struct dentry *jffs2_fh_to_parent(struct super_block *sb, struct fid *fid,
|
|
- int fh_len, int fh_type)
|
|
-{
|
|
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
|
|
- jffs2_nfs_get_inode);
|
|
-}
|
|
-
|
|
-static struct dentry *jffs2_get_parent(struct dentry *child)
|
|
-{
|
|
- struct jffs2_inode_info *f;
|
|
- uint32_t pino;
|
|
-
|
|
- BUG_ON(!d_is_dir(child));
|
|
-
|
|
- f = JFFS2_INODE_INFO(d_inode(child));
|
|
-
|
|
- pino = f->inocache->pino_nlink;
|
|
-
|
|
- JFFS2_DEBUG("Parent of directory ino #%u is #%u\n",
|
|
- f->inocache->ino, pino);
|
|
-
|
|
- return d_obtain_alias(jffs2_iget(child->d_sb, pino));
|
|
-}
|
|
-
|
|
-static const struct export_operations jffs2_export_ops = {
|
|
- .get_parent = jffs2_get_parent,
|
|
- .fh_to_dentry = jffs2_fh_to_dentry,
|
|
- .fh_to_parent = jffs2_fh_to_parent,
|
|
-};
|
|
-
|
|
-/*
|
|
- * JFFS2 mount options.
|
|
- *
|
|
- * Opt_source: The source device
|
|
- * Opt_override_compr: override default compressor
|
|
- * Opt_rp_size: size of reserved pool in KiB
|
|
- */
|
|
-enum {
|
|
- Opt_override_compr,
|
|
- Opt_rp_size,
|
|
-};
|
|
-
|
|
-static const struct constant_table jffs2_param_compr[] = {
|
|
- {"none", JFFS2_COMPR_MODE_NONE },
|
|
-#ifdef CONFIG_JFFS2_LZO
|
|
- {"lzo", JFFS2_COMPR_MODE_FORCELZO },
|
|
-#endif
|
|
-#ifdef CONFIG_JFFS2_ZLIB
|
|
- {"zlib", JFFS2_COMPR_MODE_FORCEZLIB },
|
|
-#endif
|
|
- {}
|
|
-};
|
|
+ struct super_block *sb = NULL;
|
|
+ struct jffs2_sb_info *c = NULL;
|
|
+ LOS_DL_LIST *part_head = NULL;
|
|
+ struct MtdDev *spinor_mtd = NULL;
|
|
+ mtd_partition *mtd_part = GetSpinorPartitionHead();
|
|
+ int ret;
|
|
|
|
-static const struct fs_parameter_spec jffs2_fs_parameters[] = {
|
|
- fsparam_enum ("compr", Opt_override_compr, jffs2_param_compr),
|
|
- fsparam_u32 ("rp_size", Opt_rp_size),
|
|
- {}
|
|
-};
|
|
+ jffs2_dbg(1, "begin los_jffs2_mount:%d\n", part_no);
|
|
|
|
-static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
|
-{
|
|
- struct fs_parse_result result;
|
|
- struct jffs2_sb_info *c = fc->s_fs_info;
|
|
- int opt;
|
|
-
|
|
- opt = fs_parse(fc, jffs2_fs_parameters, param, &result);
|
|
- if (opt < 0)
|
|
- return opt;
|
|
-
|
|
- switch (opt) {
|
|
- case Opt_override_compr:
|
|
- c->mount_opts.compr = result.uint_32;
|
|
- c->mount_opts.override_compr = true;
|
|
- break;
|
|
- case Opt_rp_size:
|
|
- if (result.uint_32 > UINT_MAX / 1024)
|
|
- return invalf(fc, "jffs2: rp_size unrepresentable");
|
|
- c->mount_opts.rp_size = result.uint_32 * 1024;
|
|
- c->mount_opts.set_rp_size = true;
|
|
- break;
|
|
- default:
|
|
- return -EINVAL;
|
|
+ sb = zalloc(sizeof(struct super_block));
|
|
+ if (sb == NULL) {
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static inline void jffs2_update_mount_opts(struct fs_context *fc)
|
|
-{
|
|
- struct jffs2_sb_info *new_c = fc->s_fs_info;
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(fc->root->d_sb);
|
|
-
|
|
- mutex_lock(&c->alloc_sem);
|
|
- if (new_c->mount_opts.override_compr) {
|
|
- c->mount_opts.override_compr = new_c->mount_opts.override_compr;
|
|
- c->mount_opts.compr = new_c->mount_opts.compr;
|
|
- }
|
|
- if (new_c->mount_opts.set_rp_size) {
|
|
- c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
|
|
- c->mount_opts.rp_size = new_c->mount_opts.rp_size;
|
|
+ ret = Jffs2HashInit(&sb->s_node_hash_lock, &sb->s_node_hash[0]);
|
|
+ if (ret) {
|
|
+ free(sb);
|
|
+ return ret;
|
|
+ }
|
|
+ part_head = &(GetSpinorPartitionHead()->node_info);
|
|
+ LOS_DL_LIST_FOR_EACH_ENTRY(mtd_part,part_head, mtd_partition, node_info) {
|
|
+ if (mtd_part->patitionnum == part_no)
|
|
+ break;
|
|
+ }
|
|
+#ifndef LOSCFG_PLATFORM_QEMU_ARM_VIRT_CA7
|
|
+ spinor_mtd = GetMtd("spinor");
|
|
+#else
|
|
+ spinor_mtd = (struct MtdDev *)LOS_DL_LIST_ENTRY(part_head->pstNext, mtd_partition, node_info)->mtd_info;
|
|
+#endif
|
|
+ if (spinor_mtd == NULL) {
|
|
+ free(sb);
|
|
+ return -EPERM;
|
|
+ }
|
|
+ jffs2_dev_list[part_no].blockEnd = mtd_part->end_block;
|
|
+ jffs2_dev_list[part_no].blockSize = spinor_mtd->eraseSize;
|
|
+ jffs2_dev_list[part_no].blockStart = mtd_part->start_block;
|
|
+#ifndef LOSCFG_PLATFORM_QEMU_ARM_VIRT_CA7
|
|
+ (void)FreeMtd(spinor_mtd);
|
|
+#endif
|
|
+ sb->jffs2_sb.mtd = mtd_part->mtd_info;
|
|
+ sb->s_dev = &jffs2_dev_list[part_no];
|
|
+
|
|
+ c = JFFS2_SB_INFO(sb);
|
|
+ c->flash_size = (mtd_part->end_block - mtd_part->start_block + 1) * spinor_mtd->eraseSize;
|
|
+ c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
|
|
+ c->sector_size = spinor_mtd->eraseSize;
|
|
+
|
|
+ jffs2_dbg(1, "C mtd_size:%d,mtd-erase:%d,blocks:%d,hashsize:%d\n",
|
|
+ c->flash_size, c->sector_size, c->flash_size / c->sector_size, c->inocache_hashsize);
|
|
+
|
|
+ c->inocache_list = zalloc(sizeof(struct jffs2_inode_cache *) * c->inocache_hashsize);
|
|
+ if (c->inocache_list == NULL) {
|
|
+ free(sb);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ if (jffs2_mounted_number++ == 0) {
|
|
+ (void)jffs2_create_slab_caches(); // No error check, cannot fail
|
|
+ (void)jffs2_compressors_init();
|
|
}
|
|
- mutex_unlock(&c->alloc_sem);
|
|
-}
|
|
-
|
|
-static int jffs2_reconfigure(struct fs_context *fc)
|
|
-{
|
|
- struct super_block *sb = fc->root->d_sb;
|
|
-
|
|
- sync_filesystem(sb);
|
|
- jffs2_update_mount_opts(fc);
|
|
-
|
|
- return jffs2_do_remount_fs(sb, fc);
|
|
-}
|
|
-
|
|
-static const struct super_operations jffs2_super_operations =
|
|
-{
|
|
- .alloc_inode = jffs2_alloc_inode,
|
|
- .free_inode = jffs2_free_inode,
|
|
- .put_super = jffs2_put_super,
|
|
- .statfs = jffs2_statfs,
|
|
- .evict_inode = jffs2_evict_inode,
|
|
- .dirty_inode = jffs2_dirty_inode,
|
|
- .show_options = jffs2_show_options,
|
|
- .sync_fs = jffs2_sync_fs,
|
|
-};
|
|
-
|
|
-/*
|
|
- * fill in the superblock
|
|
- */
|
|
-static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
|
|
-{
|
|
- struct jffs2_sb_info *c = sb->s_fs_info;
|
|
-
|
|
- jffs2_dbg(1, "jffs2_get_sb_mtd():"
|
|
- " New superblock for device %d (\"%s\")\n",
|
|
- sb->s_mtd->index, sb->s_mtd->name);
|
|
-
|
|
- c->mtd = sb->s_mtd;
|
|
- c->os_priv = sb;
|
|
-
|
|
- if (c->mount_opts.rp_size > c->mtd->size)
|
|
- return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
|
|
- c->mtd->size / 1024);
|
|
-
|
|
- /* Initialize JFFS2 superblock locks, the further initialization will
|
|
- * be done later */
|
|
- mutex_init(&c->alloc_sem);
|
|
- mutex_init(&c->erase_free_sem);
|
|
- init_waitqueue_head(&c->erase_wait);
|
|
- init_waitqueue_head(&c->inocache_wq);
|
|
- spin_lock_init(&c->erase_completion_lock);
|
|
- spin_lock_init(&c->inocache_lock);
|
|
-
|
|
- sb->s_op = &jffs2_super_operations;
|
|
- sb->s_export_op = &jffs2_export_ops;
|
|
- sb->s_flags = sb->s_flags | SB_NOATIME;
|
|
- sb->s_xattr = jffs2_xattr_handlers;
|
|
-#ifdef CONFIG_JFFS2_FS_POSIX_ACL
|
|
- sb->s_flags |= SB_POSIXACL;
|
|
-#endif
|
|
- return jffs2_do_fill_super(sb, fc);
|
|
-}
|
|
-
|
|
-static int jffs2_get_tree(struct fs_context *fc)
|
|
-{
|
|
- return get_tree_mtd(fc, jffs2_fill_super);
|
|
-}
|
|
-
|
|
-static void jffs2_free_fc(struct fs_context *fc)
|
|
-{
|
|
- kfree(fc->s_fs_info);
|
|
-}
|
|
|
|
-static const struct fs_context_operations jffs2_context_ops = {
|
|
- .free = jffs2_free_fc,
|
|
- .parse_param = jffs2_parse_param,
|
|
- .get_tree = jffs2_get_tree,
|
|
- .reconfigure = jffs2_reconfigure,
|
|
-};
|
|
+ ret = jffs2_fill_super(sb);
+ if (ret) {
+ if (--jffs2_mounted_number == 0) {
+ jffs2_destroy_slab_caches();
+ (void)jffs2_compressors_exit();
+ }

-static int jffs2_init_fs_context(struct fs_context *fc)
-{
- struct jffs2_sb_info *ctx;
+ free(sb);
+ free(c->inocache_list);
+ c->inocache_list = NULL;
+ return ret;
+ }

- ctx = kzalloc(sizeof(struct jffs2_sb_info), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!(mountflags & MS_RDONLY)) {
+ jffs2_start_garbage_collect_thread(c);
+ }

- fc->s_fs_info = ctx;
- fc->ops = &jffs2_context_ops;
+ sb->s_mount_flags = mountflags;
+ *root_node = sb->s_root;
return 0;
|
|
}
|
|
|
|
-static void jffs2_put_super (struct super_block *sb)
|
|
+int jffs2_umount(struct jffs2_inode *root_node)
|
|
{
|
|
+ struct super_block *sb = root_node->i_sb;
|
|
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
|
|
+ struct jffs2_full_dirent *fd, *next;
|
|
|
|
- jffs2_dbg(2, "%s()\n", __func__);
|
|
+ D2(PRINTK("Jffs2Umount\n"));
|
|
|
|
- mutex_lock(&c->alloc_sem);
|
|
- jffs2_flush_wbuf_pad(c);
|
|
- mutex_unlock(&c->alloc_sem);
|
|
-
|
|
- jffs2_sum_exit(c);
|
|
-
|
|
- jffs2_free_ino_caches(c);
|
|
- jffs2_free_raw_node_refs(c);
|
|
- kvfree(c->blocks);
|
|
- jffs2_flash_cleanup(c);
|
|
- kfree(c->inocache_list);
|
|
- jffs2_clear_xattr_subsystem(c);
|
|
- mtd_sync(c->mtd);
|
|
- jffs2_dbg(1, "%s(): returning\n", __func__);
|
|
-}
|
|
-
|
|
-static void jffs2_kill_sb(struct super_block *sb)
|
|
-{
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
|
|
- if (c && !sb_rdonly(sb))
|
|
+ // Only really umount if this is the only mount
|
|
+ if (!(sb->s_mount_flags & MS_RDONLY)) {
|
|
jffs2_stop_garbage_collect_thread(c);
|
|
- kill_mtd_super(sb);
|
|
- kfree(c);
|
|
-}
|
|
-
|
|
-static struct file_system_type jffs2_fs_type = {
|
|
- .owner = THIS_MODULE,
|
|
- .name = "jffs2",
|
|
- .init_fs_context = jffs2_init_fs_context,
|
|
- .parameters = jffs2_fs_parameters,
|
|
- .kill_sb = jffs2_kill_sb,
|
|
-};
|
|
-MODULE_ALIAS_FS("jffs2");
|
|
+ }
|
|
|
|
-static int __init init_jffs2_fs(void)
|
|
-{
|
|
- int ret;
|
|
+ // free directory entries
|
|
+ for (fd = root_node->jffs2_i.dents; fd; fd = next) {
|
|
+ next = fd->next;
|
|
+ jffs2_free_full_dirent(fd);
|
|
+ }
|
|
|
|
- /* Paranoia checks for on-medium structures. If we ask GCC
|
|
- to pack them with __attribute__((packed)) then it _also_
|
|
- assumes that they're not aligned -- so it emits crappy
|
|
- code on some architectures. Ideally we want an attribute
|
|
- which means just 'no padding', without the alignment
|
|
- thing. But GCC doesn't have that -- we have to just
|
|
- hope the structs are the right sizes, instead. */
|
|
- BUILD_BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
|
|
- BUILD_BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
|
|
- BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
|
|
- BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
|
|
-
|
|
- pr_info("version 2.2."
|
|
-#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
|
|
- " (NAND)"
|
|
-#endif
|
|
-#ifdef CONFIG_JFFS2_SUMMARY
|
|
- " (SUMMARY) "
|
|
-#endif
|
|
- " © 2001-2006 Red Hat, Inc.\n");
|
|
+ free(root_node);
|
|
|
|
- jffs2_inode_cachep = kmem_cache_create("jffs2_i",
|
|
- sizeof(struct jffs2_inode_info),
|
|
- 0, (SLAB_RECLAIM_ACCOUNT|
|
|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
|
|
- jffs2_i_init_once);
|
|
- if (!jffs2_inode_cachep) {
|
|
- pr_err("error: Failed to initialise inode cache\n");
|
|
- return -ENOMEM;
|
|
- }
|
|
- ret = jffs2_compressors_init();
|
|
- if (ret) {
|
|
- pr_err("error: Failed to initialise compressors\n");
|
|
- goto out;
|
|
- }
|
|
- ret = jffs2_create_slab_caches();
|
|
- if (ret) {
|
|
- pr_err("error: Failed to initialise slab caches\n");
|
|
- goto out_compressors;
|
|
- }
|
|
- ret = register_filesystem(&jffs2_fs_type);
|
|
- if (ret) {
|
|
- pr_err("error: Failed to register filesystem\n");
|
|
- goto out_slab;
|
|
+ // Clean up the super block and root_node inode
|
|
+ jffs2_free_ino_caches(c);
|
|
+ jffs2_free_raw_node_refs(c);
|
|
+ free(c->blocks);
|
|
+ c->blocks = NULL;
|
|
+ free(c->inocache_list);
|
|
+ c->inocache_list = NULL;
|
|
+ (void)Jffs2HashDeinit(&sb->s_node_hash_lock);
|
|
+
|
|
+ (void)mutex_destroy(&c->alloc_sem);
|
|
+ (void)mutex_destroy(&c->erase_free_sem);
|
|
+ free(sb);
|
|
+ // That's all folks.
|
|
+ D2(PRINTK("Jffs2Umount No current mounts\n"));
|
|
+
|
|
+ if (--jffs2_mounted_number == 0) {
|
|
+ jffs2_destroy_slab_caches();
|
|
+ (void)jffs2_compressors_exit();
|
|
}
|
|
return 0;
|
|
-
|
|
- out_slab:
|
|
- jffs2_destroy_slab_caches();
|
|
- out_compressors:
|
|
- jffs2_compressors_exit();
|
|
- out:
|
|
- kmem_cache_destroy(jffs2_inode_cachep);
|
|
- return ret;
|
|
}
|
|
-
|
|
-static void __exit exit_jffs2_fs(void)
|
|
-{
|
|
- unregister_filesystem(&jffs2_fs_type);
|
|
- jffs2_destroy_slab_caches();
|
|
- jffs2_compressors_exit();
|
|
-
|
|
- /*
|
|
- * Make sure all delayed rcu free inodes are flushed before we
|
|
- * destroy cache.
|
|
- */
|
|
- rcu_barrier();
|
|
- kmem_cache_destroy(jffs2_inode_cachep);
|
|
-}
|
|
-
|
|
-module_init(init_jffs2_fs);
|
|
-module_exit(exit_jffs2_fs);
|
|
-
|
|
-MODULE_DESCRIPTION("The Journalling Flash File System, v2");
|
|
-MODULE_AUTHOR("Red Hat, Inc.");
|
|
-MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
|
|
- // the sake of this tag. It's Free Software.
|
|
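The added jffs2_mount()/jffs2_umount() paths above gate one-time global setup (slab caches and compressors) on a module-wide mount counter: the first successful mount initialises them, and the last umount, or a failed first mount, tears them down. Below is a minimal host-side sketch of that refcount pattern; only jffs2_mounted_number, jffs2_create_slab_caches/jffs2_destroy_slab_caches and the compressor hooks come from the patch, the names and bodies here are stand-ins.

    #include <stdio.h>

    /* Stand-ins for the one-time global setup done on first mount
     * (slab caches, compressors) and torn down on last umount. */
    static void create_caches(void)  { puts("caches + compressors initialised"); }
    static void destroy_caches(void) { puts("caches + compressors released"); }

    static int mounted_number;   /* mirrors jffs2_mounted_number in the patch */

    static int do_mount(int fail_fill_super)
    {
        if (mounted_number++ == 0)        /* first mount: global init */
            create_caches();

        if (fail_fill_super) {            /* error path must undo the refcount */
            if (--mounted_number == 0)
                destroy_caches();
            return -1;
        }
        return 0;
    }

    static void do_umount(void)
    {
        if (--mounted_number == 0)        /* last umount: global teardown */
            destroy_caches();
    }

    int main(void)
    {
        do_mount(0);      /* init happens here */
        do_mount(0);      /* no re-init, just counted */
        do_umount();
        do_umount();      /* teardown happens here */
        do_mount(1);      /* failed mount initialises and immediately releases */
        return 0;
    }

The detail worth noting in the patch is that the jffs2_fill_super() error path decrements the counter before bailing out, so a later mount re-runs the global init cleanly.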
diff -Nupr old/fs/jffs2/symlink.c new/fs/jffs2/symlink.c
|
|
--- old/fs/jffs2/symlink.c 2022-05-09 17:15:24.350000000 +0800
|
|
+++ new/fs/jffs2/symlink.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,19 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2001-2007 Red Hat, Inc.
|
|
- *
|
|
- * Created by David Woodhouse <dwmw2@infradead.org>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#include "nodelist.h"
|
|
-
|
|
-const struct inode_operations jffs2_symlink_inode_operations =
|
|
-{
|
|
- .get_link = simple_get_link,
|
|
- .setattr = jffs2_setattr,
|
|
- .listxattr = jffs2_listxattr,
|
|
-};
|
|
diff -Nupr old/fs/jffs2/wbuf.c new/fs/jffs2/wbuf.c
|
|
--- old/fs/jffs2/wbuf.c 2022-05-09 17:15:24.350000000 +0800
|
|
+++ new/fs/jffs2/wbuf.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,1350 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2001-2007 Red Hat, Inc.
|
|
- * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
|
|
- *
|
|
- * Created by David Woodhouse <dwmw2@infradead.org>
|
|
- * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/mtd/rawnand.h>
|
|
-#include <linux/jiffies.h>
|
|
-#include <linux/sched.h>
|
|
-#include <linux/writeback.h>
|
|
-
|
|
-#include "nodelist.h"
|
|
-
|
|
-/* For testing write failures */
|
|
-#undef BREAKME
|
|
-#undef BREAKMEHEADER
|
|
-
|
|
-#ifdef BREAKME
|
|
-static unsigned char *brokenbuf;
|
|
-#endif
|
|
-
|
|
-#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
|
|
-#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
|
|
-
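The two macros above split a raw flash offset into a page-aligned base and the remainder inside that write-buffer page; the deleted writev path uses exactly this pair to seed c->wbuf_ofs and c->wbuf_len. A standalone worked example follows; the page size and offset are illustrative values, and the page size is passed as a parameter so the snippet compiles on its own.

    #include <stdio.h>

    /* Same arithmetic as the PAGE_DIV/PAGE_MOD macros above, with the
     * write-buffer page size passed in explicitly so the example stands alone. */
    #define PAGE_DIV(x, ps) (((unsigned long)(x) / (unsigned long)(ps)) * (unsigned long)(ps))
    #define PAGE_MOD(x, ps)  ((unsigned long)(x) % (unsigned long)(ps))

    int main(void)
    {
        unsigned long pagesize = 512;      /* illustrative wbuf_pagesize */
        unsigned long to = 0x1234;         /* illustrative flash offset */

        /* 0x1234 = 4660: page base 0x1200 (4608), in-page offset 0x34 (52).
         * These are the values the deleted code loads into c->wbuf_ofs
         * and c->wbuf_len before buffering the write. */
        printf("base = 0x%lx, offset in page = 0x%lx\n",
               PAGE_DIV(to, pagesize), PAGE_MOD(to, pagesize));
        return 0;
    }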
|
|
-/* max. erase failures before we mark a block bad */
|
|
-#define MAX_ERASE_FAILURES 2
|
|
-
|
|
-struct jffs2_inodirty {
|
|
- uint32_t ino;
|
|
- struct jffs2_inodirty *next;
|
|
-};
|
|
-
|
|
-static struct jffs2_inodirty inodirty_nomem;
|
|
-
|
|
-static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
|
|
-{
|
|
- struct jffs2_inodirty *this = c->wbuf_inodes;
|
|
-
|
|
- /* If a malloc failed, consider _everything_ dirty */
|
|
- if (this == &inodirty_nomem)
|
|
- return 1;
|
|
-
|
|
- /* If ino == 0, _any_ non-GC writes mean 'yes' */
|
|
- if (this && !ino)
|
|
- return 1;
|
|
-
|
|
- /* Look to see if the inode in question is pending in the wbuf */
|
|
- while (this) {
|
|
- if (this->ino == ino)
|
|
- return 1;
|
|
- this = this->next;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct jffs2_inodirty *this;
|
|
-
|
|
- this = c->wbuf_inodes;
|
|
-
|
|
- if (this != &inodirty_nomem) {
|
|
- while (this) {
|
|
- struct jffs2_inodirty *next = this->next;
|
|
- kfree(this);
|
|
- this = next;
|
|
- }
|
|
- }
|
|
- c->wbuf_inodes = NULL;
|
|
-}
|
|
-
|
|
-static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
|
|
-{
|
|
- struct jffs2_inodirty *new;
|
|
-
|
|
- /* Schedule delayed write-buffer write-out */
|
|
- jffs2_dirty_trigger(c);
|
|
-
|
|
- if (jffs2_wbuf_pending_for_ino(c, ino))
|
|
- return;
|
|
-
|
|
- new = kmalloc(sizeof(*new), GFP_KERNEL);
|
|
- if (!new) {
|
|
- jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
|
|
- jffs2_clear_wbuf_ino_list(c);
|
|
- c->wbuf_inodes = &inodirty_nomem;
|
|
- return;
|
|
- }
|
|
- new->ino = ino;
|
|
- new->next = c->wbuf_inodes;
|
|
- c->wbuf_inodes = new;
|
|
- return;
|
|
-}
|
|
-
|
|
-static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct list_head *this, *next;
|
|
- static int n;
|
|
-
|
|
- if (list_empty(&c->erasable_pending_wbuf_list))
|
|
- return;
|
|
-
|
|
- list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
|
|
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
|
|
-
|
|
- jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
|
|
- jeb->offset);
|
|
- list_del(this);
|
|
- if ((jiffies + (n++)) & 127) {
|
|
- /* Most of the time, we just erase it immediately. Otherwise we
|
|
- spend ages scanning it on mount, etc. */
|
|
- jffs2_dbg(1, "...and adding to erase_pending_list\n");
|
|
- list_add_tail(&jeb->list, &c->erase_pending_list);
|
|
- c->nr_erasing_blocks++;
|
|
- jffs2_garbage_collect_trigger(c);
|
|
- } else {
|
|
- /* Sometimes, however, we leave it elsewhere so it doesn't get
|
|
- immediately reused, and we spread the load a bit. */
|
|
- jffs2_dbg(1, "...and adding to erasable_list\n");
|
|
- list_add_tail(&jeb->list, &c->erasable_list);
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-#define REFILE_NOTEMPTY 0
|
|
-#define REFILE_ANYWAY 1
|
|
-
|
|
-static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
|
|
-{
|
|
- jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
|
|
-
|
|
- /* File the existing block on the bad_used_list.... */
|
|
- if (c->nextblock == jeb)
|
|
- c->nextblock = NULL;
|
|
- else /* Not sure this should ever happen... need more coffee */
|
|
- list_del(&jeb->list);
|
|
- if (jeb->first_node) {
|
|
- jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
|
|
- jeb->offset);
|
|
- list_add(&jeb->list, &c->bad_used_list);
|
|
- } else {
|
|
- BUG_ON(allow_empty == REFILE_NOTEMPTY);
|
|
- /* It has to have had some nodes or we couldn't be here */
|
|
- jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
|
|
- jeb->offset);
|
|
- list_add(&jeb->list, &c->erase_pending_list);
|
|
- c->nr_erasing_blocks++;
|
|
- jffs2_garbage_collect_trigger(c);
|
|
- }
|
|
-
|
|
- if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
|
|
- uint32_t oldfree = jeb->free_size;
|
|
-
|
|
- jffs2_link_node_ref(c, jeb,
|
|
- (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
|
|
- oldfree, NULL);
|
|
- /* convert to wasted */
|
|
- c->wasted_size += oldfree;
|
|
- jeb->wasted_size += oldfree;
|
|
- c->dirty_size -= oldfree;
|
|
- jeb->dirty_size -= oldfree;
|
|
- }
|
|
-
|
|
- jffs2_dbg_dump_block_lists_nolock(c);
|
|
- jffs2_dbg_acct_sanity_check_nolock(c,jeb);
|
|
- jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
|
|
-}
|
|
-
|
|
-static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
|
|
- struct jffs2_inode_info *f,
|
|
- struct jffs2_raw_node_ref *raw,
|
|
- union jffs2_node_union *node)
|
|
-{
|
|
- struct jffs2_node_frag *frag;
|
|
- struct jffs2_full_dirent *fd;
|
|
-
|
|
- dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
|
|
- node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
|
|
-
|
|
- BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
|
|
- je16_to_cpu(node->u.magic) != 0);
|
|
-
|
|
- switch (je16_to_cpu(node->u.nodetype)) {
|
|
- case JFFS2_NODETYPE_INODE:
|
|
- if (f->metadata && f->metadata->raw == raw) {
|
|
- dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
|
|
- return &f->metadata->raw;
|
|
- }
|
|
- frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
|
|
- BUG_ON(!frag);
|
|
- /* Find a frag which refers to the full_dnode we want to modify */
|
|
- while (!frag->node || frag->node->raw != raw) {
|
|
- frag = frag_next(frag);
|
|
- BUG_ON(!frag);
|
|
- }
|
|
- dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
|
|
- return &frag->node->raw;
|
|
-
|
|
- case JFFS2_NODETYPE_DIRENT:
|
|
- for (fd = f->dents; fd; fd = fd->next) {
|
|
- if (fd->raw == raw) {
|
|
- dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
|
|
- return &fd->raw;
|
|
- }
|
|
- }
|
|
- BUG();
|
|
-
|
|
- default:
|
|
- dbg_noderef("Don't care about replacing raw for nodetype %x\n",
|
|
- je16_to_cpu(node->u.nodetype));
|
|
- break;
|
|
- }
|
|
- return NULL;
|
|
-}
|
|
-
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
-static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
|
|
- uint32_t ofs)
|
|
-{
|
|
- int ret;
|
|
- size_t retlen;
|
|
- char *eccstr;
|
|
-
|
|
- ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
|
|
- if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
|
|
- pr_warn("%s(): Read back of page at %08x failed: %d\n",
|
|
- __func__, c->wbuf_ofs, ret);
|
|
- return ret;
|
|
- } else if (retlen != c->wbuf_pagesize) {
|
|
- pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
|
|
- __func__, ofs, retlen, c->wbuf_pagesize);
|
|
- return -EIO;
|
|
- }
|
|
- if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
|
|
- return 0;
|
|
-
|
|
- if (ret == -EUCLEAN)
|
|
- eccstr = "corrected";
|
|
- else if (ret == -EBADMSG)
|
|
- eccstr = "correction failed";
|
|
- else
|
|
- eccstr = "OK or unused";
|
|
-
|
|
- pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
|
|
- eccstr, c->wbuf_ofs);
|
|
- print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
|
|
- c->wbuf, c->wbuf_pagesize, 0);
|
|
-
|
|
- pr_warn("Read back:\n");
|
|
- print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
|
|
- c->wbuf_verify, c->wbuf_pagesize, 0);
|
|
-
|
|
- return -EIO;
|
|
-}
|
|
-#else
|
|
-#define jffs2_verify_write(c,b,o) (0)
|
|
-#endif
|
|
-
|
|
-/* Recover from failure to write wbuf. Recover the nodes up to the
|
|
- * wbuf, not the one which we were starting to try to write. */
|
|
-
|
|
-static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct jffs2_eraseblock *jeb, *new_jeb;
|
|
- struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
|
|
- size_t retlen;
|
|
- int ret;
|
|
- int nr_refile = 0;
|
|
- unsigned char *buf;
|
|
- uint32_t start, end, ofs, len;
|
|
-
|
|
- jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- if (c->wbuf_ofs % c->mtd->erasesize)
|
|
- jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
|
|
- else
|
|
- jffs2_block_refile(c, jeb, REFILE_ANYWAY);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- BUG_ON(!ref_obsolete(jeb->last_node));
|
|
-
|
|
- /* Find the first node to be recovered, by skipping over every
|
|
- node which ends before the wbuf starts, or which is obsolete. */
|
|
- for (next = raw = jeb->first_node; next; raw = next) {
|
|
- next = ref_next(raw);
|
|
-
|
|
- if (ref_obsolete(raw) ||
|
|
- (next && ref_offset(next) <= c->wbuf_ofs)) {
|
|
- dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
|
|
- ref_offset(raw), ref_flags(raw),
|
|
- (ref_offset(raw) + ref_totlen(c, jeb, raw)),
|
|
- c->wbuf_ofs);
|
|
- continue;
|
|
- }
|
|
- dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
|
|
- ref_offset(raw), ref_flags(raw),
|
|
- (ref_offset(raw) + ref_totlen(c, jeb, raw)));
|
|
-
|
|
- first_raw = raw;
|
|
- break;
|
|
- }
|
|
-
|
|
- if (!first_raw) {
|
|
- /* All nodes were obsolete. Nothing to recover. */
|
|
- jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
|
|
- c->wbuf_len = 0;
|
|
- return;
|
|
- }
|
|
-
|
|
- start = ref_offset(first_raw);
|
|
- end = ref_offset(jeb->last_node);
|
|
- nr_refile = 1;
|
|
-
|
|
- /* Count the number of refs which need to be copied */
|
|
- while ((raw = ref_next(raw)) != jeb->last_node)
|
|
- nr_refile++;
|
|
-
|
|
- dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
|
|
- start, end, end - start, nr_refile);
|
|
-
|
|
- buf = NULL;
|
|
- if (start < c->wbuf_ofs) {
|
|
- /* First affected node was already partially written.
|
|
- * Attempt to reread the old data into our buffer. */
|
|
-
|
|
- buf = kmalloc(end - start, GFP_KERNEL);
|
|
- if (!buf) {
|
|
- pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
|
|
-
|
|
- goto read_failed;
|
|
- }
|
|
-
|
|
- /* Do the read... */
|
|
- ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
|
|
- buf);
|
|
-
|
|
- /* ECC recovered ? */
|
|
- if ((ret == -EUCLEAN || ret == -EBADMSG) &&
|
|
- (retlen == c->wbuf_ofs - start))
|
|
- ret = 0;
|
|
-
|
|
- if (ret || retlen != c->wbuf_ofs - start) {
|
|
- pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
|
|
-
|
|
- kfree(buf);
|
|
- buf = NULL;
|
|
- read_failed:
|
|
- first_raw = ref_next(first_raw);
|
|
- nr_refile--;
|
|
- while (first_raw && ref_obsolete(first_raw)) {
|
|
- first_raw = ref_next(first_raw);
|
|
- nr_refile--;
|
|
- }
|
|
-
|
|
- /* If this was the only node to be recovered, give up */
|
|
- if (!first_raw) {
|
|
- c->wbuf_len = 0;
|
|
- return;
|
|
- }
|
|
-
|
|
- /* It wasn't. Go on and try to recover nodes complete in the wbuf */
|
|
- start = ref_offset(first_raw);
|
|
- dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
|
|
- start, end, end - start, nr_refile);
|
|
-
|
|
- } else {
|
|
- /* Read succeeded. Copy the remaining data from the wbuf */
|
|
- memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
|
|
- }
|
|
- }
|
|
- /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
|
|
- Either 'buf' contains the data, or we find it in the wbuf */
|
|
-
|
|
- /* ... and get an allocation of space from a shiny new block instead */
|
|
- ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
|
|
- if (ret) {
|
|
- pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
|
|
- kfree(buf);
|
|
- return;
|
|
- }
|
|
-
|
|
- /* The summary is not recovered, so it must be disabled for this erase block */
|
|
- jffs2_sum_disable_collecting(c->summary);
|
|
-
|
|
- ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
|
|
- if (ret) {
|
|
- pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
|
|
- kfree(buf);
|
|
- return;
|
|
- }
|
|
-
|
|
- ofs = write_ofs(c);
|
|
-
|
|
- if (end-start >= c->wbuf_pagesize) {
|
|
- /* Need to do another write immediately, but it's possible
|
|
- that this is just because the wbuf itself is completely
|
|
- full, and there's nothing earlier read back from the
|
|
- flash. Hence 'buf' isn't necessarily what we're writing
|
|
- from. */
|
|
- unsigned char *rewrite_buf = buf?:c->wbuf;
|
|
- uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
|
|
-
|
|
- jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
|
|
- towrite, ofs);
|
|
-
|
|
-#ifdef BREAKMEHEADER
|
|
- static int breakme;
|
|
- if (breakme++ == 20) {
|
|
- pr_notice("Faking write error at 0x%08x\n", ofs);
|
|
- breakme = 0;
|
|
- mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
|
|
- ret = -EIO;
|
|
- } else
|
|
-#endif
|
|
- ret = mtd_write(c->mtd, ofs, towrite, &retlen,
|
|
- rewrite_buf);
|
|
-
|
|
- if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
|
|
- /* Argh. We tried. Really we did. */
|
|
- pr_crit("Recovery of wbuf failed due to a second write error\n");
|
|
- kfree(buf);
|
|
-
|
|
- if (retlen)
|
|
- jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
|
|
-
|
|
- return;
|
|
- }
|
|
- pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
|
|
-
|
|
- c->wbuf_len = (end - start) - towrite;
|
|
- c->wbuf_ofs = ofs + towrite;
|
|
- memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
|
|
- /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
|
|
- } else {
|
|
- /* OK, now we're left with the dregs in whichever buffer we're using */
|
|
- if (buf) {
|
|
- memcpy(c->wbuf, buf, end-start);
|
|
- } else {
|
|
- memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
|
|
- }
|
|
- c->wbuf_ofs = ofs;
|
|
- c->wbuf_len = end - start;
|
|
- }
|
|
-
|
|
- /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
|
|
- new_jeb = &c->blocks[ofs / c->sector_size];
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
|
|
- uint32_t rawlen = ref_totlen(c, jeb, raw);
|
|
- struct jffs2_inode_cache *ic;
|
|
- struct jffs2_raw_node_ref *new_ref;
|
|
- struct jffs2_raw_node_ref **adjust_ref = NULL;
|
|
- struct jffs2_inode_info *f = NULL;
|
|
-
|
|
- jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
|
|
- rawlen, ref_offset(raw), ref_flags(raw), ofs);
|
|
-
|
|
- ic = jffs2_raw_ref_to_ic(raw);
|
|
-
|
|
- /* Ick. This XATTR mess should be fixed shortly... */
|
|
- if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
|
|
- struct jffs2_xattr_datum *xd = (void *)ic;
|
|
- BUG_ON(xd->node != raw);
|
|
- adjust_ref = &xd->node;
|
|
- raw->next_in_ino = NULL;
|
|
- ic = NULL;
|
|
- } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
|
|
- struct jffs2_xattr_datum *xr = (void *)ic;
|
|
- BUG_ON(xr->node != raw);
|
|
- adjust_ref = &xr->node;
|
|
- raw->next_in_ino = NULL;
|
|
- ic = NULL;
|
|
- } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
|
|
- struct jffs2_raw_node_ref **p = &ic->nodes;
|
|
-
|
|
- /* Remove the old node from the per-inode list */
|
|
- while (*p && *p != (void *)ic) {
|
|
- if (*p == raw) {
|
|
- (*p) = (raw->next_in_ino);
|
|
- raw->next_in_ino = NULL;
|
|
- break;
|
|
- }
|
|
- p = &((*p)->next_in_ino);
|
|
- }
|
|
-
|
|
- if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
|
|
- /* If it's an in-core inode, then we have to adjust any
|
|
- full_dirent or full_dnode structure to point to the
|
|
- new version instead of the old */
|
|
- f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
|
|
- if (IS_ERR(f)) {
|
|
- /* Should never happen; it _must_ be present */
|
|
- JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
|
|
- ic->ino, PTR_ERR(f));
|
|
- BUG();
|
|
- }
|
|
- /* We don't lock f->sem. There's a number of ways we could
|
|
- end up in here with it already being locked, and nobody's
|
|
- going to modify it on us anyway because we hold the
|
|
- alloc_sem. We're only changing one ->raw pointer too,
|
|
- which we can get away with without upsetting readers. */
|
|
- adjust_ref = jffs2_incore_replace_raw(c, f, raw,
|
|
- (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
|
|
- } else if (unlikely(ic->state != INO_STATE_PRESENT &&
|
|
- ic->state != INO_STATE_CHECKEDABSENT &&
|
|
- ic->state != INO_STATE_GC)) {
|
|
- JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
|
|
- BUG();
|
|
- }
|
|
- }
|
|
-
|
|
- new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
|
|
-
|
|
- if (adjust_ref) {
|
|
- BUG_ON(*adjust_ref != raw);
|
|
- *adjust_ref = new_ref;
|
|
- }
|
|
- if (f)
|
|
- jffs2_gc_release_inode(c, f);
|
|
-
|
|
- if (!ref_obsolete(raw)) {
|
|
- jeb->dirty_size += rawlen;
|
|
- jeb->used_size -= rawlen;
|
|
- c->dirty_size += rawlen;
|
|
- c->used_size -= rawlen;
|
|
- raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
|
|
- BUG_ON(raw->next_in_ino);
|
|
- }
|
|
- ofs += rawlen;
|
|
- }
|
|
-
|
|
- kfree(buf);
|
|
-
|
|
- /* Fix up the original jeb now it's on the bad_list */
|
|
- if (first_raw == jeb->first_node) {
|
|
- jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
|
|
- jeb->offset);
|
|
- list_move(&jeb->list, &c->erase_pending_list);
|
|
- c->nr_erasing_blocks++;
|
|
- jffs2_garbage_collect_trigger(c);
|
|
- }
|
|
-
|
|
- jffs2_dbg_acct_sanity_check_nolock(c, jeb);
|
|
- jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
|
|
-
|
|
- jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
|
|
- jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
|
|
-
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
|
|
- c->wbuf_ofs, c->wbuf_len);
|
|
-
|
|
-}
|
|
-
|
|
-/* Meaning of pad argument:
|
|
- 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
|
|
- 1: Pad, do not adjust nextblock free_size
|
|
- 2: Pad, adjust nextblock free_size
|
|
-*/
|
|
-#define NOPAD 0
|
|
-#define PAD_NOACCOUNT 1
|
|
-#define PAD_ACCOUNTING 2
|
|
-
|
|
-static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
|
|
-{
|
|
- struct jffs2_eraseblock *wbuf_jeb;
|
|
- int ret;
|
|
- size_t retlen;
|
|
-
|
|
- /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
|
|
- del_timer() the timer we never initialised. */
|
|
- if (!jffs2_is_writebuffered(c))
|
|
- return 0;
|
|
-
|
|
- if (!mutex_is_locked(&c->alloc_sem)) {
|
|
- pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
|
|
- BUG();
|
|
- }
|
|
-
|
|
- if (!c->wbuf_len) /* already checked c->wbuf above */
|
|
- return 0;
|
|
-
|
|
- wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
|
|
- if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
|
|
- return -ENOMEM;
|
|
-
|
|
- /* claim remaining space on the page
|
|
- this happens, if we have a change to a new block,
|
|
- or if fsync forces us to flush the writebuffer.
|
|
- if we have a switch to next page, we will not have
|
|
- enough remaining space for this.
|
|
- */
|
|
- if (pad ) {
|
|
- c->wbuf_len = PAD(c->wbuf_len);
|
|
-
|
|
- /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
|
|
- with 8 byte page size */
|
|
- memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
|
|
-
|
|
- if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
|
|
- struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
|
|
- padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
- padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
|
|
- padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
|
|
- padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
|
|
- }
|
|
- }
|
|
- /* else jffs2_flash_writev has actually filled in the rest of the
|
|
- buffer for us, and will deal with the node refs etc. later. */
|
|
-
|
|
-#ifdef BREAKME
|
|
- static int breakme;
|
|
- if (breakme++ == 20) {
|
|
- pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
|
|
- breakme = 0;
|
|
- mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
|
|
- brokenbuf);
|
|
- ret = -EIO;
|
|
- } else
|
|
-#endif
|
|
-
|
|
- ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
|
|
- &retlen, c->wbuf);
|
|
-
|
|
- if (ret) {
|
|
- pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
|
|
- goto wfail;
|
|
- } else if (retlen != c->wbuf_pagesize) {
|
|
- pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
|
|
- retlen, c->wbuf_pagesize);
|
|
- ret = -EIO;
|
|
- goto wfail;
|
|
- } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
|
|
- wfail:
|
|
- jffs2_wbuf_recover(c);
|
|
-
|
|
- return ret;
|
|
- }
|
|
-
|
|
- /* Adjust free size of the block if we padded. */
|
|
- if (pad) {
|
|
- uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
|
|
-
|
|
- jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
|
|
- (wbuf_jeb == c->nextblock) ? "next" : "",
|
|
- wbuf_jeb->offset);
|
|
-
|
|
- /* wbuf_pagesize - wbuf_len is the amount of space that's to be
|
|
- padded. If there is less free space in the block than that,
|
|
- something screwed up */
|
|
- if (wbuf_jeb->free_size < waste) {
|
|
- pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
|
|
- c->wbuf_ofs, c->wbuf_len, waste);
|
|
- pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
|
|
- wbuf_jeb->offset, wbuf_jeb->free_size);
|
|
- BUG();
|
|
- }
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
-
|
|
- jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
|
|
- /* FIXME: that made it count as dirty. Convert to wasted */
|
|
- wbuf_jeb->dirty_size -= waste;
|
|
- c->dirty_size -= waste;
|
|
- wbuf_jeb->wasted_size += waste;
|
|
- c->wasted_size += waste;
|
|
- } else
|
|
- spin_lock(&c->erase_completion_lock);
|
|
-
|
|
- /* Stick any now-obsoleted blocks on the erase_pending_list */
|
|
- jffs2_refile_wbuf_blocks(c);
|
|
- jffs2_clear_wbuf_ino_list(c);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- memset(c->wbuf,0xff,c->wbuf_pagesize);
|
|
- /* adjust write buffer offset, else we get a non contiguous write bug */
|
|
- c->wbuf_ofs += c->wbuf_pagesize;
|
|
- c->wbuf_len = 0;
|
|
- return 0;
|
|
-}
|
|
-
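Inside __jffs2_flush_wbuf() above, a partially filled write-buffer page is padded out before being written: the length is rounded up, the tail is zeroed, and, if a node header still fits, a padding node whose totlen covers the rest of the page is written so the scanner can skip it later. A host-side sketch of that layout step follows; the struct mirrors the 12-byte jffs2_unknown_node, but the padding node-type value is a placeholder, PAD() is taken to round up to a 4-byte boundary, and the CRC and endian conversions of the real code are omitted.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Minimal stand-ins; the real definitions live in the JFFS2 headers.
     * The numeric value of the padding node type is illustrative only. */
    struct unknown_node { uint16_t magic, nodetype; uint32_t totlen, hdr_crc; };
    #define MAGIC_BITMASK     0x1985
    #define NODETYPE_PADDING  0x2004           /* assumption: placeholder value */
    #define PAD(x)            (((x) + 3) & ~3) /* round length up to 4 bytes */

    int main(void)
    {
        unsigned char page[256];               /* pretend wbuf_pagesize == 256 */
        uint32_t wbuf_len = 57;                /* bytes of real data in the buffer */

        wbuf_len = PAD(wbuf_len);              /* 57 -> 60 */
        memset(page + wbuf_len, 0, sizeof(page) - wbuf_len);

        /* If a node header still fits, describe the tail of the page as a
         * padding node, as the deleted __jffs2_flush_wbuf() does. */
        if (wbuf_len + sizeof(struct unknown_node) < sizeof(page)) {
            struct unknown_node *pad = (void *)(page + wbuf_len);
            pad->magic    = MAGIC_BITMASK;
            pad->nodetype = NODETYPE_PADDING;
            pad->totlen   = sizeof(page) - wbuf_len; /* covers the rest of the page */
            pad->hdr_crc  = 0;   /* the real code CRCs the first 8 header bytes */
        }
        printf("data ends at %u, padding node covers %zu bytes\n",
               (unsigned)wbuf_len, sizeof(page) - wbuf_len);
        return 0;
    }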
|
|
-/* Trigger garbage collection to flush the write-buffer.
|
|
- If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
|
|
- outstanding. If ino arg non-zero, do it only if a write for the
|
|
- given inode is outstanding. */
|
|
-int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
|
|
-{
|
|
- uint32_t old_wbuf_ofs;
|
|
- uint32_t old_wbuf_len;
|
|
- int ret = 0;
|
|
-
|
|
- jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
|
|
-
|
|
- if (!c->wbuf)
|
|
- return 0;
|
|
-
|
|
- mutex_lock(&c->alloc_sem);
|
|
- if (!jffs2_wbuf_pending_for_ino(c, ino)) {
|
|
- jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
|
|
- mutex_unlock(&c->alloc_sem);
|
|
- return 0;
|
|
- }
|
|
-
|
|
- old_wbuf_ofs = c->wbuf_ofs;
|
|
- old_wbuf_len = c->wbuf_len;
|
|
-
|
|
- if (c->unchecked_size) {
|
|
- /* GC won't make any progress for a while */
|
|
- jffs2_dbg(1, "%s(): padding. Not finished checking\n",
|
|
- __func__);
|
|
- down_write(&c->wbuf_sem);
|
|
- ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
|
|
- /* retry flushing wbuf in case jffs2_wbuf_recover
|
|
- left some data in the wbuf */
|
|
- if (ret)
|
|
- ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
|
|
- up_write(&c->wbuf_sem);
|
|
- } else while (old_wbuf_len &&
|
|
- old_wbuf_ofs == c->wbuf_ofs) {
|
|
-
|
|
- mutex_unlock(&c->alloc_sem);
|
|
-
|
|
- jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
|
|
-
|
|
- ret = jffs2_garbage_collect_pass(c);
|
|
- if (ret) {
|
|
- /* GC failed. Flush it with padding instead */
|
|
- mutex_lock(&c->alloc_sem);
|
|
- down_write(&c->wbuf_sem);
|
|
- ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
|
|
- /* retry flushing wbuf in case jffs2_wbuf_recover
|
|
- left some data in the wbuf */
|
|
- if (ret)
|
|
- ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
|
|
- up_write(&c->wbuf_sem);
|
|
- break;
|
|
- }
|
|
- mutex_lock(&c->alloc_sem);
|
|
- }
|
|
-
|
|
- jffs2_dbg(1, "%s(): ends...\n", __func__);
|
|
-
|
|
- mutex_unlock(&c->alloc_sem);
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/* Pad write-buffer to end and write it, wasting space. */
|
|
-int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- if (!c->wbuf)
|
|
- return 0;
|
|
-
|
|
- down_write(&c->wbuf_sem);
|
|
- ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
|
|
- /* retry - maybe wbuf recover left some data in wbuf. */
|
|
- if (ret)
|
|
- ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
|
|
- up_write(&c->wbuf_sem);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
|
|
- size_t len)
|
|
-{
|
|
- if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
|
|
- return 0;
|
|
-
|
|
- if (len > (c->wbuf_pagesize - c->wbuf_len))
|
|
- len = c->wbuf_pagesize - c->wbuf_len;
|
|
- memcpy(c->wbuf + c->wbuf_len, buf, len);
|
|
- c->wbuf_len += (uint32_t) len;
|
|
- return len;
|
|
-}
|
|
-
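jffs2_fill_wbuf() above only tops the page buffer up and reports how much it consumed; it deliberately takes nothing when the buffer is empty and the chunk is already a full page, so the caller can push whole pages to flash directly. The writev routine below drives it in a fill, then flush-when-full loop. A standalone model of that interplay follows; the buffer size, data size and the pretend direct write are all illustrative.

    #include <stdio.h>
    #include <string.h>

    enum { PAGESIZE = 16 };
    static unsigned char wbuf[PAGESIZE];
    static size_t wbuf_len;

    /* Mirrors jffs2_fill_wbuf(): top the buffer up, but refuse page-sized
     * chunks when the buffer is empty so they can bypass it entirely. */
    static size_t fill_wbuf(const unsigned char *buf, size_t len)
    {
        if (len && !wbuf_len && len >= PAGESIZE)
            return 0;                          /* caller writes this directly */
        if (len > PAGESIZE - wbuf_len)
            len = PAGESIZE - wbuf_len;
        memcpy(wbuf + wbuf_len, buf, len);
        wbuf_len += len;
        return len;
    }

    static void flush_wbuf(void)
    {
        printf("flush %zu buffered bytes\n", wbuf_len);
        wbuf_len = 0;
    }

    int main(void)
    {
        unsigned char data[40];
        memset(data, 0xAA, sizeof(data));

        size_t done = 0;
        while (done < sizeof(data)) {
            size_t n = fill_wbuf(data + done, sizeof(data) - done);
            if (n == 0) {                      /* whole pages: pretend direct write */
                n = ((sizeof(data) - done) / PAGESIZE) * PAGESIZE;
                printf("direct write of %zu bytes\n", n);
            } else if (wbuf_len == PAGESIZE) {
                flush_wbuf();
            }
            done += n;
        }
        if (wbuf_len)                          /* trailing partial page stays buffered */
            printf("%zu bytes left in wbuf\n", wbuf_len);
        return 0;
    }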
|
|
-int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
|
|
- unsigned long count, loff_t to, size_t *retlen,
|
|
- uint32_t ino)
|
|
-{
|
|
- struct jffs2_eraseblock *jeb;
|
|
- size_t wbuf_retlen, donelen = 0;
|
|
- uint32_t outvec_to = to;
|
|
- int ret, invec;
|
|
-
|
|
- /* If not writebuffered flash, don't bother */
|
|
- if (!jffs2_is_writebuffered(c))
|
|
- return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
|
|
-
|
|
- down_write(&c->wbuf_sem);
|
|
-
|
|
- /* If wbuf_ofs is not initialized, set it to target address */
|
|
- if (c->wbuf_ofs == 0xFFFFFFFF) {
|
|
- c->wbuf_ofs = PAGE_DIV(to);
|
|
- c->wbuf_len = PAGE_MOD(to);
|
|
- memset(c->wbuf,0xff,c->wbuf_pagesize);
|
|
- }
|
|
-
|
|
- /*
|
|
- * Sanity checks on target address. It's permitted to write
|
|
- * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
|
|
- * write at the beginning of a new erase block. Anything else,
|
|
- * and you die. New block starts at xxx000c (0-b = block
|
|
- * header)
|
|
- */
|
|
- if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
|
|
- /* It's a write to a new block */
|
|
- if (c->wbuf_len) {
|
|
- jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
|
|
- __func__, (unsigned long)to, c->wbuf_ofs);
|
|
- ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
|
|
- if (ret)
|
|
- goto outerr;
|
|
- }
|
|
- /* set pointer to new block */
|
|
- c->wbuf_ofs = PAGE_DIV(to);
|
|
- c->wbuf_len = PAGE_MOD(to);
|
|
- }
|
|
-
|
|
- if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
|
|
- /* We're not writing immediately after the writebuffer. Bad. */
|
|
- pr_crit("%s(): Non-contiguous write to %08lx\n",
|
|
- __func__, (unsigned long)to);
|
|
- if (c->wbuf_len)
|
|
- pr_crit("wbuf was previously %08x-%08x\n",
|
|
- c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
|
|
- BUG();
|
|
- }
|
|
-
|
|
- /* adjust alignment offset */
|
|
- if (c->wbuf_len != PAGE_MOD(to)) {
|
|
- c->wbuf_len = PAGE_MOD(to);
|
|
- /* take care of alignment to next page */
|
|
- if (!c->wbuf_len) {
|
|
- c->wbuf_len = c->wbuf_pagesize;
|
|
- ret = __jffs2_flush_wbuf(c, NOPAD);
|
|
- if (ret)
|
|
- goto outerr;
|
|
- }
|
|
- }
|
|
-
|
|
- for (invec = 0; invec < count; invec++) {
|
|
- int vlen = invecs[invec].iov_len;
|
|
- uint8_t *v = invecs[invec].iov_base;
|
|
-
|
|
- wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
|
|
-
|
|
- if (c->wbuf_len == c->wbuf_pagesize) {
|
|
- ret = __jffs2_flush_wbuf(c, NOPAD);
|
|
- if (ret)
|
|
- goto outerr;
|
|
- }
|
|
- vlen -= wbuf_retlen;
|
|
- outvec_to += wbuf_retlen;
|
|
- donelen += wbuf_retlen;
|
|
- v += wbuf_retlen;
|
|
-
|
|
- if (vlen >= c->wbuf_pagesize) {
|
|
- ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
|
|
- &wbuf_retlen, v);
|
|
- if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
|
|
- goto outfile;
|
|
-
|
|
- vlen -= wbuf_retlen;
|
|
- outvec_to += wbuf_retlen;
|
|
- c->wbuf_ofs = outvec_to;
|
|
- donelen += wbuf_retlen;
|
|
- v += wbuf_retlen;
|
|
- }
|
|
-
|
|
- wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
|
|
- if (c->wbuf_len == c->wbuf_pagesize) {
|
|
- ret = __jffs2_flush_wbuf(c, NOPAD);
|
|
- if (ret)
|
|
- goto outerr;
|
|
- }
|
|
-
|
|
- outvec_to += wbuf_retlen;
|
|
- donelen += wbuf_retlen;
|
|
- }
|
|
-
|
|
- /*
|
|
- * If there's a remainder in the wbuf and it's a non-GC write,
|
|
- * remember that the wbuf affects this ino
|
|
- */
|
|
- *retlen = donelen;
|
|
-
|
|
- if (jffs2_sum_active()) {
|
|
- int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
|
|
- if (res)
|
|
- return res;
|
|
- }
|
|
-
|
|
- if (c->wbuf_len && ino)
|
|
- jffs2_wbuf_dirties_inode(c, ino);
|
|
-
|
|
- ret = 0;
|
|
- up_write(&c->wbuf_sem);
|
|
- return ret;
|
|
-
|
|
-outfile:
|
|
- /*
|
|
- * At this point we have no problem, c->wbuf is empty. However
|
|
- * refile nextblock to avoid writing again to same address.
|
|
- */
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
-
|
|
- jeb = &c->blocks[outvec_to / c->sector_size];
|
|
- jffs2_block_refile(c, jeb, REFILE_ANYWAY);
|
|
-
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
-outerr:
|
|
- *retlen = 0;
|
|
- up_write(&c->wbuf_sem);
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/*
|
|
- * This is the entry for flash write.
|
|
- * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
|
|
-*/
|
|
-int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
|
|
- size_t *retlen, const u_char *buf)
|
|
-{
|
|
- struct kvec vecs[1];
|
|
-
|
|
- if (!jffs2_is_writebuffered(c))
|
|
- return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
|
|
-
|
|
- vecs[0].iov_base = (unsigned char *) buf;
|
|
- vecs[0].iov_len = len;
|
|
- return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
|
|
-}
|
|
-
|
|
-/*
|
|
- Handle readback from writebuffer and ECC failure return
|
|
-*/
|
|
-int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
|
|
-{
|
|
- loff_t orbf = 0, owbf = 0, lwbf = 0;
|
|
- int ret;
|
|
-
|
|
- if (!jffs2_is_writebuffered(c))
|
|
- return mtd_read(c->mtd, ofs, len, retlen, buf);
|
|
-
|
|
- /* Read flash */
|
|
- down_read(&c->wbuf_sem);
|
|
- ret = mtd_read(c->mtd, ofs, len, retlen, buf);
|
|
-
|
|
- if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
|
|
- if (ret == -EBADMSG)
|
|
- pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
|
|
- len, ofs);
|
|
- /*
|
|
- * We have the raw data without ECC correction in the buffer,
|
|
- * maybe we are lucky and all data or parts are correct. We
|
|
- * check the node. If data are corrupted node check will sort
|
|
- * it out. We keep this block, it will fail on write or erase
|
|
- * and the we mark it bad. Or should we do that now? But we
|
|
- * should give him a chance. Maybe we had a system crash or
|
|
- * power loss before the ecc write or a erase was completed.
|
|
- * So we return success. :)
|
|
- */
|
|
- ret = 0;
|
|
- }
|
|
-
|
|
- /* if no writebuffer available or write buffer empty, return */
|
|
- if (!c->wbuf_pagesize || !c->wbuf_len)
|
|
- goto exit;
|
|
-
|
|
- /* if we read in a different block, return */
|
|
- if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
|
|
- goto exit;
|
|
-
|
|
- if (ofs >= c->wbuf_ofs) {
|
|
- owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
|
|
- if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
|
|
- goto exit;
|
|
- lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
|
|
- if (lwbf > len)
|
|
- lwbf = len;
|
|
- } else {
|
|
- orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
|
|
- if (orbf > len) /* is write beyond write buffer ? */
|
|
- goto exit;
|
|
- lwbf = len - orbf; /* number of bytes to copy */
|
|
- if (lwbf > c->wbuf_len)
|
|
- lwbf = c->wbuf_len;
|
|
- }
|
|
- if (lwbf > 0)
|
|
- memcpy(buf+orbf,c->wbuf+owbf,lwbf);
|
|
-
|
|
-exit:
|
|
- up_read(&c->wbuf_sem);
|
|
- return ret;
|
|
-}
|
|
-
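The tail of jffs2_flash_read() above overlays whatever is still sitting in the write buffer on top of the data read back from flash, since that range on the medium is stale. The offset arithmetic (owbf, orbf, lwbf) is easy to get wrong, so here is a standalone model of just that overlap computation with made-up offsets; the sector-address check and the out-of-range early exits of the real function are omitted.

    #include <stdio.h>

    /* Host-side model of the overlap logic at the end of the deleted
     * jffs2_flash_read(): data still pending in the write buffer must
     * shadow whatever was read back from flash for the same range. */
    int main(void)
    {
        unsigned long wbuf_ofs = 0x10000, wbuf_len = 96;   /* pending wbuf */
        unsigned long ofs = 0x0FFE0, len = 128;            /* read request */
        unsigned long orbf = 0, owbf = 0, lwbf = 0;

        if (ofs >= wbuf_ofs) {
            owbf = ofs - wbuf_ofs;              /* read starts inside the wbuf */
            lwbf = wbuf_len - owbf;
            if (lwbf > len) lwbf = len;
        } else {
            orbf = wbuf_ofs - ofs;              /* wbuf starts inside the read */
            lwbf = len - orbf;
            if (lwbf > wbuf_len) lwbf = wbuf_len;
        }
        /* Here: orbf = 32, owbf = 0, lwbf = 96, i.e. the last 96 bytes of the
         * 128-byte read are overwritten with write-buffer contents. */
        printf("copy %lu wbuf bytes at read offset %lu (wbuf offset %lu)\n",
               lwbf, orbf, owbf);
        return 0;
    }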
|
|
-#define NR_OOB_SCAN_PAGES 4
|
|
-
|
|
-/* For historical reasons we use only 8 bytes for OOB clean marker */
|
|
-#define OOB_CM_SIZE 8
|
|
-
|
|
-static const struct jffs2_unknown_node oob_cleanmarker =
|
|
-{
|
|
- .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
|
|
- .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
|
|
- .totlen = constant_cpu_to_je32(8)
|
|
-};
|
|
-
|
|
-/*
|
|
- * Check, if the out of band area is empty. This function knows about the clean
|
|
- * marker and if it is present in OOB, treats the OOB as empty anyway.
|
|
- */
|
|
-int jffs2_check_oob_empty(struct jffs2_sb_info *c,
|
|
- struct jffs2_eraseblock *jeb, int mode)
|
|
-{
|
|
- int i, ret;
|
|
- int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
|
|
- struct mtd_oob_ops ops;
|
|
-
|
|
- ops.mode = MTD_OPS_AUTO_OOB;
|
|
- ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
|
|
- ops.oobbuf = c->oobbuf;
|
|
- ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
|
|
- ops.datbuf = NULL;
|
|
-
|
|
- ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
|
|
- if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
|
|
- pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
|
|
- jeb->offset, ops.ooblen, ops.oobretlen, ret);
|
|
- if (!ret || mtd_is_bitflip(ret))
|
|
- ret = -EIO;
|
|
- return ret;
|
|
- }
|
|
-
|
|
- for(i = 0; i < ops.ooblen; i++) {
|
|
- if (mode && i < cmlen)
|
|
- /* Yeah, we know about the cleanmarker */
|
|
- continue;
|
|
-
|
|
- if (ops.oobbuf[i] != 0xFF) {
|
|
- jffs2_dbg(2, "Found %02x at %x in OOB for "
|
|
- "%08x\n", ops.oobbuf[i], i, jeb->offset);
|
|
- return 1;
|
|
- }
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*
|
|
- * Check for a valid cleanmarker.
|
|
- * Returns: 0 if a valid cleanmarker was found
|
|
- * 1 if no cleanmarker was found
|
|
- * negative error code if an error occurred
|
|
- */
|
|
-int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
|
|
- struct jffs2_eraseblock *jeb)
|
|
-{
|
|
- struct mtd_oob_ops ops;
|
|
- int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
|
|
-
|
|
- ops.mode = MTD_OPS_AUTO_OOB;
|
|
- ops.ooblen = cmlen;
|
|
- ops.oobbuf = c->oobbuf;
|
|
- ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
|
|
- ops.datbuf = NULL;
|
|
-
|
|
- ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
|
|
- if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
|
|
- pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
|
|
- jeb->offset, ops.ooblen, ops.oobretlen, ret);
|
|
- if (!ret || mtd_is_bitflip(ret))
|
|
- ret = -EIO;
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
|
|
-}
|
|
-
|
|
-int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
|
|
- struct jffs2_eraseblock *jeb)
|
|
-{
|
|
- int ret;
|
|
- struct mtd_oob_ops ops;
|
|
- int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
|
|
-
|
|
- ops.mode = MTD_OPS_AUTO_OOB;
|
|
- ops.ooblen = cmlen;
|
|
- ops.oobbuf = (uint8_t *)&oob_cleanmarker;
|
|
- ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
|
|
- ops.datbuf = NULL;
|
|
-
|
|
- ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
|
|
- if (ret || ops.oobretlen != ops.ooblen) {
|
|
- pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
|
|
- jeb->offset, ops.ooblen, ops.oobretlen, ret);
|
|
- if (!ret)
|
|
- ret = -EIO;
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*
|
|
- * On NAND we try to mark this block bad. If the block was erased more
|
|
- * than MAX_ERASE_FAILURES we mark it finally bad.
|
|
- * Don't care about failures. This block remains on the erase-pending
|
|
- * or badblock list as long as nobody manipulates the flash with
|
|
- * a bootloader or something like that.
|
|
- */
|
|
-
|
|
-int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- /* if the count is < max, we try to write the counter to the 2nd page oob area */
|
|
- if( ++jeb->bad_count < MAX_ERASE_FAILURES)
|
|
- return 0;
|
|
-
|
|
- pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
|
|
- ret = mtd_block_markbad(c->mtd, bad_offset);
|
|
-
|
|
- if (ret) {
|
|
- jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
|
|
- __func__, jeb->offset, ret);
|
|
- return ret;
|
|
- }
|
|
- return 1;
|
|
-}
|
|
-
|
|
-static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
|
|
-{
|
|
- struct delayed_work *dwork;
|
|
-
|
|
- dwork = to_delayed_work(work);
|
|
- return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
|
|
-}
|
|
-
|
|
-static void delayed_wbuf_sync(struct work_struct *work)
|
|
-{
|
|
- struct jffs2_sb_info *c = work_to_sb(work);
|
|
- struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
-
|
|
- if (!sb_rdonly(sb)) {
|
|
- jffs2_dbg(1, "%s()\n", __func__);
|
|
- jffs2_flush_wbuf_gc(c, 0);
|
|
- }
|
|
-}
|
|
-
|
|
-void jffs2_dirty_trigger(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct super_block *sb = OFNI_BS_2SFFJ(c);
|
|
- unsigned long delay;
|
|
-
|
|
- if (sb_rdonly(sb))
|
|
- return;
|
|
-
|
|
- delay = msecs_to_jiffies(dirty_writeback_interval * 10);
|
|
- if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
|
|
- jffs2_dbg(1, "%s()\n", __func__);
|
|
-}
|
|
-
|
|
-int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
|
|
-{
|
|
- if (!c->mtd->oobsize)
|
|
- return 0;
|
|
-
|
|
- /* Cleanmarker is out-of-band, so inline size zero */
|
|
- c->cleanmarker_size = 0;
|
|
-
|
|
- if (c->mtd->oobavail == 0) {
|
|
- pr_err("inconsistent device description\n");
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- jffs2_dbg(1, "using OOB on NAND\n");
|
|
-
|
|
- c->oobavail = c->mtd->oobavail;
|
|
-
|
|
- /* Initialise write buffer */
|
|
- init_rwsem(&c->wbuf_sem);
|
|
- INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
- c->wbuf_pagesize = c->mtd->writesize;
|
|
- c->wbuf_ofs = 0xFFFFFFFF;
|
|
-
|
|
- c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf)
|
|
- return -ENOMEM;
|
|
-
|
|
- c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
|
|
- if (!c->oobbuf) {
|
|
- kfree(c->wbuf);
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
- c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf_verify) {
|
|
- kfree(c->oobbuf);
|
|
- kfree(c->wbuf);
|
|
- return -ENOMEM;
|
|
- }
|
|
-#endif
|
|
- return 0;
|
|
-}
|
|
-
|
|
-void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
|
|
-{
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
- kfree(c->wbuf_verify);
|
|
-#endif
|
|
- kfree(c->wbuf);
|
|
- kfree(c->oobbuf);
|
|
-}
|
|
-
|
|
-int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
|
|
- c->cleanmarker_size = 0; /* No cleanmarkers needed */
|
|
-
|
|
- /* Initialize write buffer */
|
|
- init_rwsem(&c->wbuf_sem);
|
|
- INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
- c->wbuf_pagesize = c->mtd->erasesize;
|
|
-
|
|
- /* Find a suitable c->sector_size
|
|
- * - Not too much sectors
|
|
- * - Sectors have to be at least 4 K + some bytes
|
|
- * - All known dataflashes have erase sizes of 528 or 1056
|
|
- * - we take at least 8 eraseblocks and want to have at least 8K size
|
|
- * - The concatenation should be a power of 2
|
|
- */
|
|
-
|
|
- c->sector_size = 8 * c->mtd->erasesize;
|
|
-
|
|
- while (c->sector_size < 8192) {
|
|
- c->sector_size *= 2;
|
|
- }
|
|
-
|
|
- /* It may be necessary to adjust the flash size */
|
|
- c->flash_size = c->mtd->size;
|
|
-
|
|
- if ((c->flash_size % c->sector_size) != 0) {
|
|
- c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
|
|
- pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
|
|
- }
|
|
-
|
|
- c->wbuf_ofs = 0xFFFFFFFF;
|
|
- c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf)
|
|
- return -ENOMEM;
|
|
-
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
- c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf_verify) {
|
|
- kfree(c->wbuf);
|
|
- return -ENOMEM;
|
|
- }
|
|
-#endif
|
|
-
|
|
- pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
|
|
- c->wbuf_pagesize, c->sector_size);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
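jffs2_dataflash_setup() above builds a virtual sector from at least eight erase blocks, doubling until it reaches 8 KiB, and then trims the flash size down to a whole number of such sectors. A standalone run of that arithmetic for a 528-byte-page DataFlash follows; the 4 MiB part size is an illustrative value.

    #include <stdio.h>

    /* Reproduces the sector-size selection from the deleted
     * jffs2_dataflash_setup(): start from 8 erase blocks, double until the
     * virtual sector is at least 8 KiB, then trim the flash size to a
     * whole number of those sectors. */
    int main(void)
    {
        unsigned int erasesize = 528;                 /* typical DataFlash page */
        unsigned long long flash_size = 4ULL * 1024 * 1024; /* illustrative part size */

        unsigned int sector_size = 8 * erasesize;     /* 4224 */
        while (sector_size < 8192)
            sector_size *= 2;                         /* -> 8448 */

        if (flash_size % sector_size)
            flash_size = (flash_size / sector_size) * sector_size;

        printf("sector_size = %u, usable flash = %llu (%llu sectors)\n",
               sector_size, flash_size, flash_size / sector_size);
        return 0;
    }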
|
|
-void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
- kfree(c->wbuf_verify);
|
|
-#endif
|
|
- kfree(c->wbuf);
|
|
-}
|
|
-
|
|
-int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
|
|
- /* Cleanmarker currently occupies whole programming regions,
|
|
- * either one or 2 for 8Byte STMicro flashes. */
|
|
- c->cleanmarker_size = max(16u, c->mtd->writesize);
|
|
-
|
|
- /* Initialize write buffer */
|
|
- init_rwsem(&c->wbuf_sem);
|
|
- INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
-
|
|
- c->wbuf_pagesize = c->mtd->writesize;
|
|
- c->wbuf_ofs = 0xFFFFFFFF;
|
|
-
|
|
- c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf)
|
|
- return -ENOMEM;
|
|
-
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
- c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf_verify) {
|
|
- kfree(c->wbuf);
|
|
- return -ENOMEM;
|
|
- }
|
|
-#endif
|
|
- return 0;
|
|
-}
|
|
-
|
|
-void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
|
|
-#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
|
|
- kfree(c->wbuf_verify);
|
|
-#endif
|
|
- kfree(c->wbuf);
|
|
-}
|
|
-
|
|
-int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
|
|
- c->cleanmarker_size = 0;
|
|
-
|
|
- if (c->mtd->writesize == 1)
|
|
- /* We do not need write-buffer */
|
|
- return 0;
|
|
-
|
|
- init_rwsem(&c->wbuf_sem);
|
|
- INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
|
-
|
|
- c->wbuf_pagesize = c->mtd->writesize;
|
|
- c->wbuf_ofs = 0xFFFFFFFF;
|
|
- c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
|
|
- if (!c->wbuf)
|
|
- return -ENOMEM;
|
|
-
|
|
- pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
|
|
- c->wbuf_pagesize, c->sector_size);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
|
|
- kfree(c->wbuf);
|
|
-}
|
|
diff -Nupr old/fs/jffs2/write.c new/fs/jffs2/write.c
|
|
--- old/fs/jffs2/write.c 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/write.c 2022-05-09 20:07:33.520000000 +0800
|
|
@@ -9,16 +9,15 @@
|
|
*
|
|
*/
|
|
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
+#include <dirent.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/fs.h>
|
|
-#include <linux/crc32.h>
|
|
#include <linux/pagemap.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
+#include <linux/semaphore.h>
|
|
+#include "mtd_dev.h"
|
|
#include "nodelist.h"
|
|
#include "compr.h"
|
|
-
|
|
+#include "los_crc32.h"
|
|
|
|
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
|
|
uint32_t mode, struct jffs2_raw_inode *ri)
|
|
@@ -30,8 +29,6 @@ int jffs2_do_new_inode(struct jffs2_sb_i
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- memset(ic, 0, sizeof(*ic));
|
|
-
|
|
f->inocache = ic;
|
|
f->inocache->pino_nlink = 1; /* Will be overwritten shortly for directories */
|
|
f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
|
|
@@ -69,8 +66,10 @@ struct jffs2_full_dnode *jffs2_write_dno
|
|
int retried = 0;
|
|
unsigned long cnt = 2;
|
|
|
|
- D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
|
|
- pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n");
|
|
+ D1(if (je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
|
|
+ printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode(), je32_to_cpu(ri->hdr_crc):%d, "
|
|
+ "crc32(0, ri, sizeof(struct jffs2_unknown_node) - 4):%d\n", je32_to_cpu(ri->hdr_crc),
|
|
+ crc32(0, ri, sizeof(struct jffs2_unknown_node) - 4));
|
|
BUG();
|
|
}
|
|
);
|
|
@@ -172,8 +171,8 @@ struct jffs2_full_dnode *jffs2_write_dno
|
|
beginning of a page and runs to the end of the file, or if
|
|
it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
|
|
*/
|
|
- if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
|
|
- ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
|
|
+ if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
|
|
+ ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
|
|
(je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
|
|
flash_ofs |= REF_PRISTINE;
|
|
} else {
|
|
@@ -219,11 +218,13 @@ struct jffs2_full_dirent *jffs2_write_di
|
|
je32_to_cpu(rd->name_crc));
|
|
|
|
D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
|
|
- pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n");
|
|
+ printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent(), je32_to_cpu(rd->hdr_crc):%d, "
|
|
+ "crc32(0, rd, sizeof(struct jffs2_unknown_node) - 4):%d\n", je32_to_cpu(rd->hdr_crc),
|
|
+ crc32(0, rd, sizeof(struct jffs2_unknown_node) - 4));
|
|
BUG();
|
|
});
|
|
|
|
- if (strnlen(name, namelen) != namelen) {
|
|
+ if (strnlen((const char *)name, namelen) != namelen) {
|
|
/* This should never happen, but seems to have done on at least one
|
|
occasion: https://dev.laptop.org/ticket/4184 */
|
|
pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n");
|
|
@@ -245,7 +246,7 @@ struct jffs2_full_dirent *jffs2_write_di
|
|
|
|
fd->version = je32_to_cpu(rd->version);
|
|
fd->ino = je32_to_cpu(rd->ino);
|
|
- fd->nhash = full_name_hash(NULL, name, namelen);
|
|
+ fd->nhash = full_name_hash(name, namelen);
|
|
fd->type = rd->type;
|
|
memcpy(fd->name, name, namelen);
|
|
fd->name[namelen]=0;
|
|
@@ -343,10 +344,24 @@ int jffs2_write_inode_range(struct jffs2
|
|
{
|
|
int ret = 0;
|
|
uint32_t writtenlen = 0;
|
|
+ unsigned char *bufRet = NULL;
|
|
+ unsigned char *bufRetBak = NULL;
|
|
|
|
jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n",
|
|
__func__, f->inocache->ino, offset, writelen);
|
|
|
|
+ if (writelen > 0) {
|
|
+ bufRet = kmalloc(writelen, GFP_KERNEL);
|
|
+ if (bufRet == NULL) {
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ bufRetBak = bufRet;
|
|
+ if (LOS_CopyToKernel(bufRet, writelen, buf, writelen) != 0) {
|
|
+ kfree(bufRet);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ }
|
|
+
|
|
while(writelen) {
|
|
struct jffs2_full_dnode *fn;
|
|
unsigned char *comprbuf = NULL;
|
|
@@ -366,11 +381,10 @@ int jffs2_write_inode_range(struct jffs2
|
|
break;
|
|
}
|
|
mutex_lock(&f->sem);
|
|
- datalen = min_t(uint32_t, writelen,
|
|
- PAGE_SIZE - (offset & (PAGE_SIZE-1)));
|
|
+ datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
|
|
cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
|
|
|
|
- comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
|
|
+ comprtype = jffs2_compress(c, f, bufRet, &comprbuf, &datalen, &cdatalen);
|
|
|
|
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
|
|
@@ -390,7 +404,7 @@ int jffs2_write_inode_range(struct jffs2
|
|
|
|
fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY);
|
|
|
|
- jffs2_free_comprbuf(comprbuf, buf);
|
|
+ jffs2_free_comprbuf(comprbuf, bufRet);
|
|
|
|
if (IS_ERR(fn)) {
|
|
ret = PTR_ERR(fn);
|
|
@@ -432,15 +446,18 @@ int jffs2_write_inode_range(struct jffs2
|
|
writtenlen += datalen;
|
|
offset += datalen;
|
|
writelen -= datalen;
|
|
- buf += datalen;
|
|
+ bufRet += datalen;
|
|
}
|
|
*retlen = writtenlen;
|
|
+ if (bufRetBak != NULL) {
|
|
+ kfree(bufRetBak);
|
|
+ }
|
|
return ret;
|
|
}
|
|
|
|
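The change to jffs2_write_inode_range() above stops compressing straight out of the caller's buffer: it allocates a kernel-side copy, fills it via LOS_CopyToKernel(), walks a working pointer (bufRet) through it chunk by chunk, and frees the saved base pointer (bufRetBak) once at the end. A host-side sketch of that bounce-buffer pattern follows, with a plain memcpy standing in for LOS_CopyToKernel() and malloc/free standing in for kmalloc/kfree.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* Stand-in for LOS_CopyToKernel(); returns 0 on success. */
    static int copy_in(void *dst, size_t dlen, const void *src, size_t slen)
    {
        if (slen > dlen) return -1;
        memcpy(dst, src, slen);
        return 0;
    }

    int main(void)
    {
        const char user_data[] = "0123456789abcdef";
        size_t writelen = sizeof(user_data);

        unsigned char *buf = malloc(writelen);
        if (!buf) return -1;
        unsigned char *buf_base = buf;           /* like bufRetBak */

        if (copy_in(buf, writelen, user_data, writelen) != 0) {
            free(buf);                           /* error path frees and bails */
            return -1;
        }

        while (writelen) {                       /* consume the copy in chunks */
            size_t datalen = writelen < 4 ? writelen : 4;
            printf("write chunk of %zu bytes: %.4s\n", datalen, (char *)buf);
            buf += datalen;                      /* walk the working pointer ... */
            writelen -= datalen;
        }
        free(buf_base);                          /* ... but free the saved base */
        return 0;
    }

Keeping a separate base pointer is what lets the loop advance bufRet freely while still releasing the allocation exactly once, which is the same reason the patch introduces bufRetBak.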
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
|
|
struct jffs2_inode_info *f, struct jffs2_raw_inode *ri,
|
|
- const struct qstr *qstr)
|
|
+ const char *name, int namelen)
|
|
{
|
|
struct jffs2_raw_dirent *rd;
|
|
struct jffs2_full_dnode *fn;
|
|
@@ -468,7 +485,8 @@ int jffs2_do_create(struct jffs2_sb_info
|
|
jemode_to_cpu(ri->mode));
|
|
|
|
if (IS_ERR(fn)) {
|
|
- jffs2_dbg(1, "jffs2_write_dnode() failed\n");
|
|
+ jffs2_dbg(1, "jffs2_write_dnode() failed,error:%ld\n",
|
|
+ PTR_ERR(fn));
|
|
/* Eeek. Wave bye bye */
|
|
mutex_unlock(&f->sem);
|
|
jffs2_complete_reservation(c);
|
|
@@ -482,19 +500,12 @@ int jffs2_do_create(struct jffs2_sb_info
|
|
mutex_unlock(&f->sem);
|
|
jffs2_complete_reservation(c);
|
|
|
|
- ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode, qstr);
|
|
- if (ret)
|
|
- return ret;
|
|
- ret = jffs2_init_acl_post(&f->vfs_inode);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(qstr->len));
|
|
+ ret = jffs2_reserve_space(c, sizeof(*rd)+ namelen, &alloclen,
|
|
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
|
|
|
|
if (ret) {
|
|
/* Eep. */
|
|
- jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n");
|
|
+ jffs2_dbg(1, "jffs2_reserve_space() for dirent failed,ret:%d\n",ret);
|
|
return ret;
|
|
}
|
|
|
|
@@ -509,19 +520,19 @@ int jffs2_do_create(struct jffs2_sb_info
|
|
|
|
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
|
|
- rd->totlen = cpu_to_je32(sizeof(*rd) + qstr->len);
|
|
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
|
|
rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
|
|
|
|
rd->pino = cpu_to_je32(dir_f->inocache->ino);
|
|
rd->version = cpu_to_je32(++dir_f->highest_version);
|
|
rd->ino = ri->ino;
|
|
rd->mctime = ri->ctime;
|
|
- rd->nsize = qstr->len;
|
|
+ rd->nsize = namelen;
|
|
rd->type = DT_REG;
|
|
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
|
|
- rd->name_crc = cpu_to_je32(crc32(0, qstr->name, qstr->len));
|
|
+ rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
|
|
|
|
- fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL);
|
|
+ fd = jffs2_write_dirent(c, dir_f, rd, (const unsigned char *)name, namelen, ALLOC_NORMAL);
|
|
|
|
jffs2_free_raw_dirent(rd);
|
|
|
|
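The jffs2_do_create() hunks above drop the struct qstr parameter in favour of a raw (name, namelen) pair, so the dirent reservation (sizeof(*rd) + namelen), rd->nsize, rd->totlen and the name CRC are all derived from those two values. The sketch below shows that arithmetic in isolation; the trimmed header struct and the bitwise CRC routine are stand-ins for jffs2_raw_dirent and the kernel's crc32(), and the exact CRC parameters are illustrative, not a claim about the on-flash format.

/* Sketch only: how (name, namelen) drives dirent sizing and the name CRC. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct raw_dirent_hdr {              /* stand-in for struct jffs2_raw_dirent */
    uint32_t totlen;
    uint8_t  nsize;
    /* the real structure also carries magic, CRCs, timestamps, ... */
};

static uint32_t crc32_sketch(uint32_t crc, const void *buf, size_t len)
{
    const uint8_t *p = buf;
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
    }
    return crc;
}

int main(void)
{
    const char *name = "hello.txt";          /* hypothetical dirent name */
    int namelen = (int)strlen(name);

    struct raw_dirent_hdr rd;
    rd.totlen = (uint32_t)(sizeof(rd) + namelen); /* sizeof(*rd) + namelen in the patch */
    rd.nsize  = (uint8_t)namelen;

    uint32_t name_crc = crc32_sketch(0, name, (size_t)namelen);
    printf("totlen=%u nsize=%u name_crc=0x%08x\n", rd.totlen, rd.nsize, name_crc);
    return 0;
}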
@@ -553,7 +564,7 @@ int jffs2_do_unlink(struct jffs2_sb_info
uint32_t alloclen;
int ret;

- if (!jffs2_can_mark_obsolete(c)) {
+ if (jffs2_can_mark_obsolete(c)) {
/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */

rd = jffs2_alloc_raw_dirent();
@@ -584,7 +595,7 @@ int jffs2_do_unlink(struct jffs2_sb_info
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));

- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION);
+ fd = jffs2_write_dirent(c, dir_f, rd, (const unsigned char *)name, namelen, ALLOC_DELETION);

jffs2_free_raw_dirent(rd);

@@ -598,7 +609,7 @@ int jffs2_do_unlink(struct jffs2_sb_info
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
mutex_unlock(&dir_f->sem);
} else {
- uint32_t nhash = full_name_hash(NULL, name, namelen);
+ uint32_t nhash = full_name_hash((const unsigned char *)name, namelen);

fd = dir_f->dents;
/* We don't actually want to reserve any space, but we do
@@ -703,7 +714,7 @@ int jffs2_do_link (struct jffs2_sb_info
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));

- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, (const unsigned char *)name, namelen, ALLOC_NORMAL);

jffs2_free_raw_dirent(rd);

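In the jffs2_do_unlink() hunk above, the non-deletion path precomputes nhash with the two-argument full_name_hash() (the port drops the newer salt argument) and then walks dir_f->dents; in JFFS2 each directory entry carries the same hash, so candidates can be rejected cheaply before any byte-by-byte name comparison. A stand-in sketch of that hash-then-compare walk follows; name_hash() and struct dirent_node are illustrative, not the kernel's full_name_hash() or jffs2_full_dirent.

/* Sketch only: cheap hash check first, byte comparison second. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct dirent_node {
    struct dirent_node *next;
    uint32_t nhash;
    const char *name;
};

static uint32_t name_hash(const unsigned char *name, unsigned int len)
{
    uint32_t hash = 0;
    while (len--)
        hash = (hash + *name++) * 9;   /* partial_name_hash-style mixing, simplified */
    return hash;
}

static struct dirent_node *find_dirent(struct dirent_node *list,
                                       const char *name, unsigned int namelen)
{
    uint32_t nhash = name_hash((const unsigned char *)name, namelen);
    for (struct dirent_node *fd = list; fd; fd = fd->next)
        if (fd->nhash == nhash && !strcmp(fd->name, name))  /* hash first, bytes second */
            return fd;
    return NULL;
}

int main(void)
{
    struct dirent_node b = { NULL, 0, "b.txt" }, a = { &b, 0, "a.txt" };
    a.nhash = name_hash((const unsigned char *)a.name, (unsigned int)strlen(a.name));
    b.nhash = name_hash((const unsigned char *)b.name, (unsigned int)strlen(b.name));
    printf("%s\n", find_dirent(&a, "b.txt", 5) ? "found" : "missing");
    return 0;
}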
diff -Nupr old/fs/jffs2/writev.c new/fs/jffs2/writev.c
--- old/fs/jffs2/writev.c 2022-05-09 17:22:53.000000000 +0800
+++ new/fs/jffs2/writev.c 2022-05-09 20:05:36.440000000 +0800
@@ -10,42 +10,97 @@
*/

#include <linux/kernel.h>
-#include <linux/mtd/mtd.h>
+#include "mtd_dev.h"
#include "nodelist.h"

int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
- if (!jffs2_is_writebuffered(c)) {
- if (jffs2_sum_active()) {
- int res;
- res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to);
- if (res) {
- return res;
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ // writes need to be aligned but the data we're passed may not be
+ // Observation suggests most unaligned writes are small, so we
+ // optimize for that case.
+
+ if (((vecs[i].iov_len & (sizeof(int) - 1))) ||
+ (((unsigned long) vecs[i].iov_base & (sizeof(unsigned long) - 1)))) {
+ // are there iov's after this one? Or is it so much we'd need
+ // to do multiple writes anyway?
+ if ((i + 1) < count || vecs[i].iov_len > 256) {
+ // cop out and malloc
+ unsigned long j;
+ size_t sizetomalloc = 0, totvecsize = 0;
+ char *cbuf, *cbufptr;
+
+ for (j = i; j < count; j++)
+ totvecsize += vecs[j].iov_len;
+
+ // pad up in case unaligned
+ sizetomalloc = totvecsize + sizeof(int) - 1;
+ sizetomalloc &= ~(sizeof(int) - 1);
+ cbuf = (char *) malloc(sizetomalloc);
+ // malloc returns aligned memory
+ if (!cbuf) {
+ ret = -ENOMEM;
+ goto writev_out;
+ }
+ cbufptr = cbuf;
+ for (j = i; j < count; j++) {
+ (void)memcpy_s(cbufptr, vecs[j].iov_len, vecs[j].iov_base, vecs[j].iov_len);
+ cbufptr += vecs[j].iov_len;
+ }
+ ret = jffs2_flash_write(c, to, sizetomalloc, &thislen,
+ (unsigned char *) cbuf);
+ if (thislen > totvecsize) // in case it was aligned up
+ thislen = totvecsize;
+ totlen += thislen;
+ free(cbuf);
+ goto writev_out;
+ } else {
+ // otherwise optimize for the common case
+ int buf[256/sizeof(int)]; // int, so int aligned
+ size_t lentowrite;
+
+ lentowrite = vecs[i].iov_len;
+ // pad up in case its unaligned
+ lentowrite += sizeof(int) - 1;
+ lentowrite &= ~(sizeof(int) - 1);
+ ret = memcpy_s(buf, sizeof(buf), vecs[i].iov_base, vecs[i].iov_len);
+ if (ret != EOK)
+ goto writev_out;
+
+ ret = jffs2_flash_write(c, to, lentowrite, &thislen,
+ (unsigned char *) &buf[0]);
+ if (thislen > vecs[i].iov_len)
+ thislen = vecs[i].iov_len;
}
+ } else {
+ ret = jffs2_flash_write(c, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
}
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len) break;
+ to += vecs[i].iov_len;
}

- return mtd_writev(c->mtd, vecs, count, to, retlen);
+writev_out:
+ if (retlen) *retlen = totlen;
+
+ return ret;
}

int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
size_t *retlen, const u_char *buf)
{
int ret;
- ret = mtd_write(c->mtd, ofs, len, retlen, buf);
-
- if (jffs2_sum_active()) {
- struct kvec vecs[1];
- int res;
-
- vecs[0].iov_base = (unsigned char *) buf;
- vecs[0].iov_len = len;
-
- res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs);
- if (res) {
- return res;
- }
+ ret = c->mtd->write(c->mtd, ofs, len, (char *)buf);
+ if (ret >= 0) {
+ *retlen = ret;
+ return 0;
}
+ *retlen = 0;
return ret;
}
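The rewritten jffs2_flash_direct_writev() above no longer defers to mtd_writev(): it walks the kvec array itself, and whenever a vector is not int-aligned it either gathers everything that is left into one heap bounce buffer or, for a single short vector, copies into a 256-byte stack buffer, padding the length up to the next sizeof(int) boundary before writing. A user-space model of that decision logic is sketched below, assuming stand-ins: mem_flash_write() plays the role of jffs2_flash_write() and struct kv the role of struct kvec; the 256-byte threshold and the round-up mirror the patch.

/* Sketch only: user-space model of the gather-write fallback above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct kv { void *base; size_t len; };

static unsigned char flash[1 << 16];               /* pretend flash device */

static int mem_flash_write(size_t to, size_t len, size_t *retlen, const void *buf)
{
    memcpy(flash + to, buf, len);
    *retlen = len;
    return 0;
}

static size_t round_up_int(size_t len)
{
    return (len + sizeof(int) - 1) & ~(sizeof(int) - 1);
}

static int writev_model(const struct kv *vecs, size_t count, size_t to, size_t *retlen)
{
    size_t totlen = 0, thislen = 0, i;
    int ret = 0;

    for (i = 0; i < count; i++) {
        int unaligned = (vecs[i].len % sizeof(int)) ||
                        ((uintptr_t)vecs[i].base % sizeof(unsigned long));
        if (unaligned && (i + 1 < count || vecs[i].len > 256)) {
            /* bounce buffer covering everything that is left */
            size_t j, totvec = 0;
            for (j = i; j < count; j++)
                totvec += vecs[j].len;
            char *cbuf = calloc(1, round_up_int(totvec)), *p = cbuf;
            if (!cbuf) { ret = -1; break; }
            for (j = i; j < count; j++) {
                memcpy(p, vecs[j].base, vecs[j].len);
                p += vecs[j].len;
            }
            ret = mem_flash_write(to, round_up_int(totvec), &thislen, cbuf);
            totlen += thislen < totvec ? thislen : totvec;
            free(cbuf);
            break;                                  /* everything handled */
        } else if (unaligned) {
            /* single short vector: copy into an int-aligned stack buffer */
            int buf[256 / sizeof(int)] = {0};
            memcpy(buf, vecs[i].base, vecs[i].len);
            ret = mem_flash_write(to, round_up_int(vecs[i].len), &thislen, buf);
            if (thislen > vecs[i].len) thislen = vecs[i].len;
        } else {
            ret = mem_flash_write(to, vecs[i].len, &thislen, vecs[i].base);
        }
        totlen += thislen;
        if (ret || thislen != vecs[i].len) break;
        to += vecs[i].len;
    }
    if (retlen) *retlen = totlen;
    return ret;
}

int main(void)
{
    char a[] = "abcdef", b[] = "hijklmno";
    struct kv v[2] = { { a, 6 }, { b, 8 } };
    size_t written = 0;
    int rc = writev_model(v, 2, 0, &written);
    printf("rc=%d written=%zu\n", rc, written);
    return rc;
}

The companion change to jffs2_flash_direct_write() follows the same register convention: c->mtd->write() reports bytes written or a negative error, and the wrapper converts that into the usual return-0-and-fill-*retlen contract.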
diff -Nupr old/fs/jffs2/xattr.c new/fs/jffs2/xattr.c
|
|
--- old/fs/jffs2/xattr.c 2022-05-09 17:15:24.360000000 +0800
|
|
+++ new/fs/jffs2/xattr.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,1347 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2006 NEC Corporation
|
|
- *
|
|
- * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
-
|
|
-#define JFFS2_XATTR_IS_CORRUPTED 1
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/time.h>
|
|
-#include <linux/pagemap.h>
|
|
-#include <linux/highmem.h>
|
|
-#include <linux/crc32.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/xattr.h>
|
|
-#include <linux/posix_acl_xattr.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include "nodelist.h"
|
|
-/* -------- xdatum related functions ----------------
|
|
- * xattr_datum_hashkey(xprefix, xname, xvalue, xsize)
|
|
- * is used to calcurate xdatum hashkey. The reminder of hashkey into XATTRINDEX_HASHSIZE is
|
|
- * the index of the xattr name/value pair cache (c->xattrindex).
|
|
- * is_xattr_datum_unchecked(c, xd)
|
|
- * returns 1, if xdatum contains any unchecked raw nodes. if all raw nodes are not
|
|
- * unchecked, it returns 0.
|
|
- * unload_xattr_datum(c, xd)
|
|
- * is used to release xattr name/value pair and detach from c->xattrindex.
|
|
- * reclaim_xattr_datum(c)
|
|
- * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
|
|
- * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
|
|
- * is hard coded as 32KiB.
|
|
- * do_verify_xattr_datum(c, xd)
|
|
- * is used to load the xdatum informations without name/value pair from the medium.
|
|
- * It's necessary once, because those informations are not collected during mounting
|
|
- * process when EBS is enabled.
|
|
- * 0 will be returned, if success. An negative return value means recoverable error, and
|
|
- * positive return value means unrecoverable error. Thus, caller must remove this xdatum
|
|
- * and xref when it returned positive value.
|
|
- * do_load_xattr_datum(c, xd)
|
|
- * is used to load name/value pair from the medium.
|
|
- * The meanings of return value is same as do_verify_xattr_datum().
|
|
- * load_xattr_datum(c, xd)
|
|
- * is used to be as a wrapper of do_verify_xattr_datum() and do_load_xattr_datum().
|
|
- * If xd need to call do_verify_xattr_datum() at first, it's called before calling
|
|
- * do_load_xattr_datum(). The meanings of return value is same as do_verify_xattr_datum().
|
|
- * save_xattr_datum(c, xd)
|
|
- * is used to write xdatum to medium. xd->version will be incremented.
|
|
- * create_xattr_datum(c, xprefix, xname, xvalue, xsize)
|
|
- * is used to create new xdatum and write to medium.
|
|
- * unrefer_xattr_datum(c, xd)
|
|
- * is used to delete a xdatum. When nobody refers this xdatum, JFFS2_XFLAGS_DEAD
|
|
- * is set on xd->flags and chained xattr_dead_list or release it immediately.
|
|
- * In the first case, the garbage collector release it later.
|
|
- * -------------------------------------------------- */
|
|
-static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
|
|
-{
|
|
- int name_len = strlen(xname);
|
|
-
|
|
- return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize);
|
|
-}
|
|
-
|
|
-static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- struct jffs2_raw_node_ref *raw;
|
|
- int rc = 0;
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
|
|
- if (ref_flags(raw) == REF_UNCHECKED) {
|
|
- rc = 1;
|
|
- break;
|
|
- }
|
|
- }
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- return rc;
|
|
-}
|
|
-
|
|
-static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- D1(dbg_xattr("%s: xid=%u, version=%u\n", __func__, xd->xid, xd->version));
|
|
- if (xd->xname) {
|
|
- c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len);
|
|
- kfree(xd->xname);
|
|
- }
|
|
-
|
|
- list_del_init(&xd->xindex);
|
|
- xd->hashkey = 0;
|
|
- xd->xname = NULL;
|
|
- xd->xvalue = NULL;
|
|
-}
|
|
-
|
|
-static void reclaim_xattr_datum(struct jffs2_sb_info *c)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_xattr_datum *xd, *_xd;
|
|
- uint32_t target, before;
|
|
- static int index = 0;
|
|
- int count;
|
|
-
|
|
- if (c->xdatum_mem_threshold > c->xdatum_mem_usage)
|
|
- return;
|
|
-
|
|
- before = c->xdatum_mem_usage;
|
|
- target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */
|
|
- for (count = 0; count < XATTRINDEX_HASHSIZE; count++) {
|
|
- list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) {
|
|
- if (xd->flags & JFFS2_XFLAGS_HOT) {
|
|
- xd->flags &= ~JFFS2_XFLAGS_HOT;
|
|
- } else if (!(xd->flags & JFFS2_XFLAGS_BIND)) {
|
|
- unload_xattr_datum(c, xd);
|
|
- }
|
|
- if (c->xdatum_mem_usage <= target)
|
|
- goto out;
|
|
- }
|
|
- index = (index+1) % XATTRINDEX_HASHSIZE;
|
|
- }
|
|
- out:
|
|
- JFFS2_NOTICE("xdatum_mem_usage from %u byte to %u byte (%u byte reclaimed)\n",
|
|
- before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
|
|
-}
|
|
-
|
|
-static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_eraseblock *jeb;
|
|
- struct jffs2_raw_node_ref *raw;
|
|
- struct jffs2_raw_xattr rx;
|
|
- size_t readlen;
|
|
- uint32_t crc, offset, totlen;
|
|
- int rc;
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- offset = ref_offset(xd->node);
|
|
- if (ref_flags(xd->node) == REF_PRISTINE)
|
|
- goto complete;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx);
|
|
- if (rc || readlen != sizeof(rx)) {
|
|
- JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
|
|
- rc, sizeof(rx), readlen, offset);
|
|
- return rc ? rc : -EIO;
|
|
- }
|
|
- crc = crc32(0, &rx, sizeof(rx) - 4);
|
|
- if (crc != je32_to_cpu(rx.node_crc)) {
|
|
- JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
|
|
- offset, je32_to_cpu(rx.hdr_crc), crc);
|
|
- xd->flags |= JFFS2_XFLAGS_INVALID;
|
|
- return JFFS2_XATTR_IS_CORRUPTED;
|
|
- }
|
|
- totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
|
|
- if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
|
|
- || je16_to_cpu(rx.nodetype) != JFFS2_NODETYPE_XATTR
|
|
- || je32_to_cpu(rx.totlen) != totlen
|
|
- || je32_to_cpu(rx.xid) != xd->xid
|
|
- || je32_to_cpu(rx.version) != xd->version) {
|
|
- JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, "
|
|
- "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n",
|
|
- offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
|
|
- je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR,
|
|
- je32_to_cpu(rx.totlen), totlen,
|
|
- je32_to_cpu(rx.xid), xd->xid,
|
|
- je32_to_cpu(rx.version), xd->version);
|
|
- xd->flags |= JFFS2_XFLAGS_INVALID;
|
|
- return JFFS2_XATTR_IS_CORRUPTED;
|
|
- }
|
|
- xd->xprefix = rx.xprefix;
|
|
- xd->name_len = rx.name_len;
|
|
- xd->value_len = je16_to_cpu(rx.value_len);
|
|
- xd->data_crc = je32_to_cpu(rx.data_crc);
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- complete:
|
|
- for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
|
|
- jeb = &c->blocks[ref_offset(raw) / c->sector_size];
|
|
- totlen = PAD(ref_totlen(c, jeb, raw));
|
|
- if (ref_flags(raw) == REF_UNCHECKED) {
|
|
- c->unchecked_size -= totlen; c->used_size += totlen;
|
|
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
|
|
- }
|
|
- raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL);
|
|
- }
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- /* unchecked xdatum is chained with c->xattr_unchecked */
|
|
- list_del_init(&xd->xindex);
|
|
-
|
|
- dbg_xattr("success on verifying xdatum (xid=%u, version=%u)\n",
|
|
- xd->xid, xd->version);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- char *data;
|
|
- size_t readlen;
|
|
- uint32_t crc, length;
|
|
- int i, ret, retry = 0;
|
|
-
|
|
- BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
|
|
- BUG_ON(!list_empty(&xd->xindex));
|
|
- retry:
|
|
- length = xd->name_len + 1 + xd->value_len;
|
|
- data = kmalloc(length, GFP_KERNEL);
|
|
- if (!data)
|
|
- return -ENOMEM;
|
|
-
|
|
- ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr),
|
|
- length, &readlen, data);
|
|
-
|
|
- if (ret || length!=readlen) {
|
|
- JFFS2_WARNING("jffs2_flash_read() returned %d, request=%d, readlen=%zu, at %#08x\n",
|
|
- ret, length, readlen, ref_offset(xd->node));
|
|
- kfree(data);
|
|
- return ret ? ret : -EIO;
|
|
- }
|
|
-
|
|
- data[xd->name_len] = '\0';
|
|
- crc = crc32(0, data, length);
|
|
- if (crc != xd->data_crc) {
|
|
- JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)"
|
|
- " at %#08x, read: 0x%08x calculated: 0x%08x\n",
|
|
- ref_offset(xd->node), xd->data_crc, crc);
|
|
- kfree(data);
|
|
- xd->flags |= JFFS2_XFLAGS_INVALID;
|
|
- return JFFS2_XATTR_IS_CORRUPTED;
|
|
- }
|
|
-
|
|
- xd->flags |= JFFS2_XFLAGS_HOT;
|
|
- xd->xname = data;
|
|
- xd->xvalue = data + xd->name_len+1;
|
|
-
|
|
- c->xdatum_mem_usage += length;
|
|
-
|
|
- xd->hashkey = xattr_datum_hashkey(xd->xprefix, xd->xname, xd->xvalue, xd->value_len);
|
|
- i = xd->hashkey % XATTRINDEX_HASHSIZE;
|
|
- list_add(&xd->xindex, &c->xattrindex[i]);
|
|
- if (!retry) {
|
|
- retry = 1;
|
|
- reclaim_xattr_datum(c);
|
|
- if (!xd->xname)
|
|
- goto retry;
|
|
- }
|
|
-
|
|
- dbg_xattr("success on loading xdatum (xid=%u, xprefix=%u, xname='%s')\n",
|
|
- xd->xid, xd->xprefix, xd->xname);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem);
|
|
- * rc < 0 : recoverable error, try again
|
|
- * rc = 0 : success
|
|
- * rc > 0 : Unrecoverable error, this node should be deleted.
|
|
- */
|
|
- int rc = 0;
|
|
-
|
|
- BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD);
|
|
- if (xd->xname)
|
|
- return 0;
|
|
- if (xd->flags & JFFS2_XFLAGS_INVALID)
|
|
- return JFFS2_XATTR_IS_CORRUPTED;
|
|
- if (unlikely(is_xattr_datum_unchecked(c, xd)))
|
|
- rc = do_verify_xattr_datum(c, xd);
|
|
- if (!rc)
|
|
- rc = do_load_xattr_datum(c, xd);
|
|
- return rc;
|
|
-}
|
|
-
|
|
-static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_raw_xattr rx;
|
|
- struct kvec vecs[2];
|
|
- size_t length;
|
|
- int rc, totlen;
|
|
- uint32_t phys_ofs = write_ofs(c);
|
|
-
|
|
- BUG_ON(!xd->xname);
|
|
- BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID));
|
|
-
|
|
- vecs[0].iov_base = ℞
|
|
- vecs[0].iov_len = sizeof(rx);
|
|
- vecs[1].iov_base = xd->xname;
|
|
- vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
|
|
- totlen = vecs[0].iov_len + vecs[1].iov_len;
|
|
-
|
|
- /* Setup raw-xattr */
|
|
- memset(&rx, 0, sizeof(rx));
|
|
- rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
- rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
|
|
- rx.totlen = cpu_to_je32(PAD(totlen));
|
|
- rx.hdr_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_unknown_node) - 4));
|
|
-
|
|
- rx.xid = cpu_to_je32(xd->xid);
|
|
- rx.version = cpu_to_je32(++xd->version);
|
|
- rx.xprefix = xd->xprefix;
|
|
- rx.name_len = xd->name_len;
|
|
- rx.value_len = cpu_to_je16(xd->value_len);
|
|
- rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len));
|
|
- rx.node_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_raw_xattr) - 4));
|
|
-
|
|
- rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0);
|
|
- if (rc || totlen != length) {
|
|
- JFFS2_WARNING("jffs2_flash_writev()=%d, req=%u, wrote=%zu, at %#08x\n",
|
|
- rc, totlen, length, phys_ofs);
|
|
- rc = rc ? rc : -EIO;
|
|
- if (length)
|
|
- jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL);
|
|
-
|
|
- return rc;
|
|
- }
|
|
- /* success */
|
|
- jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd);
|
|
-
|
|
- dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
|
|
- xd->xid, xd->version, xd->xprefix, xd->xname);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
|
|
- int xprefix, const char *xname,
|
|
- const char *xvalue, int xsize)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_xattr_datum *xd;
|
|
- uint32_t hashkey, name_len;
|
|
- char *data;
|
|
- int i, rc;
|
|
-
|
|
- /* Search xattr_datum has same xname/xvalue by index */
|
|
- hashkey = xattr_datum_hashkey(xprefix, xname, xvalue, xsize);
|
|
- i = hashkey % XATTRINDEX_HASHSIZE;
|
|
- list_for_each_entry(xd, &c->xattrindex[i], xindex) {
|
|
- if (xd->hashkey==hashkey
|
|
- && xd->xprefix==xprefix
|
|
- && xd->value_len==xsize
|
|
- && !strcmp(xd->xname, xname)
|
|
- && !memcmp(xd->xvalue, xvalue, xsize)) {
|
|
- atomic_inc(&xd->refcnt);
|
|
- return xd;
|
|
- }
|
|
- }
|
|
-
|
|
- /* Not found, Create NEW XATTR-Cache */
|
|
- name_len = strlen(xname);
|
|
-
|
|
- xd = jffs2_alloc_xattr_datum();
|
|
- if (!xd)
|
|
- return ERR_PTR(-ENOMEM);
|
|
-
|
|
- data = kmalloc(name_len + 1 + xsize, GFP_KERNEL);
|
|
- if (!data) {
|
|
- jffs2_free_xattr_datum(xd);
|
|
- return ERR_PTR(-ENOMEM);
|
|
- }
|
|
- strcpy(data, xname);
|
|
- memcpy(data + name_len + 1, xvalue, xsize);
|
|
-
|
|
- atomic_set(&xd->refcnt, 1);
|
|
- xd->xid = ++c->highest_xid;
|
|
- xd->flags |= JFFS2_XFLAGS_HOT;
|
|
- xd->xprefix = xprefix;
|
|
-
|
|
- xd->hashkey = hashkey;
|
|
- xd->xname = data;
|
|
- xd->xvalue = data + name_len + 1;
|
|
- xd->name_len = name_len;
|
|
- xd->value_len = xsize;
|
|
- xd->data_crc = crc32(0, data, xd->name_len + 1 + xd->value_len);
|
|
-
|
|
- rc = save_xattr_datum(c, xd);
|
|
- if (rc) {
|
|
- kfree(xd->xname);
|
|
- jffs2_free_xattr_datum(xd);
|
|
- return ERR_PTR(rc);
|
|
- }
|
|
-
|
|
- /* Insert Hash Index */
|
|
- i = hashkey % XATTRINDEX_HASHSIZE;
|
|
- list_add(&xd->xindex, &c->xattrindex[i]);
|
|
-
|
|
- c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len);
|
|
- reclaim_xattr_datum(c);
|
|
-
|
|
- return xd;
|
|
-}
|
|
-
|
|
-static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) {
|
|
- unload_xattr_datum(c, xd);
|
|
- xd->flags |= JFFS2_XFLAGS_DEAD;
|
|
- if (xd->node == (void *)xd) {
|
|
- BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID));
|
|
- jffs2_free_xattr_datum(xd);
|
|
- } else {
|
|
- list_add(&xd->xindex, &c->xattr_dead_list);
|
|
- }
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n",
|
|
- xd->xid, xd->version);
|
|
- }
|
|
-}
|
|
-
|
|
-/* -------- xref related functions ------------------
|
|
- * verify_xattr_ref(c, ref)
|
|
- * is used to load xref information from medium. Because summary data does not
|
|
- * contain xid/ino, it's necessary to verify once while mounting process.
|
|
- * save_xattr_ref(c, ref)
|
|
- * is used to write xref to medium. If delete marker is marked, it write
|
|
- * a delete marker of xref into medium.
|
|
- * create_xattr_ref(c, ic, xd)
|
|
- * is used to create a new xref and write to medium.
|
|
- * delete_xattr_ref(c, ref)
|
|
- * is used to delete jffs2_xattr_ref. It marks xref XREF_DELETE_MARKER,
|
|
- * and allows GC to reclaim those physical nodes.
|
|
- * jffs2_xattr_delete_inode(c, ic)
|
|
- * is called to remove xrefs related to obsolete inode when inode is unlinked.
|
|
- * jffs2_xattr_free_inode(c, ic)
|
|
- * is called to release xattr related objects when unmounting.
|
|
- * check_xattr_ref_inode(c, ic)
|
|
- * is used to confirm inode does not have duplicate xattr name/value pair.
|
|
- * jffs2_xattr_do_crccheck_inode(c, ic)
|
|
- * is used to force xattr data integrity check during the initial gc scan.
|
|
- * -------------------------------------------------- */
|
|
-static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
|
|
-{
|
|
- struct jffs2_eraseblock *jeb;
|
|
- struct jffs2_raw_node_ref *raw;
|
|
- struct jffs2_raw_xref rr;
|
|
- size_t readlen;
|
|
- uint32_t crc, offset, totlen;
|
|
- int rc;
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- if (ref_flags(ref->node) != REF_UNCHECKED)
|
|
- goto complete;
|
|
- offset = ref_offset(ref->node);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr);
|
|
- if (rc || sizeof(rr) != readlen) {
|
|
- JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
|
|
- rc, sizeof(rr), readlen, offset);
|
|
- return rc ? rc : -EIO;
|
|
- }
|
|
- /* obsolete node */
|
|
- crc = crc32(0, &rr, sizeof(rr) - 4);
|
|
- if (crc != je32_to_cpu(rr.node_crc)) {
|
|
- JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
|
|
- offset, je32_to_cpu(rr.node_crc), crc);
|
|
- return JFFS2_XATTR_IS_CORRUPTED;
|
|
- }
|
|
- if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
|
|
- || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
|
|
- || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
|
|
- JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
|
|
- "nodetype=%#04x/%#04x, totlen=%u/%zu\n",
|
|
- offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
|
|
- je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
|
|
- je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
|
|
- return JFFS2_XATTR_IS_CORRUPTED;
|
|
- }
|
|
- ref->ino = je32_to_cpu(rr.ino);
|
|
- ref->xid = je32_to_cpu(rr.xid);
|
|
- ref->xseqno = je32_to_cpu(rr.xseqno);
|
|
- if (ref->xseqno > c->highest_xseqno)
|
|
- c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
|
|
-
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- complete:
|
|
- for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) {
|
|
- jeb = &c->blocks[ref_offset(raw) / c->sector_size];
|
|
- totlen = PAD(ref_totlen(c, jeb, raw));
|
|
- if (ref_flags(raw) == REF_UNCHECKED) {
|
|
- c->unchecked_size -= totlen; c->used_size += totlen;
|
|
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
|
|
- }
|
|
- raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL);
|
|
- }
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
|
|
- ref->ino, ref->xid, ref_offset(ref->node));
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_raw_xref rr;
|
|
- size_t length;
|
|
- uint32_t xseqno, phys_ofs = write_ofs(c);
|
|
- int ret;
|
|
-
|
|
- rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
|
|
- rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF);
|
|
- rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
|
|
- rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
|
|
-
|
|
- xseqno = (c->highest_xseqno += 2);
|
|
- if (is_xattr_ref_dead(ref)) {
|
|
- xseqno |= XREF_DELETE_MARKER;
|
|
- rr.ino = cpu_to_je32(ref->ino);
|
|
- rr.xid = cpu_to_je32(ref->xid);
|
|
- } else {
|
|
- rr.ino = cpu_to_je32(ref->ic->ino);
|
|
- rr.xid = cpu_to_je32(ref->xd->xid);
|
|
- }
|
|
- rr.xseqno = cpu_to_je32(xseqno);
|
|
- rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
|
|
-
|
|
- ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
|
|
- if (ret || sizeof(rr) != length) {
|
|
- JFFS2_WARNING("jffs2_flash_write() returned %d, request=%zu, retlen=%zu, at %#08x\n",
|
|
- ret, sizeof(rr), length, phys_ofs);
|
|
- ret = ret ? ret : -EIO;
|
|
- if (length)
|
|
- jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL);
|
|
-
|
|
- return ret;
|
|
- }
|
|
- /* success */
|
|
- ref->xseqno = xseqno;
|
|
- jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref);
|
|
-
|
|
- dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic,
|
|
- struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_xattr_ref *ref;
|
|
- int ret;
|
|
-
|
|
- ref = jffs2_alloc_xattr_ref();
|
|
- if (!ref)
|
|
- return ERR_PTR(-ENOMEM);
|
|
- ref->ic = ic;
|
|
- ref->xd = xd;
|
|
-
|
|
- ret = save_xattr_ref(c, ref);
|
|
- if (ret) {
|
|
- jffs2_free_xattr_ref(ref);
|
|
- return ERR_PTR(ret);
|
|
- }
|
|
-
|
|
- /* Chain to inode */
|
|
- ref->next = ic->xref;
|
|
- ic->xref = ref;
|
|
-
|
|
- return ref; /* success */
|
|
-}
|
|
-
|
|
-static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
|
|
-{
|
|
- /* must be called under down_write(xattr_sem) */
|
|
- struct jffs2_xattr_datum *xd;
|
|
-
|
|
- xd = ref->xd;
|
|
- ref->xseqno |= XREF_DELETE_MARKER;
|
|
- ref->ino = ref->ic->ino;
|
|
- ref->xid = ref->xd->xid;
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- ref->next = c->xref_dead_list;
|
|
- c->xref_dead_list = ref;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
-
|
|
- dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n",
|
|
- ref->ino, ref->xid, ref->xseqno);
|
|
-
|
|
- unrefer_xattr_datum(c, xd);
|
|
-}
|
|
-
|
|
-void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
|
|
-{
|
|
- /* It's called from jffs2_evict_inode() on inode removing.
|
|
- When an inode with XATTR is removed, those XATTRs must be removed. */
|
|
- struct jffs2_xattr_ref *ref, *_ref;
|
|
-
|
|
- if (!ic || ic->pino_nlink > 0)
|
|
- return;
|
|
-
|
|
- down_write(&c->xattr_sem);
|
|
- for (ref = ic->xref; ref; ref = _ref) {
|
|
- _ref = ref->next;
|
|
- delete_xattr_ref(c, ref);
|
|
- }
|
|
- ic->xref = NULL;
|
|
- up_write(&c->xattr_sem);
|
|
-}
|
|
-
|
|
-void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
|
|
-{
|
|
- /* It's called from jffs2_free_ino_caches() until unmounting FS. */
|
|
- struct jffs2_xattr_datum *xd;
|
|
- struct jffs2_xattr_ref *ref, *_ref;
|
|
-
|
|
- down_write(&c->xattr_sem);
|
|
- for (ref = ic->xref; ref; ref = _ref) {
|
|
- _ref = ref->next;
|
|
- xd = ref->xd;
|
|
- if (atomic_dec_and_test(&xd->refcnt)) {
|
|
- unload_xattr_datum(c, xd);
|
|
- jffs2_free_xattr_datum(xd);
|
|
- }
|
|
- jffs2_free_xattr_ref(ref);
|
|
- }
|
|
- ic->xref = NULL;
|
|
- up_write(&c->xattr_sem);
|
|
-}
|
|
-
|
|
-static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
|
|
-{
|
|
- /* success of check_xattr_ref_inode() means that inode (ic) dose not have
|
|
- * duplicate name/value pairs. If duplicate name/value pair would be found,
|
|
- * one will be removed.
|
|
- */
|
|
- struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp;
|
|
- int rc = 0;
|
|
-
|
|
- if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
|
|
- return 0;
|
|
- down_write(&c->xattr_sem);
|
|
- retry:
|
|
- rc = 0;
|
|
- for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
|
|
- if (!ref->xd->xname) {
|
|
- rc = load_xattr_datum(c, ref->xd);
|
|
- if (unlikely(rc > 0)) {
|
|
- *pref = ref->next;
|
|
- delete_xattr_ref(c, ref);
|
|
- goto retry;
|
|
- } else if (unlikely(rc < 0))
|
|
- goto out;
|
|
- }
|
|
- for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) {
|
|
- if (!cmp->xd->xname) {
|
|
- ref->xd->flags |= JFFS2_XFLAGS_BIND;
|
|
- rc = load_xattr_datum(c, cmp->xd);
|
|
- ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
|
|
- if (unlikely(rc > 0)) {
|
|
- *pcmp = cmp->next;
|
|
- delete_xattr_ref(c, cmp);
|
|
- goto retry;
|
|
- } else if (unlikely(rc < 0))
|
|
- goto out;
|
|
- }
|
|
- if (ref->xd->xprefix == cmp->xd->xprefix
|
|
- && !strcmp(ref->xd->xname, cmp->xd->xname)) {
|
|
- if (ref->xseqno > cmp->xseqno) {
|
|
- *pcmp = cmp->next;
|
|
- delete_xattr_ref(c, cmp);
|
|
- } else {
|
|
- *pref = ref->next;
|
|
- delete_xattr_ref(c, ref);
|
|
- }
|
|
- goto retry;
|
|
- }
|
|
- }
|
|
- }
|
|
- ic->flags |= INO_FLAGS_XATTR_CHECKED;
|
|
- out:
|
|
- up_write(&c->xattr_sem);
|
|
-
|
|
- return rc;
|
|
-}
|
|
-
|
|
-void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
|
|
-{
|
|
- check_xattr_ref_inode(c, ic);
|
|
-}
|
|
-
|
|
-/* -------- xattr subsystem functions ---------------
|
|
- * jffs2_init_xattr_subsystem(c)
|
|
- * is used to initialize semaphore and list_head, and some variables.
|
|
- * jffs2_find_xattr_datum(c, xid)
|
|
- * is used to lookup xdatum while scanning process.
|
|
- * jffs2_clear_xattr_subsystem(c)
|
|
- * is used to release any xattr related objects.
|
|
- * jffs2_build_xattr_subsystem(c)
|
|
- * is used to associate xdatum and xref while super block building process.
|
|
- * jffs2_setup_xattr_datum(c, xid, version)
|
|
- * is used to insert xdatum while scanning process.
|
|
- * -------------------------------------------------- */
|
|
-void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c)
|
|
-{
|
|
- int i;
|
|
-
|
|
- for (i=0; i < XATTRINDEX_HASHSIZE; i++)
|
|
- INIT_LIST_HEAD(&c->xattrindex[i]);
|
|
- INIT_LIST_HEAD(&c->xattr_unchecked);
|
|
- INIT_LIST_HEAD(&c->xattr_dead_list);
|
|
- c->xref_dead_list = NULL;
|
|
- c->xref_temp = NULL;
|
|
-
|
|
- init_rwsem(&c->xattr_sem);
|
|
- c->highest_xid = 0;
|
|
- c->highest_xseqno = 0;
|
|
- c->xdatum_mem_usage = 0;
|
|
- c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */
|
|
-}
|
|
-
|
|
-static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid)
|
|
-{
|
|
- struct jffs2_xattr_datum *xd;
|
|
- int i = xid % XATTRINDEX_HASHSIZE;
|
|
-
|
|
- /* It's only used in scanning/building process. */
|
|
- BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING)));
|
|
-
|
|
- list_for_each_entry(xd, &c->xattrindex[i], xindex) {
|
|
- if (xd->xid==xid)
|
|
- return xd;
|
|
- }
|
|
- return NULL;
|
|
-}
|
|
-
|
|
-void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct jffs2_xattr_datum *xd, *_xd;
|
|
- struct jffs2_xattr_ref *ref, *_ref;
|
|
- int i;
|
|
-
|
|
- for (ref=c->xref_temp; ref; ref = _ref) {
|
|
- _ref = ref->next;
|
|
- jffs2_free_xattr_ref(ref);
|
|
- }
|
|
-
|
|
- for (ref=c->xref_dead_list; ref; ref = _ref) {
|
|
- _ref = ref->next;
|
|
- jffs2_free_xattr_ref(ref);
|
|
- }
|
|
-
|
|
- for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
|
|
- list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
|
|
- list_del(&xd->xindex);
|
|
- kfree(xd->xname);
|
|
- jffs2_free_xattr_datum(xd);
|
|
- }
|
|
- }
|
|
-
|
|
- list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) {
|
|
- list_del(&xd->xindex);
|
|
- jffs2_free_xattr_datum(xd);
|
|
- }
|
|
- list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
|
|
- list_del(&xd->xindex);
|
|
- jffs2_free_xattr_datum(xd);
|
|
- }
|
|
-}
|
|
-
|
|
-#define XREF_TMPHASH_SIZE (128)
|
|
-void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct jffs2_xattr_ref *ref, *_ref;
|
|
- struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
|
|
- struct jffs2_xattr_datum *xd, *_xd;
|
|
- struct jffs2_inode_cache *ic;
|
|
- struct jffs2_raw_node_ref *raw;
|
|
- int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0;
|
|
- int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0;
|
|
-
|
|
- BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
|
|
-
|
|
- /* Phase.1 : Merge same xref */
|
|
- for (i=0; i < XREF_TMPHASH_SIZE; i++)
|
|
- xref_tmphash[i] = NULL;
|
|
- for (ref=c->xref_temp; ref; ref=_ref) {
|
|
- struct jffs2_xattr_ref *tmp;
|
|
-
|
|
- _ref = ref->next;
|
|
- if (ref_flags(ref->node) != REF_PRISTINE) {
|
|
- if (verify_xattr_ref(c, ref)) {
|
|
- BUG_ON(ref->node->next_in_ino != (void *)ref);
|
|
- ref->node->next_in_ino = NULL;
|
|
- jffs2_mark_node_obsolete(c, ref->node);
|
|
- jffs2_free_xattr_ref(ref);
|
|
- continue;
|
|
- }
|
|
- }
|
|
-
|
|
- i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE;
|
|
- for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) {
|
|
- if (tmp->ino == ref->ino && tmp->xid == ref->xid)
|
|
- break;
|
|
- }
|
|
- if (tmp) {
|
|
- raw = ref->node;
|
|
- if (ref->xseqno > tmp->xseqno) {
|
|
- tmp->xseqno = ref->xseqno;
|
|
- raw->next_in_ino = tmp->node;
|
|
- tmp->node = raw;
|
|
- } else {
|
|
- raw->next_in_ino = tmp->node->next_in_ino;
|
|
- tmp->node->next_in_ino = raw;
|
|
- }
|
|
- jffs2_free_xattr_ref(ref);
|
|
- continue;
|
|
- } else {
|
|
- ref->next = xref_tmphash[i];
|
|
- xref_tmphash[i] = ref;
|
|
- }
|
|
- }
|
|
- c->xref_temp = NULL;
|
|
-
|
|
- /* Phase.2 : Bind xref with inode_cache and xattr_datum */
|
|
- for (i=0; i < XREF_TMPHASH_SIZE; i++) {
|
|
- for (ref=xref_tmphash[i]; ref; ref=_ref) {
|
|
- xref_count++;
|
|
- _ref = ref->next;
|
|
- if (is_xattr_ref_dead(ref)) {
|
|
- ref->next = c->xref_dead_list;
|
|
- c->xref_dead_list = ref;
|
|
- xref_dead_count++;
|
|
- continue;
|
|
- }
|
|
- /* At this point, ref->xid and ref->ino contain XID and inode number.
|
|
- ref->xd and ref->ic are not valid yet. */
|
|
- xd = jffs2_find_xattr_datum(c, ref->xid);
|
|
- ic = jffs2_get_ino_cache(c, ref->ino);
|
|
- if (!xd || !ic || !ic->pino_nlink) {
|
|
- dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n",
|
|
- ref->ino, ref->xid, ref->xseqno);
|
|
- ref->xseqno |= XREF_DELETE_MARKER;
|
|
- ref->next = c->xref_dead_list;
|
|
- c->xref_dead_list = ref;
|
|
- xref_orphan_count++;
|
|
- continue;
|
|
- }
|
|
- ref->xd = xd;
|
|
- ref->ic = ic;
|
|
- atomic_inc(&xd->refcnt);
|
|
- ref->next = ic->xref;
|
|
- ic->xref = ref;
|
|
- }
|
|
- }
|
|
-
|
|
- /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */
|
|
- for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
|
|
- list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
|
|
- xdatum_count++;
|
|
- list_del_init(&xd->xindex);
|
|
- if (!atomic_read(&xd->refcnt)) {
|
|
- dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n",
|
|
- xd->xid, xd->version);
|
|
- xd->flags |= JFFS2_XFLAGS_DEAD;
|
|
- list_add(&xd->xindex, &c->xattr_unchecked);
|
|
- xdatum_orphan_count++;
|
|
- continue;
|
|
- }
|
|
- if (is_xattr_datum_unchecked(c, xd)) {
|
|
- dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n",
|
|
- xd->xid, xd->version);
|
|
- list_add(&xd->xindex, &c->xattr_unchecked);
|
|
- xdatum_unchecked_count++;
|
|
- }
|
|
- }
|
|
- }
|
|
- /* build complete */
|
|
- JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum"
|
|
- " (%u unchecked, %u orphan) and "
|
|
- "%u of xref (%u dead, %u orphan) found.\n",
|
|
- xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
|
|
- xref_count, xref_dead_count, xref_orphan_count);
|
|
-}
|
|
-
|
|
-struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
|
|
- uint32_t xid, uint32_t version)
|
|
-{
|
|
- struct jffs2_xattr_datum *xd;
|
|
-
|
|
- xd = jffs2_find_xattr_datum(c, xid);
|
|
- if (!xd) {
|
|
- xd = jffs2_alloc_xattr_datum();
|
|
- if (!xd)
|
|
- return ERR_PTR(-ENOMEM);
|
|
- xd->xid = xid;
|
|
- xd->version = version;
|
|
- if (xd->xid > c->highest_xid)
|
|
- c->highest_xid = xd->xid;
|
|
- list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
|
|
- }
|
|
- return xd;
|
|
-}
|
|
-
|
|
-/* -------- xattr subsystem functions ---------------
|
|
- * xprefix_to_handler(xprefix)
|
|
- * is used to translate xprefix into xattr_handler.
|
|
- * jffs2_listxattr(dentry, buffer, size)
|
|
- * is an implementation of listxattr handler on jffs2.
|
|
- * do_jffs2_getxattr(inode, xprefix, xname, buffer, size)
|
|
- * is an implementation of getxattr handler on jffs2.
|
|
- * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
|
|
- * is an implementation of setxattr handler on jffs2.
|
|
- * -------------------------------------------------- */
|
|
-const struct xattr_handler *jffs2_xattr_handlers[] = {
|
|
- &jffs2_user_xattr_handler,
|
|
-#ifdef CONFIG_JFFS2_FS_SECURITY
|
|
- &jffs2_security_xattr_handler,
|
|
-#endif
|
|
-#ifdef CONFIG_JFFS2_FS_POSIX_ACL
|
|
- &posix_acl_access_xattr_handler,
|
|
- &posix_acl_default_xattr_handler,
|
|
-#endif
|
|
- &jffs2_trusted_xattr_handler,
|
|
- NULL
|
|
-};
|
|
-
|
|
-static const struct xattr_handler *xprefix_to_handler(int xprefix) {
|
|
- const struct xattr_handler *ret;
|
|
-
|
|
- switch (xprefix) {
|
|
- case JFFS2_XPREFIX_USER:
|
|
- ret = &jffs2_user_xattr_handler;
|
|
- break;
|
|
-#ifdef CONFIG_JFFS2_FS_SECURITY
|
|
- case JFFS2_XPREFIX_SECURITY:
|
|
- ret = &jffs2_security_xattr_handler;
|
|
- break;
|
|
-#endif
|
|
-#ifdef CONFIG_JFFS2_FS_POSIX_ACL
|
|
- case JFFS2_XPREFIX_ACL_ACCESS:
|
|
- ret = &posix_acl_access_xattr_handler;
|
|
- break;
|
|
- case JFFS2_XPREFIX_ACL_DEFAULT:
|
|
- ret = &posix_acl_default_xattr_handler;
|
|
- break;
|
|
-#endif
|
|
- case JFFS2_XPREFIX_TRUSTED:
|
|
- ret = &jffs2_trusted_xattr_handler;
|
|
- break;
|
|
- default:
|
|
- ret = NULL;
|
|
- break;
|
|
- }
|
|
- return ret;
|
|
-}
|
|
-
|
|
-ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
|
-{
|
|
- struct inode *inode = d_inode(dentry);
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- struct jffs2_inode_cache *ic = f->inocache;
|
|
- struct jffs2_xattr_ref *ref, **pref;
|
|
- struct jffs2_xattr_datum *xd;
|
|
- const struct xattr_handler *xhandle;
|
|
- const char *prefix;
|
|
- ssize_t prefix_len, len, rc;
|
|
- int retry = 0;
|
|
-
|
|
- rc = check_xattr_ref_inode(c, ic);
|
|
- if (unlikely(rc))
|
|
- return rc;
|
|
-
|
|
- down_read(&c->xattr_sem);
|
|
- retry:
|
|
- len = 0;
|
|
- for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
|
|
- BUG_ON(ref->ic != ic);
|
|
- xd = ref->xd;
|
|
- if (!xd->xname) {
|
|
- /* xdatum is unchached */
|
|
- if (!retry) {
|
|
- retry = 1;
|
|
- up_read(&c->xattr_sem);
|
|
- down_write(&c->xattr_sem);
|
|
- goto retry;
|
|
- } else {
|
|
- rc = load_xattr_datum(c, xd);
|
|
- if (unlikely(rc > 0)) {
|
|
- *pref = ref->next;
|
|
- delete_xattr_ref(c, ref);
|
|
- goto retry;
|
|
- } else if (unlikely(rc < 0))
|
|
- goto out;
|
|
- }
|
|
- }
|
|
- xhandle = xprefix_to_handler(xd->xprefix);
|
|
- if (!xhandle || (xhandle->list && !xhandle->list(dentry)))
|
|
- continue;
|
|
- prefix = xhandle->prefix ?: xhandle->name;
|
|
- prefix_len = strlen(prefix);
|
|
- rc = prefix_len + xd->name_len + 1;
|
|
-
|
|
- if (buffer) {
|
|
- if (rc > size - len) {
|
|
- rc = -ERANGE;
|
|
- goto out;
|
|
- }
|
|
- memcpy(buffer, prefix, prefix_len);
|
|
- buffer += prefix_len;
|
|
- memcpy(buffer, xd->xname, xd->name_len);
|
|
- buffer += xd->name_len;
|
|
- *buffer++ = 0;
|
|
- }
|
|
- len += rc;
|
|
- }
|
|
- rc = len;
|
|
- out:
|
|
- if (!retry) {
|
|
- up_read(&c->xattr_sem);
|
|
- } else {
|
|
- up_write(&c->xattr_sem);
|
|
- }
|
|
- return rc;
|
|
-}
|
|
-
|
|
-int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
|
|
- char *buffer, size_t size)
|
|
-{
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- struct jffs2_inode_cache *ic = f->inocache;
|
|
- struct jffs2_xattr_datum *xd;
|
|
- struct jffs2_xattr_ref *ref, **pref;
|
|
- int rc, retry = 0;
|
|
-
|
|
- rc = check_xattr_ref_inode(c, ic);
|
|
- if (unlikely(rc))
|
|
- return rc;
|
|
-
|
|
- down_read(&c->xattr_sem);
|
|
- retry:
|
|
- for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
|
|
- BUG_ON(ref->ic!=ic);
|
|
-
|
|
- xd = ref->xd;
|
|
- if (xd->xprefix != xprefix)
|
|
- continue;
|
|
- if (!xd->xname) {
|
|
- /* xdatum is unchached */
|
|
- if (!retry) {
|
|
- retry = 1;
|
|
- up_read(&c->xattr_sem);
|
|
- down_write(&c->xattr_sem);
|
|
- goto retry;
|
|
- } else {
|
|
- rc = load_xattr_datum(c, xd);
|
|
- if (unlikely(rc > 0)) {
|
|
- *pref = ref->next;
|
|
- delete_xattr_ref(c, ref);
|
|
- goto retry;
|
|
- } else if (unlikely(rc < 0)) {
|
|
- goto out;
|
|
- }
|
|
- }
|
|
- }
|
|
- if (!strcmp(xname, xd->xname)) {
|
|
- rc = xd->value_len;
|
|
- if (buffer) {
|
|
- if (size < rc) {
|
|
- rc = -ERANGE;
|
|
- } else {
|
|
- memcpy(buffer, xd->xvalue, rc);
|
|
- }
|
|
- }
|
|
- goto out;
|
|
- }
|
|
- }
|
|
- rc = -ENODATA;
|
|
- out:
|
|
- if (!retry) {
|
|
- up_read(&c->xattr_sem);
|
|
- } else {
|
|
- up_write(&c->xattr_sem);
|
|
- }
|
|
- return rc;
|
|
-}
|
|
-
|
|
-int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
|
|
- const char *buffer, size_t size, int flags)
|
|
-{
|
|
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
|
|
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
|
|
- struct jffs2_inode_cache *ic = f->inocache;
|
|
- struct jffs2_xattr_datum *xd;
|
|
- struct jffs2_xattr_ref *ref, *newref, **pref;
|
|
- uint32_t length, request;
|
|
- int rc;
|
|
-
|
|
- rc = check_xattr_ref_inode(c, ic);
|
|
- if (unlikely(rc))
|
|
- return rc;
|
|
-
|
|
- request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
|
|
- rc = jffs2_reserve_space(c, request, &length,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
|
|
- if (rc) {
|
|
- JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
|
|
- return rc;
|
|
- }
|
|
-
|
|
- /* Find existing xattr */
|
|
- down_write(&c->xattr_sem);
|
|
- retry:
|
|
- for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
|
|
- xd = ref->xd;
|
|
- if (xd->xprefix != xprefix)
|
|
- continue;
|
|
- if (!xd->xname) {
|
|
- rc = load_xattr_datum(c, xd);
|
|
- if (unlikely(rc > 0)) {
|
|
- *pref = ref->next;
|
|
- delete_xattr_ref(c, ref);
|
|
- goto retry;
|
|
- } else if (unlikely(rc < 0))
|
|
- goto out;
|
|
- }
|
|
- if (!strcmp(xd->xname, xname)) {
|
|
- if (flags & XATTR_CREATE) {
|
|
- rc = -EEXIST;
|
|
- goto out;
|
|
- }
|
|
- if (!buffer) {
|
|
- ref->ino = ic->ino;
|
|
- ref->xid = xd->xid;
|
|
- ref->xseqno |= XREF_DELETE_MARKER;
|
|
- rc = save_xattr_ref(c, ref);
|
|
- if (!rc) {
|
|
- *pref = ref->next;
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- ref->next = c->xref_dead_list;
|
|
- c->xref_dead_list = ref;
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- unrefer_xattr_datum(c, xd);
|
|
- } else {
|
|
- ref->ic = ic;
|
|
- ref->xd = xd;
|
|
- ref->xseqno &= ~XREF_DELETE_MARKER;
|
|
- }
|
|
- goto out;
|
|
- }
|
|
- goto found;
|
|
- }
|
|
- }
|
|
- /* not found */
|
|
- if (flags & XATTR_REPLACE) {
|
|
- rc = -ENODATA;
|
|
- goto out;
|
|
- }
|
|
- if (!buffer) {
|
|
- rc = -ENODATA;
|
|
- goto out;
|
|
- }
|
|
- found:
|
|
- xd = create_xattr_datum(c, xprefix, xname, buffer, size);
|
|
- if (IS_ERR(xd)) {
|
|
- rc = PTR_ERR(xd);
|
|
- goto out;
|
|
- }
|
|
- up_write(&c->xattr_sem);
|
|
- jffs2_complete_reservation(c);
|
|
-
|
|
- /* create xattr_ref */
|
|
- request = PAD(sizeof(struct jffs2_raw_xref));
|
|
- rc = jffs2_reserve_space(c, request, &length,
|
|
- ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
|
|
- down_write(&c->xattr_sem);
|
|
- if (rc) {
|
|
- JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
|
|
- unrefer_xattr_datum(c, xd);
|
|
- up_write(&c->xattr_sem);
|
|
- return rc;
|
|
- }
|
|
- if (ref)
|
|
- *pref = ref->next;
|
|
- newref = create_xattr_ref(c, ic, xd);
|
|
- if (IS_ERR(newref)) {
|
|
- if (ref) {
|
|
- ref->next = ic->xref;
|
|
- ic->xref = ref;
|
|
- }
|
|
- rc = PTR_ERR(newref);
|
|
- unrefer_xattr_datum(c, xd);
|
|
- } else if (ref) {
|
|
- delete_xattr_ref(c, ref);
|
|
- }
|
|
- out:
|
|
- up_write(&c->xattr_sem);
|
|
- jffs2_complete_reservation(c);
|
|
- return rc;
|
|
-}
|
|
-
|
|
-/* -------- garbage collector functions -------------
|
|
- * jffs2_garbage_collect_xattr_datum(c, xd, raw)
|
|
- * is used to move xdatum into new node.
|
|
- * jffs2_garbage_collect_xattr_ref(c, ref, raw)
|
|
- * is used to move xref into new node.
|
|
- * jffs2_verify_xattr(c)
|
|
- * is used to call do_verify_xattr_datum() before garbage collecting.
|
|
- * jffs2_release_xattr_datum(c, xd)
|
|
- * is used to release an in-memory object of xdatum.
|
|
- * jffs2_release_xattr_ref(c, ref)
|
|
- * is used to release an in-memory object of xref.
|
|
- * -------------------------------------------------- */
|
|
-int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
|
|
- struct jffs2_raw_node_ref *raw)
|
|
-{
|
|
- uint32_t totlen, length, old_ofs;
|
|
- int rc = 0;
|
|
-
|
|
- down_write(&c->xattr_sem);
|
|
- if (xd->node != raw)
|
|
- goto out;
|
|
- if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID))
|
|
- goto out;
|
|
-
|
|
- rc = load_xattr_datum(c, xd);
|
|
- if (unlikely(rc)) {
|
|
- rc = (rc > 0) ? 0 : rc;
|
|
- goto out;
|
|
- }
|
|
- old_ofs = ref_offset(xd->node);
|
|
- totlen = PAD(sizeof(struct jffs2_raw_xattr)
|
|
- + xd->name_len + 1 + xd->value_len);
|
|
- rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
|
|
- if (rc) {
|
|
- JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen);
|
|
- goto out;
|
|
- }
|
|
- rc = save_xattr_datum(c, xd);
|
|
- if (!rc)
|
|
- dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n",
|
|
- xd->xid, xd->version, old_ofs, ref_offset(xd->node));
|
|
- out:
|
|
- if (!rc)
|
|
- jffs2_mark_node_obsolete(c, raw);
|
|
- up_write(&c->xattr_sem);
|
|
- return rc;
|
|
-}
|
|
-
|
|
-int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
|
|
- struct jffs2_raw_node_ref *raw)
|
|
-{
|
|
- uint32_t totlen, length, old_ofs;
|
|
- int rc = 0;
|
|
-
|
|
- down_write(&c->xattr_sem);
|
|
- BUG_ON(!ref->node);
|
|
-
|
|
- if (ref->node != raw)
|
|
- goto out;
|
|
- if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref))
|
|
- goto out;
|
|
-
|
|
- old_ofs = ref_offset(ref->node);
|
|
- totlen = ref_totlen(c, c->gcblock, ref->node);
|
|
-
|
|
- rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
|
|
- if (rc) {
|
|
- JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n",
|
|
- __func__, rc, totlen);
|
|
- goto out;
|
|
- }
|
|
- rc = save_xattr_ref(c, ref);
|
|
- if (!rc)
|
|
- dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
|
|
- ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
|
|
- out:
|
|
- if (!rc)
|
|
- jffs2_mark_node_obsolete(c, raw);
|
|
- up_write(&c->xattr_sem);
|
|
- return rc;
|
|
-}
|
|
-
|
|
-int jffs2_verify_xattr(struct jffs2_sb_info *c)
|
|
-{
|
|
- struct jffs2_xattr_datum *xd, *_xd;
|
|
- struct jffs2_eraseblock *jeb;
|
|
- struct jffs2_raw_node_ref *raw;
|
|
- uint32_t totlen;
|
|
- int rc;
|
|
-
|
|
- down_write(&c->xattr_sem);
|
|
- list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
|
|
- rc = do_verify_xattr_datum(c, xd);
|
|
- if (rc < 0)
|
|
- continue;
|
|
- list_del_init(&xd->xindex);
|
|
- spin_lock(&c->erase_completion_lock);
|
|
- for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
|
|
- if (ref_flags(raw) != REF_UNCHECKED)
|
|
- continue;
|
|
- jeb = &c->blocks[ref_offset(raw) / c->sector_size];
|
|
- totlen = PAD(ref_totlen(c, jeb, raw));
|
|
- c->unchecked_size -= totlen; c->used_size += totlen;
|
|
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
|
|
- raw->flash_offset = ref_offset(raw)
|
|
- | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL);
|
|
- }
|
|
- if (xd->flags & JFFS2_XFLAGS_DEAD)
|
|
- list_add(&xd->xindex, &c->xattr_dead_list);
|
|
- spin_unlock(&c->erase_completion_lock);
|
|
- }
|
|
- up_write(&c->xattr_sem);
|
|
- return list_empty(&c->xattr_unchecked) ? 1 : 0;
|
|
-}
|
|
-
|
|
-void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
|
|
-{
|
|
- /* must be called under spin_lock(&c->erase_completion_lock) */
|
|
- if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)
|
|
- return;
|
|
-
|
|
- list_del(&xd->xindex);
|
|
- jffs2_free_xattr_datum(xd);
|
|
-}
|
|
-
|
|
-void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
|
|
-{
|
|
- /* must be called under spin_lock(&c->erase_completion_lock) */
|
|
- struct jffs2_xattr_ref *tmp, **ptmp;
|
|
-
|
|
- if (ref->node != (void *)ref)
|
|
- return;
|
|
-
|
|
- for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) {
|
|
- if (ref == tmp) {
|
|
- *ptmp = tmp->next;
|
|
- break;
|
|
- }
|
|
- }
|
|
- jffs2_free_xattr_ref(ref);
|
|
-}
|
|
diff -Nupr old/fs/jffs2/xattr.h new/fs/jffs2/xattr.h
|
|
--- old/fs/jffs2/xattr.h 2022-05-09 17:22:53.000000000 +0800
|
|
+++ new/fs/jffs2/xattr.h 2022-05-09 20:04:55.580000000 +0800
|
|
@@ -12,7 +12,6 @@
|
|
#ifndef _JFFS2_FS_XATTR_H_
|
|
#define _JFFS2_FS_XATTR_H_
|
|
|
|
-#include <linux/xattr.h>
|
|
#include <linux/list.h>
|
|
|
|
#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
|
|
@@ -48,7 +47,7 @@ struct jffs2_xattr_ref
|
|
struct jffs2_raw_node_ref *node;
|
|
uint8_t class;
|
|
uint8_t flags; /* Currently unused */
|
|
- u16 unused;
|
|
+ uint16_t unused;
|
|
|
|
uint32_t xseqno;
|
|
union {
|
|
@@ -89,16 +88,14 @@ extern int jffs2_verify_xattr(struct jff
|
|
extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
|
|
extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
|
|
|
|
-extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
|
|
- char *buffer, size_t size);
|
|
-extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
|
|
- const char *buffer, size_t size, int flags);
|
|
-
|
|
extern const struct xattr_handler *jffs2_xattr_handlers[];
|
|
extern const struct xattr_handler jffs2_user_xattr_handler;
|
|
extern const struct xattr_handler jffs2_trusted_xattr_handler;
|
|
|
|
extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
|
|
+#define jffs2_getxattr generic_getxattr
|
|
+#define jffs2_setxattr generic_setxattr
|
|
+#define jffs2_removexattr generic_removexattr
|
|
|
|
#else
|
|
|
|
@@ -113,12 +110,13 @@ extern ssize_t jffs2_listxattr(struct de
|
|
|
|
#define jffs2_xattr_handlers NULL
|
|
#define jffs2_listxattr NULL
|
|
+#define jffs2_getxattr NULL
|
|
+#define jffs2_setxattr NULL
|
|
+#define jffs2_removexattr NULL
|
|
|
|
#endif /* CONFIG_JFFS2_FS_XATTR */
|
|
|
|
#ifdef CONFIG_JFFS2_FS_SECURITY
|
|
-extern int jffs2_init_security(struct inode *inode, struct inode *dir,
|
|
- const struct qstr *qstr);
|
|
extern const struct xattr_handler jffs2_security_xattr_handler;
|
|
#else
|
|
#define jffs2_init_security(inode,dir,qstr) (0)
|
|
diff -Nupr old/fs/jffs2/xattr_trusted.c new/fs/jffs2/xattr_trusted.c
|
|
--- old/fs/jffs2/xattr_trusted.c 2022-05-09 17:15:24.360000000 +0800
|
|
+++ new/fs/jffs2/xattr_trusted.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,46 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2006 NEC Corporation
|
|
- *
|
|
- * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/xattr.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include "nodelist.h"
|
|
-
|
|
-static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
|
|
- struct dentry *unused, struct inode *inode,
|
|
- const char *name, void *buffer, size_t size)
|
|
-{
|
|
- return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED,
|
|
- name, buffer, size);
|
|
-}
|
|
-
|
|
-static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
|
|
- struct dentry *unused, struct inode *inode,
|
|
- const char *name, const void *buffer,
|
|
- size_t size, int flags)
|
|
-{
|
|
- return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED,
|
|
- name, buffer, size, flags);
|
|
-}
|
|
-
|
|
-static bool jffs2_trusted_listxattr(struct dentry *dentry)
|
|
-{
|
|
- return capable(CAP_SYS_ADMIN);
|
|
-}
|
|
-
|
|
-const struct xattr_handler jffs2_trusted_xattr_handler = {
|
|
- .prefix = XATTR_TRUSTED_PREFIX,
|
|
- .list = jffs2_trusted_listxattr,
|
|
- .set = jffs2_trusted_setxattr,
|
|
- .get = jffs2_trusted_getxattr
|
|
-};
|
|
diff -Nupr old/fs/jffs2/xattr_user.c new/fs/jffs2/xattr_user.c
|
|
--- old/fs/jffs2/xattr_user.c 2022-05-09 17:15:24.360000000 +0800
|
|
+++ new/fs/jffs2/xattr_user.c 1970-01-01 08:00:00.000000000 +0800
|
|
@@ -1,40 +0,0 @@
|
|
-/*
|
|
- * JFFS2 -- Journalling Flash File System, Version 2.
|
|
- *
|
|
- * Copyright © 2006 NEC Corporation
|
|
- *
|
|
- * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
|
|
- *
|
|
- * For licensing information, see the file 'LICENCE' in this directory.
|
|
- *
|
|
- */
|
|
-
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/fs.h>
|
|
-#include <linux/jffs2.h>
|
|
-#include <linux/xattr.h>
|
|
-#include <linux/mtd/mtd.h>
|
|
-#include "nodelist.h"
|
|
-
|
|
-static int jffs2_user_getxattr(const struct xattr_handler *handler,
|
|
- struct dentry *unused, struct inode *inode,
|
|
- const char *name, void *buffer, size_t size)
|
|
-{
|
|
- return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER,
|
|
- name, buffer, size);
|
|
-}
|
|
-
|
|
-static int jffs2_user_setxattr(const struct xattr_handler *handler,
|
|
- struct dentry *unused, struct inode *inode,
|
|
- const char *name, const void *buffer,
|
|
- size_t size, int flags)
|
|
-{
|
|
- return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER,
|
|
- name, buffer, size, flags);
|
|
-}
|
|
-
|
|
-const struct xattr_handler jffs2_user_xattr_handler = {
|
|
- .prefix = XATTR_USER_PREFIX,
|
|
- .set = jffs2_user_setxattr,
|
|
- .get = jffs2_user_getxattr
|
|
-};
|