[{"data":1,"prerenderedAt":323},["ShallowReactive",2],{"/open_source/home/core_concepts":3,"surround-/open_source/home/core_concepts":307},{"id":4,"title":5,"avatar":6,"banner":6,"body":7,"category":6,"desc":300,"description":282,"extension":301,"links":6,"meta":302,"navigation":6,"path":303,"seo":304,"stem":305,"__hash__":306},"docs/en/open_source/home/core_concepts.md","Core Concepts",null,{"type":8,"value":9,"toc":281},"minimark",[10,15,44,47,55,61,64,69,74,77,80,142,147,153,158,162,167,173,177,188,192,197,202,206,209,220,235,247,250,254,257,261,264,274,278],[11,12,14],"h2",{"id":13},"overview","Overview",[16,17,18,26,32,38],"ul",{},[19,20,21],"li",{},[22,23,25],"a",{"href":24},"#mos-memory-operating-system","MOS (Memory Operating System)",[19,27,28],{},[22,29,31],{"href":30},"#%EF%B8%8Fmemcube","MemCube",[19,33,34],{},[22,35,37],{"href":36},"#memory-types","Memory Types",[19,39,40],{},[22,41,43],{"href":42},"#cross-cutting-concepts","Cross-Cutting Concepts",[11,45,25],{"id":46},"mos-memory-operating-system",[48,49,50,54],"p",{},[51,52,53],"strong",{},"What it is:","\nThe orchestration layer that coordinates multiple MemCubes and memory operations. It connects your LLMs with structured, explainable memory for reasoning and planning.",[48,56,57,60],{},[51,58,59],{},"When to use:","\nUse MOS whenever you need to bridge users, sessions, or agents with consistent, auditable memory workflows.",[11,62,31],{"id":63},"memcube",[48,65,66,68],{},[51,67,53],{},"\nA MemCube is like a flexible, swappable memory cartridge. Each user, session, or task can have its own MemCube, which can hold one or more memory types.",[48,70,71,73],{},[51,72,59],{},"\nUse different MemCubes to isolate, reuse, or scale your memory as your system grows.",[11,75,37],{"id":76},"memory-types",[48,78,79],{},"MemOS treats memory like a living system — not just static data but evolving knowledge. \nHere's how the three core memory types work together:",[81,82,83,99],"table",{},[84,85,86],"thead",{},[87,88,89,93,96],"tr",{},[90,91,92],"th",{},"Memory Type",[90,94,95],{},"Description",[90,97,98],{},"When to Use",[100,101,102,116,129],"tbody",{},[87,103,104,110,113],{},[105,106,107],"td",{},[51,108,109],{},"Parametric",[105,111,112],{},"Knowledge distilled into model weights",[105,114,115],{},"Evergreen skills, stable domain expertise",[87,117,118,123,126],{},[105,119,120],{},[51,121,122],{},"Activation",[105,124,125],{},"Short-term KV cache and hidden states",[105,127,128],{},"Fast reuse in dialogue, multi-turn sessions",[87,130,131,136,139],{},[105,132,133],{},[51,134,135],{},"Plaintext",[105,137,138],{},"Text, docs, graph nodes, or vector chunks",[105,140,141],{},"Searchable, inspectable, evolving knowledge",[143,144,146],"h3",{"id":145},"parametric-memory","Parametric Memory",[48,148,149,152],{},[51,150,151],{},"What:","\nKnowledge embedded directly into the model's weights — think of this as the model's \"cortex\". \nIt's always on, providing zero-latency reasoning.",[48,154,155,157],{},[51,156,59],{},"\nPerfect for stable domain knowledge, distilled FAQs, or skills that rarely change.",[143,159,161],{"id":160},"activation-memory","Activation Memory",[48,163,164,166],{},[51,165,151],{},"\nActivation Memory is your model's reusable \"working memory\" — it includes precomputed key-value caches and hidden states that can be directly injected into the model's attention mechanism.\nThink of it as pre-cooked context that saves your LLM from repeatedly\nre-encoding static or frequently used information.",[48,168,169,172],{},[51,170,171],{},"Why it matters:","\nBy storing stable background content (like FAQs or known facts) in a KV-cache, your model can skip redundant computation during the prefill phase.\nThis dramatically reduces Time To First Token (TTFT) and improves throughput for multi-turn conversations or retrieval-augmented generation.",[48,174,175],{},[51,176,59],{},[16,178,179,182,185],{},[19,180,181],{},"Reuse background knowledge across many user queries.",[19,183,184],{},"Speed up chatbots that rely on the same domain context each turn.",[19,186,187],{},"Combine with MemScheduler to auto-promote stable plaintext memory to KV format.",[143,189,191],{"id":190},"explicit-memory","Explicit Memory",[48,193,194,196],{},[51,195,151],{},"\nStructured or unstructured knowledge units — user-visible, explainable. These can be documents, chat logs, graph nodes, or vector embeddings.",[48,198,199,201],{},[51,200,59],{},"\nBest for semantic search, user preferences, or traceable facts that evolve over time. \nSupports tags, provenance, and lifecycle states.",[11,203,205],{"id":204},"how-they-work-together","How They Work Together",[48,207,208],{},"MemOS lets you orchestrate all three memory types in a living loop:",[16,210,211,214,217],{},[19,212,213],{},"Hot plaintext memories can be distilled into parametric weights.",[19,215,216],{},"High-frequency activation paths become reusable KV templates.",[19,218,219],{},"Stale parametric or activation units can be downgraded to plaintext nodes for traceability.",[48,221,222,223,226,227,230,231,234],{},"With MemOS, your AI doesn't just store facts — it ",[51,224,225],{},"remembers",", ",[51,228,229],{},"understands",", and ",[51,232,233],{},"grows",".",[236,237,238],"note",{},[48,239,240,243,246],{},[51,241,242],{},"Insight",[244,245],"br",{},"\nOver time, frequently used plaintext memories can be distilled into parametric form.\nRarely used weights or caches can be demoted to plaintext storage for auditing and retraining.",[11,248,43],{"id":249},"cross-cutting-concepts",[143,251,253],{"id":252},"hybrid-retrieval","Hybrid Retrieval",[48,255,256],{},"Combines vector similarity and graph traversal for robust, context-aware search.",[143,258,260],{"id":259},"governance-lifecycle","Governance & Lifecycle",[48,262,263],{},"Every memory unit supports states (active, merged, archived), provenance tracking, and fine-grained access control — essential for auditing and compliance.",[236,265,266],{},[48,267,268,271,273],{},[51,269,270],{},"Compliance Reminder",[244,272],{},"\nAlways track provenance and state changes for each memory unit.\nThis helps meet audit and data governance requirements.",[11,275,277],{"id":276},"key-takeaway","Key Takeaway",[48,279,280],{},"With MemOS, your LLM applications gain structured, evolving memory — empowering agents to plan, reason, and adapt like never \nbefore.",{"title":282,"searchDepth":283,"depth":283,"links":284},"",2,[285,286,287,288,294,295,299],{"id":13,"depth":283,"text":14},{"id":46,"depth":283,"text":25},{"id":63,"depth":283,"text":31},{"id":76,"depth":283,"text":37,"children":289},[290,292,293],{"id":145,"depth":291,"text":146},3,{"id":160,"depth":291,"text":161},{"id":190,"depth":291,"text":191},{"id":204,"depth":283,"text":205},{"id":249,"depth":283,"text":43,"children":296},[297,298],{"id":252,"depth":291,"text":253},{"id":259,"depth":291,"text":260},{"id":276,"depth":283,"text":277},"MemOS treats memory as a first-class citizen. Its core design revolves around how to orchestrate, store, retrieve, and govern memory for your LLM applications.","md",{},"/en/open_source/home/core_concepts",{"title":5,"description":282},"en/open_source/home/core_concepts","nxvas8i-37ODOFWZrZe_61CnbxDOoLtjMn4u9H9EvH8",[308,316],{"title":309,"path":310,"stem":311,"icon":312,"framework":6,"module":6,"class":313,"target":-1,"active":314,"defaultOpen":314,"children":-1,"description":315},"Your First Memory","/open_source/getting_started/your_first_memory","open_source/getting_started/your_first_memory","i-ri-bookmark-line",[],false,"Let’s build your first plaintext memory in MemOS! GeneralTextMemory is the easiest way to get hands-on with extracting, embedding, and searching simple text memories.",{"title":317,"path":318,"stem":319,"icon":320,"framework":6,"module":6,"class":321,"target":-1,"active":314,"defaultOpen":314,"children":-1,"description":322},"Architecture","/open_source/home/architecture","open_source/home/architecture","i-ri-building-2-line",[],"MemOS is made up of core modules that work together to turn your LLM into a truly memory-augmented system — from orchestration to storage to retrieval.",1770372088065]