Linux Operating System - User-space Processes

Linux Kernel Source:
struct task_struct data-structure - ...
Task state bitmask flags - tsk->state - ...
struct task_struct instance example2: struct task_struct *p - p->state == TASK_RUNNING - ...

Linux Process task state - ... (see the user-space sketch just below this list)
Linux Kernel sub-systems - ...
Linux system architecture - ...
Linux system architecture Image2 - ...
Java JVM architecture - ...
Kernel type: Monolithic-Kernel, Micro-Kernel and Hybrid-Kernel examples - ...
Android OS on top of Linux Kernel (OS):
Android architecture - ...
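
As a quick user-space illustration of the task state discussed in these videos, here is a minimal sketch (the program and its parsing logic are my own, added for illustration) that reads a process's state letter from /proc/<pid>/stat - this is the same state that get_task_state() derives from tsk->state and that tools such as ps and top display:

/* Hedged example: print the state letter of a process from /proc/<pid>/stat.
 * The field after "pid (comm)" is the state character: R (running),
 * S (sleeping), D (uninterruptible sleep), T (stopped), Z (zombie), etc. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char *argv[])
{
	char path[64], buf[512];
	const char *pid = (argc > 1) ? argv[1] : "self";
	FILE *fp;
	char *p;

	snprintf(path, sizeof(path), "/proc/%s/stat", pid);
	fp = fopen(path, "r");
	if (!fp) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (!fgets(buf, sizeof(buf), fp)) {
		fclose(fp);
		return EXIT_FAILURE;
	}
	fclose(fp);

	/* comm may contain spaces, so locate the last ')' first */
	p = strrchr(buf, ')');
	if (p && *(p + 2))
		printf("pid %s state: %c\n", pid, *(p + 2));
	return EXIT_SUCCESS;
}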

Here is the struct task_struct data-structure (/include/linux/sched.h) from the kernel source version 4.14 for quick reference:

struct task_struct {
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */

	void				*stack;
	atomic_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

	struct llist_node		wake_entry;
	int				on_cpu;
	/* Current CPU: */
	unsigned int			cpu;
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	int				wake_cpu;
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
	struct task_group		*sched_task_group;
	struct sched_dl_entity		dl;

	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;

	unsigned int			btrace_seq;

	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

	struct task_rss_stat		rss_stat;
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
	unsigned			restore_sigmask:1;
	unsigned			memcg_may_oom:1;
	unsigned			memcg_kmem_skip_account:1;
	unsigned			brk_randomized:1;
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link			pids[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	int __user			*set_child_tid;

	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
	u64				utimescaled;
	u64				stimescaled;
	u64				gtime;
	struct prev_cputime		prev_cputime;
	struct vtime			vtime;

	atomic_t			tick_dep_mask;
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	struct task_cputime		cputime_expires;
	struct list_head		cpu_timers[3];

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
	unsigned long			last_switch_count;
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	struct audit_context		*audit_context;
	kuid_t				loginuid;
	unsigned int			sessionid;
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;

	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;

	unsigned int			irq_events;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;

# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];

	struct hist_lock *xhlocks; /* Crossrelease history locks */
	unsigned int xhlock_idx;
	/* For restoring at history boundaries */
	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
	unsigned int hist_id;
	/* For overwrite check at each context exit */
	unsigned int hist_id_save[XHLOCK_CTX_NR];

	unsigned int			in_ubsan;

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

	/* Stack plugging: */
	struct blk_plug			*plug;

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

	/* Ptrace state: */
	unsigned long			ptrace_message;
	siginfo_t			*last_siginfo;

	struct task_io_accounting	ioac;
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
	u32				closid;
	u32				rmid;
	struct robust_list_head __user	*robust_list;
	struct compat_robust_list_head __user *compat_robust_list;
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
	unsigned long			preempt_disable_ip;
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	struct list_head		numa_entry;
	struct numa_group		*numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long			numa_faults_locality[3];

	unsigned long			numa_pages_migrated;

	struct tlbflush_unmap_batch	tlb_ubc;

	struct rcu_head			rcu;

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info		*splice_pipe;

	struct page_frag		task_frag;

	struct task_delay_info		*delays;

	int				make_it_fail;
	unsigned int			fail_nth;
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int				nr_dirtied;
	int				nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long			dirty_paused_when;

	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64				timer_slack_ns;
	u64				default_timer_slack_ns;

	unsigned int			kasan_depth;

	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long		ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t			trace_overrun;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long			trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long			trace_recursion;
#endif /* CONFIG_TRACING */

	/* Coverage collection mode enabled for this task (0 if disabled): */
	enum kcov_mode			kcov_mode;

	/* Size of the kcov_area: */
	unsigned int			kcov_size;

	/* Buffer for coverage collection: */
	void				*kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov			*kcov;

	struct mem_cgroup		*memcg_in_oom;
	gfp_t				memcg_oom_gfp_mask;
	int				memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int			memcg_nr_pages_over_high;

	struct uprobe_task		*utask;
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
	unsigned long			task_state_change;
	int				pagefault_disabled;
	struct task_struct		*oom_reaper_list;
	struct vm_struct		*stack_vm_area;
	/* A live task holds one reference: */
	atomic_t			stack_refcount;
	int patch_state;
	/* Used by LSM modules for access restriction: */
	void				*security;

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 * Do not put anything below here!
	 */
};
Here is the task state bitmask representing process states (/include/linux/sched.h) from the kernel source version 4.14 for quick reference:

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)
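
These flags are what set_current_state() and the wait/wake APIs manipulate. As a hedged sketch of the canonical kernel sleep pattern (the wait-queue my_wq and the condition flag below are hypothetical names of my own, not from the video), a task typically puts itself to sleep like this:

/* Canonical sleep/wake pattern: mark the task non-runnable,
 * re-check the condition, then call schedule(). */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait queue */
static int condition;			/* hypothetical wake-up condition */

static void wait_for_condition(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue current and set its state to TASK_INTERRUPTIBLE */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		schedule();			/* task shows as 'S' until woken */
	}
	finish_wait(&my_wq, &wait);		/* back to TASK_RUNNING */
}

/* Some other context sets the condition and wakes the sleeper: */
static void signal_condition(void)
{
	condition = 1;
	wake_up_interruptible(&my_wq);		/* wakes TASK_INTERRUPTIBLE sleepers */
}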

Linux Kernel Source:
struct task_struct data-structure - ...
struct mm_struct data-structure - ...
Task state bitmask flags - tsk->state - ...

Here is the struct mm_struct data-structure (/include/linux/mm_types.h) from the kernel source version 4.14 for quick reference:

struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;                   /* per-thread vmacache */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;

	/**
	 * @mm_users: The number of users including userspace.
	 *
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @mm_count
	 * (which may then free the &struct mm_struct if @mm_count also
	 * drops to 0).
	 */
	atomic_t mm_users;

	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users count as 1).
	 *
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */
	atomic_t mm_count;

	atomic_long_t nr_ptes;			/* PTE page table pages */
	atomic_long_t nr_pmds;			/* PMD page table pages */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */
	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
	atomic_t membarrier_state;
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
	struct mmu_notifier_mm *mmu_notifier_mm;
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
	struct cpumask cpumask_allocation;
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	atomic_t tlb_flush_pending;
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
	struct uprobes_state uprobes_state;
	atomic_long_t hugetlb_usage;
	struct work_struct async_put_work;

	/* HMM needs to track a few things per mm */
	struct hmm *hmm;
} __randomize_layout;
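
To show how a few of these fields fit together, here is a minimal kernel-side sketch (my own illustrative snippet, assuming kernel ~4.14; the module name mmdump is made up) that prints the memory-layout fields of the current process's mm_struct. Note that kernel threads have no user address space, so current->mm may be NULL:

/* Hedged sketch: dump a few mm_struct fields of the current task
 * (module_init runs in the context of the insmod process). */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm_types.h>

static int __init mmdump_init(void)
{
	struct mm_struct *mm = current->mm;

	if (!mm) {
		pr_info("mmdump: current task has no user address space\n");
		return 0;
	}

	pr_info("mmdump: %s[%d] code=%lx-%lx heap=%lx-%lx stack=%lx total_vm=%lu pages\n",
		current->comm, current->pid,
		mm->start_code, mm->end_code,
		mm->start_brk, mm->brk,
		mm->start_stack, mm->total_vm);
	return 0;
}

static void __exit mmdump_exit(void)
{
}

module_init(mmdump_init);
module_exit(mmdump_exit);
MODULE_LICENSE("GPL");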
