All Jupyter notebook files for Week 3 of Andrew Ng's Coursera Machine Learning Specialization course Supervised Machine Learning: Regression and Classification (all of the week's Python programming exercises).
This assignment:
Exercise 1
import numpy as np

def sigmoid(z):
    """
    Compute the sigmoid of z

    Args:
        z (ndarray): A scalar, numpy array of any size.

    Returns:
        g (ndarray): sigmoid(z), with the same shape as z
    """
    g = 1 / (1 + np.exp(-z))

    return g
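A quick sanity check (not part of the graded notebook): sigmoid(0) should be exactly 0.5, and because np.exp broadcasts, the function works element-wise on arrays.

print(sigmoid(0))                        # 0.5
print(sigmoid(np.array([-1., 0., 1.])))  # [0.26894142 0.5        0.73105858]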
Exercise 2
def compute_cost(X, y, w, b, lambda_=1):
    """
    Computes the cost over all examples
    Args:
        X : (ndarray Shape (m,n))   data, m examples by n features
        y : (array_like Shape (m,)) target value
        w : (array_like Shape (n,)) values of the parameters of the model
        b : (scalar)                value of the bias parameter of the model
        lambda_ : unused placeholder
    Returns:
        total_cost : (scalar) cost
    """
    m, n = X.shape

    total_cost = 0.0
    for i in range(m):
        # model output for example i: f_wb = sigmoid(w . x_i + b)
        z_i = np.dot(X[i], w) + b
        f_wb_i = sigmoid(z_i)
        # binary cross-entropy loss for example i
        total_cost += -y[i] * np.log(f_wb_i) - (1 - y[i]) * np.log(1 - f_wb_i)

    total_cost = total_cost / m

    return total_cost
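For reference, the same computation can be collapsed into array operations. This is a minimal sketch (not the graded solution, and compute_cost_vec is my own name for it) that should match the loop version above up to floating-point rounding; it relies on the sigmoid defined in Exercise 1.

def compute_cost_vec(X, y, w, b):
    # sigmoid(X @ w + b) evaluates the model on all m examples at once
    f_wb = sigmoid(X @ w + b)
    # average binary cross-entropy over the m examples
    return np.mean(-y * np.log(f_wb) - (1 - y) * np.log(1 - f_wb))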
Exercise 3
def compute_gradient(X, y, w, b, lambda_=None):
    """
    Computes the gradient for logistic regression

    Args:
        X : (ndarray Shape (m,n))   variable such as house size
        y : (array_like Shape (m,)) actual value
        w : (array_like Shape (n,)) values of the parameters of the model
        b : (scalar)                value of the bias parameter of the model
        lambda_ : unused placeholder
    Returns:
        dj_db : (scalar)                The gradient of the cost w.r.t. the parameter b.
        dj_dw : (array_like Shape (n,)) The gradient of the cost w.r.t. the parameters w.
    """
    m, n = X.shape
    dj_dw = np.zeros(w.shape)
    dj_db = 0.

    for i in range(m):
        # prediction error for example i: f_wb - y_i
        f_wb = sigmoid(np.dot(X[i], w) + b)
        dj_db_i = f_wb - y[i]
        dj_db += dj_db_i

        # accumulate the error weighted by each feature value
        for j in range(n):
            dj_dw[j] = dj_dw[j] + dj_db_i * X[i, j]

    # average over the m examples
    dj_dw = dj_dw / m
    dj_db = dj_db / m

    return dj_db, dj_dw
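With the cost and gradient in place, a plain gradient-descent loop ties them together. The notebook provides its own gradient_descent helper with more parameters; the sketch below is only a simplified stand-in to show how compute_gradient is consumed, with alpha and num_iters chosen arbitrarily.

def gradient_descent(X, y, w, b, alpha=0.001, num_iters=1000):
    # move w and b a small step against the gradient, num_iters times
    for _ in range(num_iters):
        dj_db, dj_dw = compute_gradient(X, y, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
    return w, b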
Exercise 4
def predict(X, w, b):
    """
    Predict whether the label is 0 or 1 using learned logistic
    regression parameters w and b

    Args:
        X : (ndarray Shape (m, n))
        w : (array_like Shape (n,)) parameters of the model
        b : (scalar, float)         parameter of the model

    Returns:
        p : (ndarray (m,)) the predictions for X using a threshold at 0.5
    """
    m, n = X.shape
    p = np.zeros(m)

    for i in range(m):
        # model output for example i
        f_wb = sigmoid(np.dot(X[i], w) + b)

        # threshold the probability at 0.5
        p[i] = 1 if f_wb >= 0.5 else 0

    return p
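The notebook grades this by comparing predictions against the labels. Assuming X_train and y_train are the training arrays loaded earlier in the notebook and w, b are the learned parameters, a one-line accuracy check along those lines would be:

p = predict(X_train, w, b)
print('Train Accuracy: %f' % (np.mean(p == y_train) * 100))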
Exercise 5
def compute_cost_reg(X, y, w, b, lambda_=1):
    """
    Computes the regularized cost over all examples
    Args:
        X : (array_like Shape (m,n)) data, m examples by n features
        y : (array_like Shape (m,)) target value
        w : (array_like Shape (n,)) values of the parameters of the model
        b : (scalar)                value of the bias parameter of the model
        lambda_ : (scalar, float)   controls the amount of regularization
    Returns:
        total_cost : (scalar) cost
    """
    m, n = X.shape

    # cross-entropy cost from Exercise 2 (lambda_ plays no role there)
    cost_without_reg = compute_cost(X, y, w, b)

    # L2 penalty on the weights; the bias b is not regularized
    reg_cost = 0.
    for j in range(n):
        reg_cost += (w[j] ** 2)

    total_cost = cost_without_reg + (lambda_ / (2 * m)) * reg_cost

    return total_cost
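The loop over w can likewise be collapsed with np.sum. A sketch of the same regularized cost (compute_cost_reg_vec is my own name, and it assumes the compute_cost from Exercise 2 is in scope):

def compute_cost_reg_vec(X, y, w, b, lambda_=1):
    m = X.shape[0]
    # unregularized cross-entropy plus an L2 penalty on w; b is not penalized
    return compute_cost(X, y, w, b) + (lambda_ / (2 * m)) * np.sum(w ** 2)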
Exercise 6
def compute_gradient_reg(X, y, w, b, lambda_=1):
    """
    Computes the gradient for regularized logistic regression

    Args:
        X : (ndarray Shape (m,n)) variable such as house size
        y : (ndarray Shape (m,)) actual value
        w : (ndarray Shape (n,)) values of the parameters of the model
        b : (scalar)             value of the bias parameter of the model
        lambda_ : (scalar,float) regularization constant
    Returns:
        dj_db : (scalar)             The gradient of the cost w.r.t. the parameter b.
        dj_dw : (ndarray Shape (n,)) The gradient of the cost w.r.t. the parameters w.
    """
    m, n = X.shape

    # unregularized gradient from Exercise 3
    dj_db, dj_dw = compute_gradient(X, y, w, b)

    # add the derivative of the L2 penalty; b is not regularized
    for j in range(n):
        dj_dw[j] = dj_dw[j] + (lambda_ / m) * w[j]

    return dj_db, dj_dw
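One way to gain confidence in the analytic gradients (beyond the notebook's unit tests) is a finite-difference check: perturb each weight and compare the numerical slope of compute_cost_reg against compute_gradient_reg. The sketch below uses made-up toy data purely for illustration; check_gradient, X_toy, and y_toy are not part of the assignment.

def check_gradient(X, y, w, b, lambda_=1, eps=1e-5):
    dj_db, dj_dw = compute_gradient_reg(X, y, w, b, lambda_)
    for j in range(w.size):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[j] += eps
        w_minus[j] -= eps
        # central-difference approximation of d(cost)/d(w_j)
        num = (compute_cost_reg(X, y, w_plus, b, lambda_)
               - compute_cost_reg(X, y, w_minus, b, lambda_)) / (2 * eps)
        print(f'dj_dw[{j}]: analytic {dj_dw[j]:.6f}, numeric {num:.6f}')

rng = np.random.default_rng(0)
X_toy = rng.normal(size=(5, 3))
y_toy = np.array([0., 1., 1., 0., 1.])
check_gradient(X_toy, y_toy, np.array([0.1, -0.2, 0.3]), 0.5)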